==== pax_global_header (comment=ea47a34bde1ba1ecf9eb8dc6d44fcf7aef42f5e3) ====

==== git-lfs-3.6.1/.gitattributes ====

* text=auto
* eol=lf
*.bat eol=crlf

==== git-lfs-3.6.1/.github/CODEOWNERS ====

* @git-lfs/core

==== git-lfs-3.6.1/.github/ISSUE_TEMPLATE/bug_report.md ====

---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**System environment**
The version of your operating system, plus any relevant information about
platform or configuration (e.g., container or CI usage, Cygwin, WSL, or
non-Basic authentication). If relevant, include the output of `git config -l`
as a code block. Please also mention the usage of any proxy, including any TLS
MITM device or non-default antivirus or firewall.

**Output of `git lfs env`**
The output of running `git lfs env` as a code block.

**Additional context**
Any other relevant context about the problem here. If you're having problems
trying to push or pull data, please run the command with `GIT_TRACE=1
GIT_TRANSFER_TRACE=1 GIT_CURL_VERBOSE=1` and include it inline or attach it as
a text file. In a bash or other POSIX shell, you can simply prepend this
string and a space to the command.

==== git-lfs-3.6.1/.github/ISSUE_TEMPLATE/other-issue.md ====

---
name: Other issue
about: Ask a question, request a feature, or report something that's not a bug
title: ''
labels: ''
assignees: ''

---

**Describe the issue**
A clear and concise description of why you wrote in today.

**System environment**
The version of your operating system, plus any relevant information about
platform or configuration (e.g., container or CI usage, Cygwin, WSL, or
non-Basic authentication). If relevant, include the output of `git config -l`
as a code block.

**Output of `git lfs env`**
The output of running `git lfs env` as a code block.

**Additional context**
Any other relevant context about the problem here.
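As a worked illustration of the tracing instructions in the bug report
template above, a minimal sketch (the remote, branch, and log file name are
hypothetical):

```sh
# Prepend the tracing variables and a space to the failing command, as the
# template suggests, and capture the trace output for attachment to the issue.
GIT_TRACE=1 GIT_TRANSFER_TRACE=1 GIT_CURL_VERBOSE=1 \
  git lfs push origin main 2>lfs-trace.log
```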
==== git-lfs-3.6.1/.github/dependabot.yml ====

---
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "monthly"

==== git-lfs-3.6.1/.github/workflows/ci.yml ====

name: CI

on: [push, pull_request]

env:
  GOTOOLCHAIN: local

jobs:
  build-default:
    name: Build with default Git
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
        go: ['1.23.x']
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - run: git fetch origin "+${GITHUB_REF}:${GITHUB_REF}"
        if: ${{ github.ref_type == 'tag' }}
        # We update the current tag as the checkout step turns annotated tags
        # into lightweight ones by accident, breaking "git describe".
        # See https://github.com/actions/checkout/issues/882 for details.
      - uses: ruby/setup-ruby@v1
      - run: gem install asciidoctor
      - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go }}
      - run: brew install gettext
        if: ${{ startsWith(matrix.os, 'macos-') }}
      - run: sudo apt-get update && sudo apt-get -y install gettext libarchive-tools
        if: ${{ startsWith(matrix.os, 'ubuntu-') }}
        env:
          DEBIAN_FRONTEND: noninteractive
      - run: script/cibuild
        env:
          FORCE_LOCALIZE: true
      - run: CGO_ENABLED=0 make release
        env:
          FORCE_LOCALIZE: true
      - run: mkdir -p bin/assets
      - run: find bin/releases -name "*$(uname -s | tr A-Z a-z)*" | xargs -I{} cp {} bin/assets
      - uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.os }}
          path: bin/assets
  build-go:
    name: Build with specific Go
    strategy:
      matrix:
        go: ['1.22.x']
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - run: git fetch origin "+${GITHUB_REF}:${GITHUB_REF}"
        if: ${{ github.ref_type == 'tag' }}
      - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go }}
      - run: script/cibuild
  build-windows:
    name: Build on Windows
    runs-on: windows-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - run: git fetch origin "+${GITHUB_REF}:${GITHUB_REF}"
        if: ${{ github.ref_type == 'tag' }}
        shell: bash
      - uses: ruby/setup-ruby@v1
      - run: gem install asciidoctor
      - run: Rename-Item -Path C:\msys64 -NewName msys64-tmp -Force
        # We move the MSYS2 installed for Ruby aside to prevent use of its Git,
        # which does not honour the PATH we set to our built git-lfs binary.
      - uses: actions/setup-go@v5
        with:
          go-version: '1.23.x'
      - run: mkdir -p "$HOME/go/bin"
        shell: bash
      - run: set GOPATH=%HOME%\go
      - run: choco install -y InnoSetup
      - run: make man
        shell: bash
      - run: GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
        shell: bash
      - uses: git-for-windows/setup-git-for-windows-sdk@v1
        with:
          flavor: minimal
        # We install the SDK so as to have access to the msgfmt.exe binary
        # from the GNU gettext package.
      - run: GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH" env -u TMPDIR script/cibuild
        shell: bash
        # We clear the TMPDIR set for Ruby so mktemp and Go use the same
        # volume for temporary files.
      - run: rm -f commands/mancontent_gen.go
        shell: bash
      - run: GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH" make GOARCH=386 -B
        shell: bash
        env:
          FORCE_LOCALIZE: true
      - run: mv bin\git-lfs.exe git-lfs-x86.exe
      - run: rm -f commands/mancontent_gen.go
        shell: bash
      - run: GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH" make GOARCH=amd64 -B
        shell: bash
        env:
          FORCE_LOCALIZE: true
      - run: mv bin\git-lfs.exe git-lfs-x64.exe
      - run: rm -f commands/mancontent_gen.go
        shell: bash
      - run: GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH" make GOARCH=arm64 -B
        shell: bash
        env:
          FORCE_LOCALIZE: true
      - run: mv bin\git-lfs.exe git-lfs-arm64.exe
      - run: iscc script\windows-installer\inno-setup-git-lfs-installer.iss
      - run: mkdir -p bin/assets
        shell: bash
      - run: mv *.exe bin/assets
        shell: bash
      - uses: actions/upload-artifact@v4
        with:
          name: windows-latest
          path: bin/assets
  build-latest:
    name: Build with latest Git
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: actions/setup-go@v5
        with:
          go-version: '1.23.x'
      - run: git fetch origin "+${GITHUB_REF}:${GITHUB_REF}"
        if: ${{ github.ref_type == 'tag' }}
      - run: git clone -b master https://github.com/git/git.git "$HOME/git"
      - run: |
          echo "GIT_INSTALL_DIR=$HOME/git" >> "$GITHUB_ENV"
          echo "$HOME/git/bin" >> "$GITHUB_PATH"
        if: ${{ matrix.os == 'macos-latest' }}
        # We install our custom Git version into a PATH location ahead of
        # that of the Git installed by Homebrew.
      - run: script/build-git "$HOME/git"
      - run: GIT_DEFAULT_HASH=sha256 script/cibuild
  build-earliest:
    name: Build with earliest Git
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: actions/setup-go@v5
        with:
          go-version: '1.23.x'
      - run: git fetch origin "+${GITHUB_REF}:${GITHUB_REF}"
        if: ${{ github.ref_type == 'tag' }}
      - run: git clone -b v2.0.0 https://github.com/git/git.git "$HOME/git"
      - run: |
          echo "GIT_INSTALL_DIR=$HOME/git" >> "$GITHUB_ENV"
          echo "$HOME/git/bin" >> "$GITHUB_PATH"
        if: ${{ matrix.os == 'macos-latest' }}
        # We install our custom Git version into a PATH location ahead of
        # that of the Git installed by Homebrew.
      - run: script/build-git "$HOME/git"
      - run: script/cibuild
  build-docker:
    name: Build Linux packages
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - run: git fetch origin "+${GITHUB_REF}:${GITHUB_REF}"
        if: ${{ github.ref_type == 'tag' }}
      - uses: ruby/setup-ruby@v1
      - run: git clone https://github.com/git-lfs/build-dockers.git "$HOME/build-dockers"
      - run: (cd "$HOME/build-dockers" && ./build_dockers.bsh)
      - run: ./docker/run_dockers.bsh --prune
  build-docker-cross:
    name: Build Cross Linux packages
    runs-on: ubuntu-latest
    strategy:
      matrix:
        arch: [arm64]
        container: [debian_12]
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - run: git fetch origin "+${GITHUB_REF}:${GITHUB_REF}"
        if: ${{ github.ref_type == 'tag' }}
      - uses: ruby/setup-ruby@v1
      - run: |
          echo '{"experimental": true}' | sudo tee /etc/docker/daemon.json
          sudo systemctl restart docker.service
          docker version -f '{{.Server.Experimental}}'
      - uses: docker/setup-qemu-action@v3
      - run: git clone https://github.com/git-lfs/build-dockers.git "$HOME/build-dockers"
      - run: (cd "$HOME/build-dockers" && ./build_dockers.bsh --arch=$ARCH $CONTAINER)
        env:
          ARCH: ${{matrix.arch}}
          CONTAINER: ${{matrix.container}}
      - run: ./docker/run_dockers.bsh --prune --arch=$ARCH $CONTAINER
        env:
          ARCH: ${{matrix.arch}}
          CONTAINER: ${{matrix.container}}

==== git-lfs-3.6.1/.github/workflows/release.yml ====

name: Release

on:
  push:
    tags: '*'

env:
  GOTOOLCHAIN: local

jobs:
  build-windows:
    name: Build Windows Assets
    runs-on: windows-latest
    strategy:
      matrix:
        go: ['1.23.x']
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - run: git fetch origin "+${GITHUB_REF}:${GITHUB_REF}"
        shell: bash
        # We update the current tag as the checkout step turns annotated tags
        # into lightweight ones by accident, breaking "git describe".
        # See https://github.com/actions/checkout/issues/882 for details.
      - uses: ruby/setup-ruby@v1
      - run: gem install asciidoctor
      - run: Rename-Item -Path C:\msys64 -NewName msys64-tmp -Force
        # We move the MSYS2 installed for Ruby aside to prevent use of its Git,
        # which does not honour the PATH we set to our built git-lfs binary.
      - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go }}
      - run: mkdir -p "$HOME/go/bin"
        shell: bash
      - run: set GOPATH=%HOME%\go
      - run: choco install -y InnoSetup
      - run: choco install -y zip
      - run: choco install -y jq
      - run: GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
        shell: bash
      - uses: git-for-windows/setup-git-for-windows-sdk@v1
        with:
          flavor: minimal
        # We install the SDK so as to have access to the msgfmt.exe binary
        # from the GNU gettext package.
      - run: mkdir -p bin/releases
        shell: bash
        # We clear the TMPDIR set for Ruby so mktemp and Go use the same
        # volume for temporary files.
      - run: PATH="$HOME/go/bin:$PATH" GOARCH=amd64 go generate && env -u TMPDIR make bin/releases/git-lfs-windows-amd64-$(git describe).zip
        shell: bash
        env:
          FORCE_LOCALIZE: true
      - run: PATH="$HOME/go/bin:$PATH" GOARCH=386 go generate && env -u TMPDIR make bin/releases/git-lfs-windows-386-$(git describe).zip
        shell: bash
        env:
          FORCE_LOCALIZE: true
      - run: PATH="$HOME/go/bin:$PATH" GOARCH=arm64 go generate && env -u TMPDIR make bin/releases/git-lfs-windows-arm64-$(git describe).zip
        shell: bash
        env:
          FORCE_LOCALIZE: true
      - run: env -u TMPDIR make release-windows-stage-1
        shell: bash
        env:
          FORCE_LOCALIZE: true
      - uses: azure/trusted-signing-action@v0.5.0
        with:
          azure-tenant-id: ${{ secrets.SPN_GIT_LFS_SIGNING_TENANT_ID }}
          azure-client-id: ${{ secrets.SPN_GIT_LFS_SIGNING_CLIENT_ID }}
          azure-client-secret: ${{ secrets.SPN_GIT_LFS_SIGNING }}
          endpoint: https://wus.codesigning.azure.net/
          trusted-signing-account-name: GitHubInc
          certificate-profile-name: GitHubInc
          files-folder: ${{ github.workspace }}/tmp/stage1
          files-folder-filter: exe
          file-digest: SHA256
          timestamp-rfc3161: http://timestamp.acs.microsoft.com
          timestamp-digest: SHA256
      - run: env -u TMPDIR make release-windows-stage-2
        shell: bash
      - uses: azure/trusted-signing-action@v0.5.0
        with:
          azure-tenant-id: ${{ secrets.SPN_GIT_LFS_SIGNING_TENANT_ID }}
          azure-client-id: ${{ secrets.SPN_GIT_LFS_SIGNING_CLIENT_ID }}
          azure-client-secret: ${{ secrets.SPN_GIT_LFS_SIGNING }}
          endpoint: https://wus.codesigning.azure.net/
          trusted-signing-account-name: GitHubInc
          certificate-profile-name: GitHubInc
          files-folder: ${{ github.workspace }}/tmp/stage2
          files-folder-filter: exe
          file-digest: SHA256
          timestamp-rfc3161: http://timestamp.acs.microsoft.com
          timestamp-digest: SHA256
      - run: env -u TMPDIR make release-windows-stage-3
        shell: bash
      - run: env -u TMPDIR make release-windows-rebuild
        shell: bash
      - uses: actions/upload-artifact@v4
        with:
          name: windows-assets
          path: bin/releases
  build-macos:
    name: Build macOS Assets
    runs-on: macos-latest
    strategy:
      matrix:
        go: ['1.23.x']
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - run: git fetch origin "+${GITHUB_REF}:${GITHUB_REF}"
      - uses: ruby/setup-ruby@v1
      - run: gem install asciidoctor
      - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go }}
      - run: brew install gettext
      - run: make release
        env:
          FORCE_LOCALIZE: true
      - run: CERT_FILE="$HOME/cert.p12" make release-write-certificate
        env:
          CERT_CONTENTS: ${{secrets.MACOS_CERT_BASE64}}
      - run: CERT_FILE="$HOME/cert.p12" make release-import-certificate
        env:
          CERT_PASS: ${{secrets.MACOS_CERT_PASS}}
      - run: make release-darwin
        env:
          DARWIN_DEV_USER: ${{secrets.MACOS_DEV_USER}}
          DARWIN_DEV_PASS: ${{secrets.MACOS_DEV_PASS}}
          DARWIN_DEV_TEAM: ${{secrets.MACOS_DEV_TEAM}}
          DARWIN_CERT_ID: ${{secrets.MACOS_CERT_ID}}
      - uses: actions/upload-artifact@v4
        with:
          name: macos-assets
          path: bin/releases
  build-main:
    name: Main Release Assets
    needs:
      - build-windows
      - build-macos
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go: ['1.23.x']
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - run: git fetch origin "+${GITHUB_REF}:${GITHUB_REF}"
      - uses: ruby/setup-ruby@v1
      - run: gem install asciidoctor
      - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go }}
      - run: sudo apt-get update && sudo apt-get -y install gettext libarchive-tools
        env:
          DEBIAN_FRONTEND: noninteractive
      - uses: actions/download-artifact@v4
        with:
          name: windows-assets
          path: windows-assets
      - uses: actions/download-artifact@v4
        with:
          name: macos-assets
          path: macos-assets
      - run: CGO_ENABLED=0 make release
      - run: rm -f bin/releases/*windows* bin/releases/*darwin*
      - run: 'find windows-assets -name "*windows*" -type f | xargs -I{} mv {} bin/releases'
      - run: 'find macos-assets -name "*darwin*" -type f | xargs -I{} mv {} bin/releases'
      - run: script/upload --skip-verify $(git describe)
        env:
          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
  build-docker:
    name: Build Linux Packages
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - run: git fetch origin "+${GITHUB_REF}:${GITHUB_REF}"
      - uses: ruby/setup-ruby@v1
      - run: gem install packagecloud-ruby
      - run: git clone https://github.com/git-lfs/build-dockers.git "$HOME/build-dockers"
      - run: (cd "$HOME/build-dockers" && ./build_dockers.bsh)
      - run: ./docker/run_dockers.bsh --prune
      # If this is a pre-release tag, don't upload anything to packagecloud.
      - run: '[ -z "${GITHUB_REF%%refs/tags/*-pre*}" ] || ./script/packagecloud.rb'
        env:
          PACKAGECLOUD_TOKEN: ${{secrets.PACKAGECLOUD_TOKEN}}
  build-docker-cross:
    name: Build Cross Linux packages
    runs-on: ubuntu-latest
    strategy:
      matrix:
        arch: [arm64]
        container: [debian_12]
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - run: git fetch origin "+${GITHUB_REF}:${GITHUB_REF}"
      - uses: ruby/setup-ruby@v1
      - run: gem install packagecloud-ruby
      - run: |
          echo '{"experimental": true}' | sudo tee /etc/docker/daemon.json
          sudo systemctl restart docker.service
          docker version -f '{{.Server.Experimental}}'
      - uses: docker/setup-qemu-action@v3
      - run: git clone https://github.com/git-lfs/build-dockers.git "$HOME/build-dockers"
      - run: (cd "$HOME/build-dockers" && ./build_dockers.bsh --arch=$ARCH $CONTAINER)
        env:
          ARCH: ${{matrix.arch}}
          CONTAINER: ${{matrix.container}}
      - run: ./docker/run_dockers.bsh --prune --arch=$ARCH $CONTAINER
        env:
          ARCH: ${{matrix.arch}}
          CONTAINER: ${{matrix.container}}
      # If this is a pre-release tag, don't upload anything to packagecloud.
      - run: '[ -z "${GITHUB_REF%%refs/tags/*-pre*}" ] || ./script/packagecloud.rb'
        env:
          PACKAGECLOUD_TOKEN: ${{secrets.PACKAGECLOUD_TOKEN}}

==== git-lfs-3.6.1/.gitignore ====

bin/
benchmark/
out/
resource.syso
man/*
*.test
tmp
t/remote
t/scutiger
t/test_count
t/test_count.lock
debian/git-lfs/
debian/*.log
debian/files
debian/*.substvars
debian/debhelper-build-stamp
debian/.debhelper
/.pc
obj-*
rpm/BUILD*
rpm/*RPMS
rpm/*.log
rpm/SOURCES
repos
docker/*.key
src
commands/mancontent_gen.go
po/build
po/i-reverse.po
*.mo
*.pot
tr/tr_gen.go
lfstest-*
!lfstest-*.go
vendor/

==== git-lfs-3.6.1/.mailmap ====

Andy Neff
Artem V. Navrotskiy a.navrotskiy
Artem V. Navrotskiy
Brandon Keepers
David Pursehouse
Evan Priestley
Josh Vera
Lars Schneider
Lee Reilly
Noam Y. Tenne noamt
Rick Olson rick
Rick Olson risk danger olson
Rick Olson Your Name
Riku Lääkkölä
Ryan Simmen
Scott Barron rubyist
Scott Barron Scott Barron
Scott Richmond
Sebastian Schuberth
Taylor Blau
William Hipschman Will

==== git-lfs-3.6.1/.ruby-version ====

3.1.2

==== git-lfs-3.6.1/CHANGELOG.md ====

# Git LFS Changelog

## 3.6.1 (3 December 2024)

This release introduces a security fix for Linux, macOS, and Windows systems,
which has been assigned CVE-2024-53263.
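The fix concerns the exchange Git LFS performs with Git's credential
machinery, described in the paragraphs that follow. For orientation, a hedged
sketch of that exchange (the attribute values are hypothetical and the
protocol is abbreviated):

```sh
# Git LFS asks Git for credentials by writing "key=value" attribute lines,
# derived from the remote URL, to the standard input of "git credential fill"
# and reading the filled-in credentials back on its standard output.
printf 'protocol=https\nhost=git.example.com\npath=org/repo.git\n\n' |
  git credential fill
# A URL-encoded "%0A" (LF) or "%0D" (CR) decoded into one of these values
# could inject an extra attribute line into this stream, which is the
# behaviour the fix described below now rejects.
```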
When Git LFS requests credentials from Git for a remote host, it passes
portions of the host's URL to the `git-credential(1)` command without checking
for embedded line-ending control characters, and then sends any credentials it
receives back from the Git credential helper to the remote host. By inserting
URL-encoded control characters such as line feed (LF) or carriage return (CR)
characters into the URL, an attacker may be able to retrieve a user's Git
credentials.

By default Git LFS will now report an error if a line-ending control character
(LF or CR) or a null byte (NUL) is found in any value Git LFS would otherwise
pass to the `git-credential(1)` command.

For users who depend on the ability to pass bare carriage return characters in
a Git credential request, Git LFS will now honour the
`credential.protectProtocol` Git configuration option. If this option is set
to `false`, Git LFS will allow carriage return characters in the values it
sends to the `git-credential(1)` command. This option will be introduced in
Git as part of the remedy for the vulnerability in Git designated as
CVE-2024-52006.

Git LFS v3.6.1 will be released in coordination with releases from several
other projects including Git, Git for Windows, and Git Credential Manager
(GCM).

We would like to extend a special thanks to the following open-source
contributors:

* @Ry0taK for reporting this to us responsibly

### Bugs

* Reject bare line-ending control characters in Git credential requests (@chrisd8088)

## 3.6.0 (20 November 2024)

This release is a feature release which includes support for multi-stage
authentication with Git credential helpers (requires Git 2.46.0) and relative
worktree paths (requires Git 2.48.0), a new object transfer batch size
configuration option, better path handling when installing on Windows, more
POSIX-compliant hook scripts, and improved performance with sparse checkouts,
partial clones, and Git remotes with large numbers of tags.

Note that the 3.6.x series of Git LFS releases will be the last releases for
which we provide packages or support for versions of any Linux distribution
based on either Red Hat Enterprise Linux 7 (RHEL 7) or SUSE Linux Enterprise
Server 12 (SLES 12).

Note also that the 3.6.x series of Git LFS releases may be the last releases
for which we provide packages or support for versions of any Linux
distribution based on Debian 10 ("buster").

This release is built using Go v1.23 and therefore on macOS systems requires
macOS 11 (Big Sur) or later, and on Windows systems requires at least Windows
10 or Windows Server 2016 (although Windows 8.1 may suffice).
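As a quick way to confirm that the Git-version-gated features above are
available, a minimal sketch (the version comparison is illustrative):

```sh
# Check the installed Git and Git LFS versions; multi-stage credential helper
# authentication needs Git 2.46.0 or later, and relative worktree paths need
# Git 2.48.0 or later.
git version
git lfs version
ver=$(git version | awk '{print $3}')
[ "$(printf '%s\n2.46.0\n' "$ver" | sort -V | head -n1)" = "2.46.0" ] &&
  echo "Git $ver supports multi-stage credential helper authentication"
```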
We would like to extend a special thanks to the following open-source
contributors:

* @blanet for fixing a crash bug when handling HTTP 429 responses
* @bogomolets-owl for implementing a batch size configuration option
* @ConcurrentCrab for preventing hung SSH transfer protocol connections
* @jochenhz for ensuring files with Unicode names are not accidentally pruned
* @pastelsky for optimizing performance of our pre-push hook
* @rustfix for correcting some code comments
* @rusttech for fixing an array size allocation bug
* @xdavidwu for improving the portability of our tests and hooks

### Features

* git: improve sparse checkout support #5796 (@bk2204)
* hook: fix newlines in command missing message #5886 (@xdavidwu)
* Add batch size config value and use it everywhere #5876 (@bogomolets-owl)
* Support relative paths to linked working trees #5898 (@chrisd8088)
* git-lfs: omit tags in ls-remote; optimize pre-push #5863 (@pastelsky)
* Support multistage authentication with a Git credential helper #5803 (@bk2204)
* Support arbitrary HTTP credential schemes for authentication #5779 (@bk2204)
* Optimize performance for scanning trees in partial clones #5699 (@bk2204)
* Use lower-case file extensions in Windows installer path checks #5688 (@chrisd8088)
* Match `PATH` case insensitively in Windows installer #5680 (@bk2204)

### Bugs

* Fix crash during pure SSH object transfer with multiple objects #5905 (@chrisd8088)
* ssh: fix connection creation "leaking" connections #5816 (@ConcurrentCrab)
* fix: fix slice init length #5874 (@rusttech)
* Fix panic caused by accessing non-existent header #5804 (@blanet)
* Avoid deadlocking on log scanning with lots of output on stderr #5738 (@bk2204)
* checkout: gracefully handle files deleted from the index #5698 (@bk2204)
* Fix logScanner fails to parse pointer file containing unicode chars #5655 (@jochenhz)

### Misc

* Fix improper negated test expressions and refine TLS client certificate tests #5914 (@chrisd8088)
* Always capture clone logs in tests and remove or update stale workarounds #5906 (@chrisd8088)
* Update Linux distribution package list for v3.6.0 release #5911 (@chrisd8088)
* doc: mention the pointer size constraint #5900 (@bk2204)
* Repair and restore all tests of cloning over TLS #5882 (@chrisd8088)
* t: increase portability #5887 (@xdavidwu)
* script/build-git: update Ubuntu 24.04 APT sources #5889 (@chrisd8088)
* Run tests in parallel on Windows and always cleanup test directories #5879 (@chrisd8088)
* Update release workflow to use Windows Trusted Signing Action #5873 (@chrisd8088)
* Upgrade to Go 1.23 #5872 (@chrisd8088)
* Use custom random data generator for all test objects and filenames #5868 (@chrisd8088)
* Always build Git against custom libcurl in CI workflows on macOS #5866 (@chrisd8088)
* Use expected version of Git on macOS in CI jobs #5813 (@chrisd8088)
* Move @bk2204 to alumni #5808 (@bk2204)
* docs/api: note API clients may send `charset` parameter in `Content-Type` header #5778 (@chrisd8088)
* issue template: add more information we'd want to see #5728 (@bk2204)
* .github/workflows: use actions/setup-go everywhere #5729 (@bk2204)
* build(deps): bump golang.org/x/net from 0.17.0 to 0.23.0 #5718 (@dependabot[bot])
* chore: fix function names in comment #5709 (@rustfix)
* Include remote error when pure SSH protocol fails #5674 (@bk2204)
* Build release assets with 1.22 #5673 (@bk2204)
* Build release assets with Go 1.21 #5668 (@bk2204)
* script/packagecloud: instantiate distro map properly #5662 (@bk2204)
* Install msgfmt on Windows in CI and release workflows #5666 (@chrisd8088)

## 3.5.0 (28 February 2024)

This release is a feature release which includes support for LoongArch and
RISC-V Linux binary tarballs, `FETCH_HEAD` as a remote source (from a plain
`git fetch`), better support for detection of the system gitattributes file,
and configuration options for the SSH protocol. In this release, the
`FETCH_HEAD` support is experimental and subject to change.

We would like to extend a special thanks to the following open-source
contributors:

* @jochenhz for improvements to `git lfs prune`
* @murez for improvements to our installation script
* @qiangxuhui for tooling to build 64-bit LoongArch packages
* @AaronDewes for tooling to build 64-bit RISC-V packages
* @b-camacho for allowing `FETCH_HEAD` as a fallback remote source
* @tigsikram for fixing some broken links
* @aymanbagabas for fixing our SSH protocol documentation
* @KyleFromKitware for improvements to the pure SSH protocol
* @Juneezee for several code cleanups
* @cmaves for improving performance of our progress indications
* @QuLogic for improving completions and updating docs
* @philip-peterson for helping detect invalid refs in `git lfs push`
* @bogomolets-owl for helping include the reference specified in unlock requests

### Features

* Add --verify-unreachable option to LFS prune #5648 (@jochenhz)
* attribute: warn if config exists after uninstalling #5635 (@bk2204)
* Initialize sessions lazily #5634 (@bk2204)
* Add a `--local` argument to install.sh #5618 (@murez)
* Provide loong64 Binary Support #5607 (@qiangxuhui)
* Improve locking performance #5561 (@bk2204)
* Allow configuring the SSH protocol #5555 (@bk2204)
* Add `FETCH_HEAD` as fallback remote source #5357 (@b-camacho)
* Use `git var` to find system gitattributes file #5412 (@bk2204)
* Add RISC-V support #5438 (@AaronDewes)

### Bugs

* Add support for homedir expansion in SSL key and cert paths #5657 (@bk2204)
* Display correct status information when `git lfs ls-files` run in subdirectory #5653 (@chrisd8088)
* Fix git lfs prune is deleting staged files in the index #5637 (@jochenhz)
* Report invalid ref in `git lfs push` command #5639 (@chrisd8088)
* Always close open files when cloning and spooling #5617 (@chrisd8088)
* Fix git-scm.com links #5589 (@tigsikram)
* doc: update ssh_adapter.md #5560 (@aymanbagabas)
* track: don't modify `.gitattributes` with `--dry-run` #5559 (@bk2204)
* Update project home page URL in Linux builds and remove unused spec files #5551 (@chrisd8088)
* Retrieve endpoint URL only once when checking standalone transfer adapter configurations #5550 (@chrisd8088)
* ssh: Specifically designate a master multiplex connection #5537 (@KyleFromKitware)
* Include reference specifier in unlock requests #5538 (@chrisd8088)
* tq/transfer: copy Id and Token #5534 (@KyleFromKitware)
* Mock time in copy callback log file test #5524 (@chrisd8088)
* track: reject attempts to modify `.gitattributes` #5515 (@bk2204)
* Fix a panic in the credential code #5490 (@bk2204)
* Avoid modifying the mtime of empty files #5491 (@bk2204)
* Make track handle backslashes correctly on Unix #5482 (@bk2204)
* Print an error when pushing with no refs #5437 (@bk2204)

### Misc

* workflows: update to Go 1.22 #5650 (@bk2204)
* Distro updates for v3.5 #5647 (@bk2204)
* Use Azure Code Signing for Windows release binaries #5630 (@bk2204)
* Fix flaky test `t-credentials.sh` #5616 (@bk2204)
* t: pipe random data to `base64(1)` to be compatible with macOS #5614 (@chrisd8088)
* Replace deprecated `io/ioutil` functions #5595 (@Juneezee)
* t/t-path.sh: avoid flaky test setup failure on Windows due to new Go path security checks #5611 (@chrisd8088)
* build(deps): bump golang.org/x/crypto from 0.14.0 to 0.17.0 #5591 (@dependabot[bot])
* Update release documentation and changelog summary script for patch releases #5590 (@chrisd8088)
* Update notarization to use `notarytool` #5554 (@bk2204)
* lfs: avoid unnecessary byte/string conversion #5552 (@Juneezee)
* build(deps): bump golang.org/x/net from 0.7.0 to 0.17.0 #5541 (@dependabot[bot])
* Limit CopyCallbackFile to print every 200 ms #5504 (@cmaves)
* Update to Go 1.21 #5487 (@chrisd8088)
* Add installation note about restarting shells on Windows for PATH changes #5507 (@chrisd8088)
* Remove unused Docker scripts and update README #5506 (@chrisd8088)
* Remove old and unused release script #5500 (@chrisd8088)
* Update and expand documentation of the Git LFS release process #5452 (@chrisd8088)
* Update cobra to 1.7.0 #5444 (@QuLogic)
* Add FAQ entries for TLS data #5446 (@bk2204)
* Remove vendoring instructions from contributing docs #5443 (@QuLogic)
* FAQ: add an entry about proxies #5445 (@bk2204)
* tq/transfer_test.go: enable and fix all tests #5442 (@chrisd8088)
* Add a single source of truth for distro info #5439 (@bk2204)

## 3.4.0 (26 July 2023)

This release is a feature release which includes support for generating shell
scripts for command-line tab-completion of Git LFS commands with the new
`git-lfs-completion(1)` command, providing multiple headers to Git credential
helpers (a new feature as of Git 2.41), and installing Git LFS with a Git
configuration file stored under the XDG configuration path.

Note that this release will be made by a different member of the core team
than the person who performed many of the past releases, and thus this release
will be signed with a different OpenPGP key. Please follow [the steps in the
README to download all of the keys for the core
team](https://github.com/git-lfs/git-lfs#verifying-releases) to verify this
release.
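As a usage sketch of the new shell completion support (the shell name argument
follows the usual `git lfs completion` subcommand convention, and the
destination path is hypothetical):

```sh
# Generate a tab-completion script for bash and install it where
# bash-completion will find it; other shells are analogous.
git lfs completion bash > ~/.local/share/bash-completion/completions/git-lfs
```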
We would like to extend a special thanks to the following open-source
contributors:

* @anihm136 for implementing shell completion script generation
* @aymanbagabas for multiple fixes to our SSH and transfer queue code
* @dscho for ensuring our Windows installer works on 32-bit systems
* @dyrone for correcting an erroneous file name in our documentation
* @jlosito for making sure our CI job actions are up to date
* @nfgferreira for speeding up the track command on Windows
* @ry167 for improving our Ubuntu installation instructions
* @salvorizza for fixing a JSON bug in our unlock command
* @slonopotamus for cleaning up accommodations for legacy Go versions
* @steffen for improving our installation documentation

### Features

* Add support for `wwwauth[]` to credential helpers #5381 (@bk2204)
* Add a `--file` option to install and uninstall #5355 (@bk2204)
* Add shell completion generation #5311 (@anihm136)

### Bugs

* Handle local paths with trailing slashes #5402 (@bk2204)
* Unlock by ID with JSON Flag returns empty array #5385 (@salvorizza)
* Refactor macro attribute handling to prevent crashes with `--fixup` migration option #5382 (@chrisd8088)
* locks: print warning about locking API to standard error #5350 (@bk2204)
* Avoid needlessly spawning SSH connections with `git archive` #5309 (@bk2204)
* standalone: print an error if the destination isn't a Git repository #5283 (@bk2204)
* locks: ensure local locks are synced on error #5284 (@bk2204)
* installer: let it work on 32-bit Windows again #5266 (@dscho)
* fix(ssh): use /tmp to place control dir on darwin #5223 (@aymanbagabas)
* commands: avoid remote connections in checkout #5226 (@bk2204)
* fix(tq): stop adding transfers to queue after abort #5230 (@aymanbagabas)
* fix: pure SSH list lock command name #5219 (@aymanbagabas)
* git: match patterns, not file names, for tracked files #5423 (@bk2204)
* Resolve git-lfs track slowness on Windows due to unneeded user lookup #5431 (@nfgferreira)

### Misc

* Update distributions #5392 (@bk2204)
* workflows: remove refreshenv #5393 (@bk2204)
* Refactor `GitScanner` and simplify implementation #5389 (@chrisd8088)
* Ensure all logging tasks are closed and simplify log task dispatching #5375 (@chrisd8088)
* FAQ: add entry on archiving subdirectories #5349 (@bk2204)
* Drop pre-1.13 Go compatibility code #5364 (@slonopotamus)
* Fix CI by enabling Git protocol v2 #5353 (@bk2204)
* Clarify `git lfs migrate` handling of local and remote references #5327 (@chrisd8088)
* Update to Go version 1.20 #5326 (@chrisd8088)
* Remove stale video link #5325 (@chrisd8088)
* Fix Windows CI with Go 1.20 #5317 (@bk2204)
* Update Windows signing certificate hash #5300 (@chrisd8088)
* t: avoid incorrect negated commands #5282 (@bk2204)
* Update golang.org/x/text #5290 (@bk2204)
* Improve error handling for pure SSH protocol #5063 (@bk2204)
* workflow: use `choco install` #5281 (@bk2204)
* Update Linux releases in Packagecloud publication script #5276 (@chrisd8088)
* Simplify and deduplicate installation instructions #5260 (@steffen)
* Make hooks refer to `core.hookspath` #5245 (@bk2204)
* Update INSTALLING.md to fix ubuntu derivative command and allow easy install for some distros #5014 (@ry167)
* Check for github action updates monthly #5228 (@jlosito)
* Upgrade workflows to latest Ubuntu and Actions versions #5243 (@chrisd8088)
* Upgrade GitHub Actions workflows to use `ruby/setup-ruby@v1` #5236 (@chrisd8088)
* Add `git lfs migrate export` command examples to manual page #5239 (@chrisd8088)
* Unset `GIT_TRACE` environment variable for Git commands in `Makefile` #5240 (@chrisd8088)
* Clean up RPM builds and fix i686 RPM file names #5241 (@chrisd8088)
* Add a FAQ entry on Jenkins problems #5177 (@bk2204)
* Fix missing parameter in git lfs logs manual page #5414 (@chrisd8088)
* proposals: fix filename typo #5425 (@dyrone)
* Update shell tab-completion script support and add manual page #5429 (@chrisd8088)

## 3.3.0 (30 November 2022)

This release is a feature release which includes package support for Red Hat
Enterprise Linux 9 and compatible OSes, experimental support for multiple
remotes, and some command-line helpers for `git lfs push`.

In this release, we no longer include vendored versions of our dependencies in
the repository or the tarballs. These were a source of noise and bloat, and
users can easily download the required dependencies with Go itself. Users who
need to continue to vendor the dependencies can use the `make vendor` target.

In addition, we've also switched the documentation to AsciiDoc from
ronn-flavoured Markdown and included the FAQ in the repository. This means
that the manual pages now render properly in the GitHub web interface and it's
also much easier to create additional formats, such as PDF, by leveraging the
ability of Asciidoctor to convert to DocBook.

It should also be noted that `git lfs migrate import --everything` now
processes all refs that aren't special to Git instead of just branches and
tags. This is what it was documented to do, but didn't, so we've fixed it.

Finally, please note that future releases may be done by a different member of
the core team than many of the past releases, and thus may be signed by a
different OpenPGP key. Please follow [the steps in the README to download all
of the keys for the core
team](https://github.com/git-lfs/git-lfs#verifying-releases) to verify
releases successfully in the future.

We would like to extend a special thanks to the following open-source
contributors:

* @dhiwakarK for fixing a broken link
* @dscho for improving our installer
* @Leo1690 for speeding things up with sparse checkout
* @pratap043 for proposing an extension to locking
* @rcoup for fixing our Makefile and adding scripting features to `git lfs push`
* @srohmen for adding support for alternative remotes
* @WhatTheFuzz for improving our error messages
* @wuhaochen for fixing a long-standing bug with `git lfs migrate import`

### Features

* Add the FAQ in the repository #5167 (@bk2204)
* Add support for Rocky Linux 9 #5144 (@bk2204)
* push: add ability to read refs/oids from stdin #5086 (@rcoup)
* Allow alternative remotes to be handled by LFS #5066 (@srohmen)
* Switch documentation to AsciiDoc #5054 (@bk2204)

### Bugs

* Handle macro attribute references with unspecified flag #5168 (@chrisd8088)
* Fixed broken link for git-lfs-migrate #5153 (@dhiwakarK)
* ssh: disable concurrent transfers if no multiplexing #5136 (@bk2204)
* Fix setting commit & vendor variables via make #5141 (@rcoup)
* ssh: don't leak resources when falling back to legacy protocol #5137 (@bk2204)
* Bump gitobj to v2.1.1 #5130 (@bk2204)
* tools: don't match MINGW as Cygwin #5106 (@bk2204)
* installer: handle `BashOnly` Git for Windows gracefully #5048 (@dscho)
* Change git-lfs migrate import --everything to migrate everything except for special git refs #5045 (@wuhaochen)

### Misc

* Use --sparse parameter for ls-files for performance optimization #5187 (@Leo1690)
* Add information to ambiguous error message. #5172 (@WhatTheFuzz)
* Distro update for v3.3.0 #5169 (@bk2204)
* docs/man: clarify Git LFS setup instructions #5166 (@larsxschneider)
* Update more stale comments relating to object scanning #5164 (@chrisd8088)
* Update stale comments relating to object scanning and uploading #5163 (@chrisd8088)
* script/cibuild: exclude icons from whitespace check #5142 (@bk2204)
* Update to Go version 1.19 #5126 (@chrisd8088)
* Drop vendoring #4903 (@bk2204)
* Adding locking_notes.md #5079 (@pratap043)
* t: set init.defaultBranch #5082 (@bk2204)
* go.mod: require gopkg.in/yaml.v3 v3.0.1 #5033 (@bk2204)
* script/upload: improve readability of asset verification #5032 (@bk2204)

## 3.2.0 (25 May 2022)

This release is a feature release which includes support for machine-readable
formats for a couple more commands, plus the ability to automatically merge
LFS-based text files from the command-line. It's likely that the merge driver
will see future improvements, but if you have feedback on the design, please
use the discussions feature.

Note that our binary release archives no longer unpack into the current
directory, and now contain a top-level directory just like the source archives
do.

We would like to extend a special thanks to the following open-source
contributors:

* @bbodenmiller for fixing the formatting in our manual pages
* @breyed for fixing a typo in our manual pages
* @btoll for improving our README
* @rcoup for fixing our Accept header syntax
* @vtbassmatt for documenting our deprecation of NTLM

### Features

* ls-files: add a --json option #5007 (@bk2204)
* Add --json output for git lfs track #5006 (@bk2204)
* Add a merge driver #4970 (@bk2204)
* lfs: don't write hooks when they haven't changed #4935 (@bk2204)
* Tarballs, not tarbombs #4980 (@bk2204)

### Bugs

* Apply several Makefile fixes for Windows #5016 (@bk2204)
* git: don't panic on pkt-line without equals #4982 (@bk2204)
* lfshttp: fix invalid Accept header syntax #4996 (@rcoup)
* Grammar fix #4981 (@breyed)
* Use `gitignore`-style path matching for additional commands #4951 (@chrisd8088)
* Avoid pruning when identical files both match and do not match `lfs.fetchexclude` #4973 (@chrisd8088)
* Apply `lfs.fetchexclude` filter to previous commits when pruning #4968 (@chrisd8088)
* Update and correct several error message strings #4943 (@chrisd8088)
* script/upload: correct RHEL 8 package repo #4936 (@bk2204)
* lfs: add old hook content to the list of old hooks #4878 (@bk2204)
* .github/workflows: install packagecloud gem #4873 (@bk2204)

### Misc

* Update distros for packagecloud.io #5010 (@bk2204)
* lfshttp: log the Negotiate error on failure #5000 (@bk2204)
* Build CI on Windows 2022 #4997 (@chrisd8088)
* workflows: use ronn-ng #4992 (@bk2204)
* Multiple hash support #4971 (@bk2204)
* note deprecation of NTLM #4987 (@vtbassmatt)
* Update to Go 1.18, drop older Go version support, and update modules and dependencies #4963 (@chrisd8088)
* Update tests to check `prune` command excludes `lfs.fetchexclude` paths #4964 (@chrisd8088)
* Add test to check `prune` command retains tagged unpushed objects #4962 (@chrisd8088)
* Adjust test helpers and tests related to path filtering #4960 (@chrisd8088)
* Include shell path in restricted `PATH` in credential helper path test #4959 (@chrisd8088)
* Build test helper commands with `.exe` file extension on Windows #4954 (@chrisd8088)
* Update Windows signing certificate SHA hash in `Makefile` #4946 (@chrisd8088)
* remove unused `Pipe[Media]Command()` functions #4942 (@chrisd8088)
* Makefile: remove legacy trimpath code #4938 (@bk2204)
* add Inno Setup check of Git install paths and remove old uninstaller checks #4925 (@chrisd8088)
* note `git lfs push --all` only pushes local refs in man page #4898 (@chrisd8088)
* Build man pages into per-section subdirectories #4890 (@chrisd8088)
* Call out destructive command in README #4880 (@btoll)
* Improve formatting #4863 (@bbodenmiller)
* docs/howto: remind core team member to check Actions workflows #4868 (@bk2204)
* .github: fix syntax error in release workflow #4866 (@bk2204)

## 3.1.4 (19 Apr 2022)

This release is a bugfix release to fix some problems during the build of
v3.1.3. There are otherwise no substantial changes from v3.1.3.

### Misc

* Use only Windows Server 2019 runners for CI in GitHub Actions #4883 (@chrisd8088)
* remove unused `Pipe[Media]Command()` functions #4942 (@chrisd8088)

## 3.1.3 (19 Apr 2022)

This release introduces a security fix for Windows systems, which has been
assigned CVE-2022-24826.

On Windows, if Git LFS operates on a malicious repository with a `..exe` file
as well as a file named `git.exe`, and `git.exe` is not found in PATH, the
`..exe` program will be executed, permitting the attacker to execute arbitrary
code. Similarly, if the malicious repository contains files named `..exe` and
`cygpath.exe`, and `cygpath.exe` is not found in PATH, the `..exe` program
will be executed when certain Git LFS commands are run.

This security problem does not affect Unix systems. This is the same issue as
CVE-2020-27955 and CVE-2021-21237, but the fix for those issue was incomplete
and certain options can still cause the problem to occur.

This occurs because on Windows, Go includes (and prefers) the current
directory when the name of a command run does not contain a directory
separator, and it continues to search for programs even when the specified
program name is empty. This has been solved by failing if the path is empty or
not found.

We would like to extend a special thanks to the following open-source
contributors:

* @yuske for reporting this to us responsibly

### Bugs

* Report errors when finding executables and revise PATH search tests (@chrisd8088)

### Misc

* Update Windows signing certificate SHA hash in Makefile (@chrisd8088)

## 3.1.2 (16 Feb 2022)

This is a bugfix release which fixes a bug in `git lfs install` and some
issues in our CI release processes, including one that prevented arm64
packages for Debian 11 from being uploaded.

### Bugs

* lfs: add old hook content to the list of old hooks #4878 (@bk2204)

### Misc

* Revert "Merge pull request #4795 from bk2204/actions-checkout-v2" #4877 (@bk2204)
* .github/workflows: install packagecloud gem #4873 (@bk2204)

## 3.1.1 (14 Feb 2022)

This is a bugfix release which fixes a syntax error in the release workflow.

### Misc

* .github: fix syntax error in release workflow #4866 (@bk2204)

## 3.1.0 (14 Feb 2022)

This release is a feature release which includes support for fallback from
Negotiate to Basic authentication, new ARM64 packages for Debian 11, a new
localization infrastructure, and improved netrc support, in addition to
various bug fixes.

In addition, we've addressed a performance regression for `git lfs migrate
import` that was introduced in v3.0.2.

At the moment, there are no translations available, but if you are interested
in contributing to one, please reach out in an issue. For compatibility with
Windows and to retain the ability to have a single relocatable binary, the
translations are included in the binary at build time.
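As a sketch of the improved netrc handling mentioned above (hostnames and
credentials are hypothetical), the same machine may now carry entries with
different login names, and the entry whose login matches the username in the
remote URL is used:

```sh
# Hypothetical ~/.netrc with two logins for the same host; Git LFS can now
# distinguish them rather than always taking the first match for the machine.
cat ~/.netrc
# machine git.example.com login alice password alice-token
# machine git.example.com login robot password robot-token
```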
We would like to extend a special thanks to the following open-source
contributors:

* @donno2048 for improving our error checking
* @howardlyliao for improved netrc support
* @HermannDppes for improving our large file warning on Windows
* @rex4539 for fixing various typos throughout our codebase

### Features

* Fall back from Negotiate to Basic #4815 (@bk2204)
* Add basic support for localization #4729 (@bk2204)
* Add support for ARM64 Debian packages #4728 (@bk2204)
* netrc: consider same machine may have different login names #4726 (@howardlyliao)

### Bugs

* smudge: honor GIT_LFS_SKIP_SMUDGE with checkout-index #4860 (@bk2204)
* fix `git lfs fsck --objects A..B` handling and drop all left/right ref terminology #4859 (@chrisd8088)
* halt migration when `.gitattributes` symbolic link encountered #4849 (@chrisd8088)
* fix merging of `.gitattributes` with execute file mode during migration #4835 (@chrisd8088)
* Fix migrate import speed regression #4813 (@bk2204)
* Fix typos #4806 (@rex4539)
* Move `err` checking to before the value was used #4776 (@donno2048)
* migrate import: don't allow path filters with --above #4771 (@bk2204)
* avoid panic on checkout with `--to` but no path, and update checkout manual #4766 (@chrisd8088)
* creds: gracefully handle lack of askpass helper #4759 (@bk2204)
* post-checkout: don't modify permissions of untracked files #4760 (@bk2204)
* use gitattributes filepath matching for migrate filter options #4758 (@chrisd8088)
* Avoid errors in git lfs env #4713 (@bk2204)
* fs: specify a file as existing if it's empty #4654 (@bk2204)
* Fix bound for largefilewarning #4633 (@HermannDppes)

### Misc

* build missing man pages and correct HTML renderings #4851 (@chrisd8088)
* Update and mark message strings for translation #4846 (@chrisd8088)
* Mark almost all strings for translation #4781 (@bk2204)
* .github/workflows: switch to actions/checkout@v2 #4795 (@bk2204)
* script/packagecloud: update for latest distros #4794 (@bk2204)
* filter-process: don't print large file warning on fixed versions #4768 (@bk2204)
* ssh: avoid using -- where possible #4741 (@bk2204)
* vendor,go.*: update x/crypto and dependencies #4738 (@chrisd8088)
* Stop supporting Go older than 1.13 #4641 (@bk2204)

## 3.0.2 (28 Oct 2021)

This release is a bugfix release which fixes a variety of problems seen since
3.0.0, including problems with empty files, `git lfs fsck --pointers`, and the
testsuite.

We would like to extend a special thanks to the following open-source
contributors:

* @fh1ch for patches to make things work better on Alpine Linux
* @pyckle for fixing our handling of filenames in `git lfs migrate import`
* @ycongal-smile for fixing `git lfs migrate import` with similarly named files

### Bugs

* Fix two types of misdetection in git lfs fsck #4697 (@bk2204)
* lfs: don't flag non-LFS files as invalid pointers #4691 (@bk2204)
* git: honor GIT_OBJECT_DIRECTORY #4686 (@bk2204)
* migrate: properly escape blob filenames #4683 (@pyckle)
* ls-files: don't process empty files as pointers #4681 (@bk2204)
* Call migrate() BlobFn on every blob #4671 (@ycongal-smile)
* Correct t-lock regular expression to be musl compatible #4673 (@fh1ch)

### Misc

* Allow git-lfs-transfer integration tests to be skipped #4677 (@fh1ch)
* Make CI environment GIT prefix grep more specific #4678 (@fh1ch)

## 3.0.1 (28 Sep 2021)

This release is a bugfix release which fixes the Windows ARM64 build process
and addresses a regression in support for empty files in pull and fetch.
We would like to extend a special thanks to the following open-source
contributors:

* @dennisameling for fixing support for Windows on ARM64

### Bugs

* Fix Windows arm64 release #4647 (@dennisameling)
* fs: specify a file as existing if it's empty #4654 (@bk2204)

## 3.0.0 (24 Sep 2021)

This release is a major new release and introduces several new features, such
as a pure SSH-based protocol, packages for several new OS versions, support
for ARM64 Windows, Git-compatible pattern matching, and locking multiple files
on the command line, among other items.

When connecting over SSH, the first attempt will be made to use
`git-lfs-transfer`, the pure SSH protocol, and if it fails, Git LFS will fall
back to the hybrid protocol using `git-lfs-authenticate`. Note that no major
forges are known to support the pure SSH protocol at this time.

Because it is a major release, we've made some backwards-incompatible changes.
A (possibly incomplete) list of them is as follows:

* NTLM support has been completely removed, since nobody volunteered to fix
  issues in it. Users are advised to use Kerberos or Basic authentication
  instead.
* When using an SSH URL (that is, the syntax starting with `ssh://`), the
  leading slash is not stripped off when invoking `git-lfs-authenticate` or
  `git-lfs-transfer`. This is compatible with the behavior of Git when
  invoking commands over SSH.
* `git lfs fsck` now additionally checks that pointers are canonical and that
  files that are supposed to be LFS files actually are. It also exits nonzero
  if any problem is found.
* Pattern matching should be stricter and should either match the behavior of
  `.gitattributes` or `.gitignore`, as appropriate. Deviations from Git's
  behavior will henceforth be treated as bugs and fixed accordingly.
* Git LFS will now write a Git LFS repository format version into the
  repository. This is designed to allow future extension with incompatible
  changes. Repositories without this version will be assumed to be version 0.
  Note that this is different from, but modeled on, Git's repository format
  version.
* `git lfs lock` and `git lfs unlock` now handle multiple pathname arguments
  and the JSON format has changed to handle multiple responses (see the sketch
  after this list).
* The Go package name now contains a version number. This should have no
  effect on users because we don't provide a stable Go ABI.
* Empty components in `PATH` are no longer treated as the current directory on
  Windows because unintentionally having such empty components is common and
  the behavior was surprising.
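A hedged sketch of the multi-file locking interface described above; the JSON
shape is illustrative (an array of lock objects, fields abbreviated), not a
verbatim transcript, and the file names are hypothetical:

```sh
# Lock and unlock several files in one invocation; with --json, the output
# is now an array with one entry per requested path.
git lfs lock --json design/a.psd design/b.psd
# [{"id":"123","path":"design/a.psd",...},{"id":"124","path":"design/b.psd",...}]
git lfs unlock design/a.psd design/b.psd
```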
We would like to extend a special thanks to the following open-source
contributors:

* @codykrieger for ensuring that we process includes correctly
* @corngood for fixing a hang in prune
* @dennisameling for adding support for Windows on ARM64
* @fh1ch for fixing our 429 handling
* @geki-yaba for fixing problems with askpass on Cygwin
* @gison93 for fixing a bug in our documentation
* @jvimr for ensuring our Debian packages are built properly
* @opohorel for ensuring our copyright notices were up to date
* @rhansen for fixing systems where / is a repository
* @sergiou87 for improving support for cross builds
* @slonopotamus for improving our error handling
* @stanhu for improving our handling of invalid OIDs
* @Timmmm for improving our support of .lfsconfig
* @tklauser for avoiding the need for cgo on macOS

### Features

* Advertise hash algorithm supported in batch request #4624 (@bk2204)
* Bump package version to v3 #4611 (@bk2204)
* Update OS versions #4610 (@bk2204)
* Add support for Debian 11 #4592 (@bk2204)
* Support for locking and unlocking multiple files #4604 (@bk2204)
* Add support for Windows ARM64 #4586 (@dennisameling)
* LFS repository format version #4552 (@bk2204)
* Pure SSH-based protocol #4446 (@bk2204)
* Make fsck able to check for invalid pointers #4525 (@bk2204)
* Add --fixup option to migrate info command #4501 (@chrisd8088)
* Allow reporting of LFS pointers separately in migrate info command #4436 (@chrisd8088)
* Add config variables for default remotes #4469 (@bk2204)
* Make lfshttp package builds more portable #4476 (@bk2204)
* Mark skipdownloaderrors as safe #4468 (@Timmmm)
* Make migrate commands default to preserving uncommitted changes #4454 (@chrisd8088)
* Darwin ARM64 support #4437 (@bk2204)
* tools: implement cloneFileSyscall on darwin without cgo #4387 (@tklauser)
* prune: add options to be more aggressive about pruning #4368 (@bk2204)

### Bugs

* corrected debian 11 & 12 derived variants #4622 (@jvimr)
* urlconfig: anchor regexp for key matching #4598 (@bk2204)
* filepathfilter: always use Git-compatible pattern matching #4556 (@bk2204)
* debian and rpm: Pass `--skip-repo` to `install` and `uninstall` #4594 (@rhansen)
* Fix hang in prune #4557 (@corngood)
* Disable ANSI color codes while log parsing and anchor diff regular expressions #4585 (@chrisd8088)
* Fix 429 retry-after handling for LFS batch API endpoint #4573 (@fh1ch)
* go.mod: bump gitobj to v2.0.2 #4555 (@bk2204)
* Fix locking with multiple paths and absolute paths #4535 (@bk2204)
* locking: avoid nil pointer dereference with invalid response #4509 (@bk2204)
* migrate import: make --above affect only individual files #4512 (@bk2204)
* fs: be a little less aggressive with cleanup #4490 (@bk2204)
* Fix downloadFile in gitfilter_smudge.go to actually propagate all errors #4478 (@slonopotamus)
* Translate Cygwin path patches for askpass helper and cert dir/file #4473 (@geki-yaba)
* Avoid panic on SIGINT by skipping cleanup when config uninitialized #4463 (@chrisd8088)
* Parse stash log entries parsimonously in prune command #4449 (@chrisd8088)
* docs: note that -I and -X override configuration settings #4442 (@bk2204)
* Make all checks of blobSizeCutoff consistent #4435 (@chrisd8088)
* Fix up handling of the "migrate info" command's --top option #4434 (@chrisd8088)
* Tighten LFS pointer regexp #4421 (@stanhu)
* invoke git-config with --includes to ensure it always evaluates `include.*` directives #4420 (@codykrieger)
* Canonicalize Windows paths like Git does #4418 (@bk2204)
* lfsapi: don't warn about duplicate but identical aliases #4413 (@bk2204)
* lfs: don't invoke diff drivers when pruning repositories #4407 (@bk2204)
* Consider scheme of request URL, not proxy URL, when choosing proxy #4396 (@bk2204)
* Makefile: allow make release to be run twice in a row #4344 (@bk2204)
* Makefile: don't fail the second time macOS builds are built #4341 (@bk2204)

### Misc

* subprocess: don't treat empty PATH component as . on Windows #4603 (@bk2204)
* Switch from which to command -v #4591 (@bk2204)
* Bump Go to 1.17 #4584 (@dennisameling)
* Add cautions about unstable Go API and fix GPG key link #4582 (@chrisd8088)
* Update go.mod module path with explicit v2 #4575 (@chrisd8088)
* Drop unused ClearTempStorage() transfer adapter method and tune stale comments #4554 (@chrisd8088)
* README: improve steps for building from source #4527 (@bk2204)
* Update license year #4513 (@opohorel)
* docs/man: add note re post-import use of checkout #4504 (@chrisd8088)
* Bump transitive dependencies #4502 (@bk2204)
* script/packagecloud: update distros #4494 (@bk2204)
* Use host architecture and OS when running `go generate` #4492 (@sergiou87)
* Bump go-spnego to the latest version #4482 (@bk2204)
* Update git-lfs-migrate man page and add description section #4458 (@chrisd8088)
* update x/text and dependencies #4455 (@opohorel)
* Use blobSizeCutoff in clean pointer buffer length check #4433 (@chrisd8088)
* tools: unset XDG_CONFIG_HOME for filetools test #4432 (@chrisd8088)
* vendor,go.{mod,sum}: update x/net and dependencies #4398 (@chrisd8088)
* Remove NTLM #4384 (@bk2204)
* gitobj 2.0.1 #4348 (@bk2204)
* Fix numbered list in git lfs examples #4347 (@gison93)
* Add test for download gzip transport compression #4345 (@bk2204)

## 2.13.3 (26 Mar 2021)

This release fixes two bugs that caused `git lfs prune` to hang, updates some
dependencies to versions which lack a security issue (which did not affect Git
LFS), and adds support for ARM64 builds on macOS.

### Bugs

* lfs: don't invoke diff drivers when pruning repositories #4407 (@bk2204)
* Parse stash log entries parsimonously in prune command #4449 (@chrisd8088)

### Misc

* Darwin ARM64 support #4437 (@bk2204)
* vendor,go.{mod,sum}: update x/net and dependencies #4398 (@chrisd8088)

## 2.13.2 (13 Jan 2021)

This release introduces a security fix for Windows systems, which has been
assigned CVE-2021-21237.

On Windows, if Git LFS operates on a malicious repository with a git.bat or
git.exe file in the current directory, that program is executed, permitting
the attacker to execute arbitrary code. This security problem does not affect
Unix systems. This is the same issue as CVE-2020-27955, but the fix for that
issue was incomplete and certain options can still cause the problem to occur.

This occurs because on Windows, Go includes (and prefers) the current
directory when the name of a command run does not contain a directory
separator. This has been solved by always using PATH to pre-resolve paths
before handing them to Go.

We would like to extend a special thanks to the following open-source
contributors:

* @Ry0taK for reporting this to us responsibly

### Bugs

* Use subprocess for invoking all commands (@bk2204)

## 2.13.1 (11 Dec 2020)

This release fixes a bug in our build tooling that prevents our release
process from working properly. This release is otherwise identical to 2.13.0.
### Misc

* Makefile: don't fail the second time macOS builds are built #4341 (@bk2204)

## 2.13.0 (10 Dec 2020)

This release introduces several new features, such as the `--above` option to
`git lfs migrate import` and support for `socks5h` proxies. In addition, many
bugs have been fixed and several miscellaneous fixes have been included.

Unless someone steps up to fix and maintain NTLM support, this will be the
last Git LFS release to support NTLM. See #4247 for more details. Note that
Git LFS supports Kerberos as well, which is far more secure and may be a
viable replacement in many situations.

We would like to extend a special thanks to the following open-source
contributors:

* @EliRibble for adding support for the `--above` option to `git lfs migrate import`
* @andrewshadura for adding support for the `GIT_LFS_SKIP_PUSH` environment variable
* @sinbad for fixing problems with retaining objects used by stashes
* @tklauser for cleaning up our use of error constants in the code

### Features

* Add --above parameter to 'migrate import'. #4276 (@EliRibble)
* Add GIT_LFS_SKIP_PUSH to allow skipping the pre-push hook #4202 (@andrewshadura)
* lfshttp: add support for socks5h proxies #4259 (@bk2204)
* Add manual pages to release assets #4230 (@bk2204)
* Honor GIT_WORK_TREE #4269 (@bk2204)

### Bugs

* Make git lfs migrate import handle missing extensions #4318 (@bk2204)
* fs: don't panic when using a too-short object ID to push #4307 (@bk2204)
* Fix pattern matching for .gitattributes #4301 (@bk2204)
* config: map missing port to default for HTTP key lookups #4282 (@bk2204)
* tools: use IoctlFileClone from golang.org/x/sys/unix #4261 (@tklauser)
* tools/util_darwin.go: Remove use of direct syscalls #4251 (@stanhu)
* tools: always force a UTF-8 locale for cygpath #4231 (@bk2204)
* prune: fix deleting objects referred to by stashes #4209 (@sinbad)

### Misc

* migrate import: warn about refs on case insensitive file systems #4332 (@larsxschneider)
* Drop obsolete OS support #4328 (@bk2204)
* tools: use ERROR_SHARING_VIOLATION const from golang.org/x/sys/windows #4291 (@tklauser)
* pull: gracefully handle merge conflicts #4289 (@bk2204)
* script/upload: avoid using Ruby's URI.escape #4266 (@bk2204)
* add documentation of security bug report process #4244 (@chrisd8088)

## 2.12.1 (4 Nov 2020)

This release introduces a security fix for Windows systems, which has been
assigned CVE-2020-27955.

On Windows, if Git LFS operates on a malicious repository with a git.bat or
git.exe file in the current directory, that program is executed, permitting
the attacker to execute arbitrary code. This security problem does not affect
Unix systems.

This occurs because on Windows, Go includes (and prefers) the current
directory when the name of a command run does not contain a directory
separator. This has been solved by always using PATH to pre-resolve paths
before handing them to Go.

We would like to extend a special thanks to the following open-source
contributors:

* @dawidgolunski for reporting this to us responsibly

### Bugs

* subprocess: avoid using relative program names (@bk2204)

## 2.12.0 (1 Sep 2020)

This release introduces several new features, such as support for the SHA-256
repositories coming in a future version of Git, restored support for Go 1.11,
the ability to read the contents of .lfsconfig from the repository, signed and
notarized binaries on macOS, and pre-built 32-bit ARM binaries on Linux. In
addition, several bugs have been fixed and miscellaneous fixes included.
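Among the items above, the SHA-256 repository support can be exercised once a
Git with SHA-256 object-format support is installed; a hedged sketch (the
repository name is hypothetical):

```sh
# Create a repository using the SHA-256 object format and enable Git LFS in
# it; LFS object IDs are unaffected, but Git's own hashes become SHA-256.
git init --object-format=sha256 demo && cd demo
git lfs install --local
git lfs track '*.bin'
```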
Note that macOS releases are now shipped as zip files, not tarballs, since it is not possible to notarize tarballs. macOS releases are now also built on macOS, so `git lfs dedup` should now function. We would like to extend a special thanks to the following open-source contributors: * @saracen for adding support for ARM binaries * @mversluys for improving locking support * @cccfeng for updating our documentation to make it more readable * @bluekeyes for improving performance and tracing * @gertcuykens for adding missing parts of our documentation ### Features * config: optionally read .lfsconfig from the repository #4200 (@bk2204) * Support SHA-256 repositories #4186 (@bk2204) * allow Go 1.11 builds by using WaitStatus.ExitStatus() #4183 (@chrisd8088) * add --worktree option to install and uninstall commands #4159 (@chrisd8088) * Sign and notarize binaries on macOS #4143 (@bk2204) * Makefile: add linux arm build and release targets #4126 (@saracen) * Allow locking and unlocking non-existent files #3992 (@mversluys) ### Bugs * docs/api/locking: add an explicit
#4208 (@cccfeng) * Fix hang when the user lacks permissions #4207 (@bk2204) * Don't mark unlocked files that aren't lockable as read-only #4171 (@bk2204) * locking: make patterns with slashes work on Windows #4139 (@bk2204) * git: consider full refspec when determining seen refs #4133 (@bk2204) ### Misc * Fix Windows CI #4199 (@bk2204) * Fix testsuite when working with non-master default branch #4174 (@bk2204) * git: improve performance of remote ref listing #4176 (@bluekeyes) * subprocess: trace all command execution #4175 (@bluekeyes) * Update git-lfs-migrate.1.ronn #3869 (@gertcuykens) * t: use repo v1 with extensions #4177 (@bk2204) * Makefile: ensure temp Go modules can be deleted #4157 (@chrisd8088) * Improve test suite robustness via environment #4132 (@bk2204) ## 2.11.0 (8 May 2020) This release introduces several new features, such as better support for unnamed local paths and URLs as remotes, support for `submodule.recurse`, exponential backoff on failure, and support for renegotiation. In addition, numerous bugs have been fixed and miscellaneous issues have been addressed. We would like to extend a special thanks to the following open-source contributors: * @bluekeyes for adding support for exponential backoff * @pluehne for adding support for `submodule.recurse` * @Electric26 for fixing the default behavior of a prompt * @nataliechen1 for fixing certain upload retry failures * @shalashik for fixing a panic during cherry-pick * @swisspol for updating our documentation to reflect supported `.lfsconfig` keys * @dan2468 for updating the copyright year ### Features * Allow literal local paths as remotes #4119 (@bk2204) * pre-push: find named remote for URL if possible #4103 (@bk2204) * tq: add exponential backoff for retries #4097 (@bluekeyes) * migrate import: set text to unspecified for excluded fields #4068 (@bk2204) * Update list of distros for packagecloud.io #4080 (@bk2204) * lfshttp: allow renegotiation #4066 (@bk2204) * Support submodule.recurse = true #4063 (@pluehne) * add man page for the post-commit hook command #4052 (@chrisd8088) * Add an option to control warning about files larger than 4 GiB #4009 (@bk2204) ### Bugs * commands/command_migrate.go: fix bug #4116 (@Electric26) * git: avoid "bad object" messages when force-pushing #4102 (@bk2204) * git: avoid trying to rewrite remote tags as remote branches #4096 (@bk2204) * make Go tests run consistently using local binary #4084 (@chrisd8088) * commands: don't honor lfs.fetch* for ls-files #4083 (@bk2204) * commands: print help output with --help #4059 (@bk2204) * fail dedup command with explanation when LFS extensions configured #4045 (@chrisd8088) * fix upload retry 'file already closed' issue #4042 (@nataliechen1) * commands/command_filter_process: cherry-pick of several commits cause panic error #4017 (@shalashik) * Check error when creating local storage directory #4016 (@bk2204) * track: detect duplicate patterns with --filename #4000 (@bk2204) ### Misc * Removed lfs.extension.* from list of supported keys for .lfsconfig #4044 (@swisspol) * Tidy modules #4035 (@bk2204) * README: explain how to verify releases #4022 (@bk2204) * docs: document git lfs migrate --yes #4023 (@bk2204) * Stop using cgo on amd64 Linux #4026 (@bk2204) * updated copyright year #3995 (@dan2468) ## 2.10.0 (21 January 2020) This release introduces several new features, such as support for local paths in remotes, Kerberos support, and official binaries for S390x and little-endian 64-bit PowerPC systems.
In addition, numerous bugs have been fixed and miscellaneous issues have been addressed. We would like to extend a special thanks to the following open-source contributors: * @ganadist for fixing a bug in the output of `git lfs env` * @exceed-alae for fixing a possible nil pointer dereference * @slonopotamus for improvements to Windows support and code cleanups * @nataliechen1 for fixing a data race * @ssgelm for writing and updating the code to use a new cookie jar parser * @austintraver for improving the output of `git lfs status` * @nikola-sh for improving option parity with Git * @alrs for fixing several error checks in the testsuite * @pluehne for improving our support for uncommon references ### Features * Optimize pushes for multiple refs #3978 (@bk2204) * Include ppc64le and s390x Linux builds in releases #3983 (@bk2204) * Kerberos (SPNEGO) support for HTTP #3941 (@bk2204) * Add support for local paths #3918 (@bk2204) * Allow specifying HTTP version to use #3887 (@bk2204) ### Bugs * t-duplicate-oids: use correct awk indexing #3981 (@bk2204) * Improved proxy support #3972 (@bk2204) * install: don't print error if run outside repository #3969 (@bk2204) * debian: bump version of golang-go #3959 (@bk2204) * lfshttp: Set valid default value for lfs.concurrenttransfers #3949 (@ganadist) * Add nil-check on defer block of DoTransfer() #3936 (@exceed-alae) * Retry batch failures #3930 (@bk2204) * rpm: use old setup code on CentOS 7 #3938 (@bk2204) * Interpret relative hook paths as relative to working tree #3926 (@bk2204) * Handle missing cygpath gracefully #3910 (@bk2204) * Update index before showing status #3921 (@bk2204) * Honor lfs.url when deciding on transfer adapters #3905 (@bk2204) * Implement retry logic to fix LFS storage race conditions on Windows #3890 (@slonopotamus) * Avoid hang when using git hash-object --stdin --path #3902 (@bk2204) * synchronize access to netrcCredentialHelper.skip #3896 (@nataliechen1) ### Misc * Improve license files #3973 (@bk2204) * Add CI link to CI badge in README #3960 (@slonopotamus) * Clarify output shown by `git lfs status` #3953 (@austintraver) * Revert "ci: force Windows Git version to 2.22.0" #3903 (@bk2204) * Better document pointer format constraints #3944 (@bk2204) * Don't abort with newer Git when in a bare repo #3940 (@bk2204) * Fix more Linux package issues #3932 (@bk2204) * docs: explain shell metacharacters better #3920 (@bk2204) * Reset the text attribute on export #3913 (@bk2204) * Support schannel ssl backend #3868 (@nikola-sh) * Allow migrate export to handle non-pointer files gracefully #3911 (@bk2204) * git/gitattr: fix dropped test errors #3904 (@alrs) * Accept all local references with git lfs push #3876 (@pluehne) * Drop pre-1.6 Go compatibility code #3897 (@slonopotamus) * tools/kv: Fix dropped test error #3882 (@alrs) * Use different parser for cookiejar files #3886 (@ssgelm) * Stop replacing files in LFS storage when downloading them concurrently on Windows #3880 (@slonopotamus) * Fix error strings to follow Go guidelines #3871 (@slonopotamus) * Miscellaneous release fixes #3866 (@bk2204) ## 2.9.2 (12 December 2019) This release fixes a few regressions, such as a possible nil pointer dereference, a failure to retry batch requests, and a bug where repositories could fail to be detected on Windows. 
We would like to extend a special thanks to the following open-source contributors: * @exceed-alae for fixing a possible nil pointer dereference ### Bugs * Add nil-check on defer block of DoTransfer() #3936 (@exceed-alae) * Retry batch failures #3930 (@bk2204) * rpm: use old setup code on CentOS 7 #3938 (@bk2204) * Handle missing cygpath gracefully #3910 (@bk2204) ### Misc * Don't abort with newer Git when in a bare repo #3940 (@bk2204) * Fix more Linux package issues #3932 (@bk2204) ## 2.9.1 (25 November 2019) This release fixes a few regressions, such as the ability to use HTTP/1.1 when required, addresses a race condition, and switches the cookie jar parser to something that's easier for distributions to package. We would like to extend a special thanks to the following open-source contributors: * @nataliechen1 for fixing a data race * @ssgelm for writing and updating the code to use a new cookie jar parser ### Features * Allow specifying HTTP version to use #3887 (@bk2204) ### Bugs * synchronize access to netrcCredentialHelper.skip #3896 (@nataliechen1) * Fix several causes of CI problems #3878 (@bk2204) * Miscellaneous release fixes #3866 (@bk2204) ### Misc * Build artifacts during CI for easier testing #3892 (@bk2204) * Use different parser for cookiejar files #3886 (@ssgelm) ## 2.9.0 (17 October 2019) This release adds support for DragonFly BSD, adds a new `git lfs dedup` command to save space if the file system supports it, adds support for file URLs, improves the performance when walking the repository, contains improvements to use HTTP/2 when available and cookies when required, and numerous other bug fixes, features, and modifications. We would like to extend a special thanks to the following open-source contributors: * @pluehne for adding support for fetching the history of specific refs * @kupson for adding cookie support * @liweitianux for adding Dragonfly BSD support * @kazuki-ma for implementing de-duplication support * @dvdveer for adding range support to ls-files * @dyrone, @pmeerw, @yamiacat, and @kittenking for cleaning up some documentation issues * @slonopotamus for improving concurrent downloads * @nataliechen1 for fixing remote names with dots * @jw3 for removing excessive logging * @SeamusConnor for significantly improving performance when walking the repository ### Features * Support fetching entire history of specific refs #3849 (@pluehne) * Add support for CentOS 8 #3854 (@bk2204) * Let git-lfs HTTPS transport send cookies #3825 (@kupson) * Support DragonFly BSD #3837 (@liweitianux) * HTTP/2 protocol support #3793 (@PastelMobileSuit) * Add clonefile on Windows over ReFS support. #3790 (@kazuki-ma) * Add new command `git lfs dedup` for file system level de-duplication. #3753 (@kazuki-ma) * Support GIT_ALTERNATE_OBJECT_DIRECTORIES #3765 (@bk2204) * ls-files: add support for reference range #3764 (@dvdveer) * Add several additional distros for packagecloud.io #3751 (@bk2204) * Provide an option to track to handle paths literally #3756 (@bk2204) * Optimize traversal of Git objects with URL remotes #3755 (@bk2204) * Support for file URLs #3748 (@bk2204) * Add clone file on MacOS X (darwin). 
#3745 (@kazuki-ma) ### Bugs * Fix JSON comma problems in docs #3851 (@dyrone) * Remove redundant comma in batch.md #3841 (@dyrone) * More robust handling of parallel attempts to download the same file #3826 (@slonopotamus) * Update wildmatch to v1.0.4 #3820 (@bk2204) * Update to gitobj v1.4.1 #3815 (@bk2204) * Fix build error when cross-compiling #3817 (@bk2204) * Do not fail when multiple processes download the same lfs file #3813 (@slonopotamus) * Fix Remote Name Parsing Bug #3812 (@nataliechen1) * status: gracefully handle files replaced by directories #3768 (@bk2204) * Avoid deadlock when transfer queue fails #3800 (@bk2204) * Avoid a hang when Git is slow to provide us data #3806 (@bk2204) * tasklog/log.go: print "done" messages with a trailing period #3789 (@ttaylorr) * track: make --filename work with spaces #3785 (@bk2204) * Fix couple of 'the the' typos #3786 (@pmeerw) * Use an absolute path for smudging #3780 (@bk2204) * Fix URL parsing with Go 1.12.8 #3771 (@bk2204) * Fix remote autoselection when not on a branch #3759 (@bk2204) * Replace deprecated SEEK_SET, SEEK_CUR usage. #3739 (@kazuki-ma) * Do not log skipped checkouts to file #3736 (@jw3) * Fix typos across git-lfs repository #3728 (@kittenking) * Accept legacy Git SSH URLs #3713 (@bk2204) ### Misc * ls-files --all man patch #3859 (@yamiacat) * Reworked to use git ls-files in some circumstances instead of FastWalkGitRepo #3823 (@SeamusConnor) * Clean up go.mod for Go 1.13 #3807 (@bk2204) * Use FICLONE instead of BTRFS_IOC_CLONE. #3796 (@kazuki-ma) * Remove unused pty code #3737 (@bk2204) ## 2.8.0 (23 July 2019) This release adds support for SOCKS proxies and Windows junctions, adds native packages for Debian 10 and similar distributions, reduces the number of situations in which running `git lfs fetch --all` is required, improves compatibility with Cygwin, and numerous other bug fixes and modifications. We would like to extend a special thanks to the following open-source contributors: * @mstrap for adding support for listing lock owners * @h-hirokawa for adding support for rewriting object URLs * @slonopotamus for helping get our specs and implementation in sync * @ssgelm for improving our Debian packaging * @TBK for fixing a test * @hartzell for improving the compatibility of our Makefile * @AJH16 for implementing support for NTLM SSO ### Features * Don't fail if we lack objects the server has #3634 (@bk2204) * Add support for Debian 10 #3697 (@bk2204) * Migrate tags pointing to other tags #3690 (@bk2204) * Add support for SOCKS proxies #3677 (@bk2204) * Allow vendors to customize the version info #3636 (@bk2204) * Wrap credential data to allow late credential prompting and update NTLM/SSPI to attempt SSPI login prior to prompting for credentials. 
#3617 (@AJH16) * git-lfs locks should optionally denote own locks #3569 (@mstrap) * tq/adapterbase: support rewriting href #3590 (@h-hirokawa) * Handle Windows junctions properly #3560 (@bk2204) * Allow specifying multiple insteadOf aliases #3550 (@bk2204) ### Bugs * Make API documentation lock creation example less confusing #3648 (@bk2204) * Use a download token when searching locks #3715 (@bk2204) * Copy mode from original file when rewriting objects #3694 (@bk2204) * Don't recurse into submodules when walking repository #3686 (@bk2204) * Be more precise about what timestamps we accept #3680 (@bk2204) * Canonicalize common directory paths on Cygwin #3671 (@bk2204) * Ensure we always use correct Content-Type and Accept headers #3663 (@bk2204) * Fix 'owner' lock field not documented as optional #3651 (@slonopotamus) * Improve error handling in git lfs install #3624 (@bk2204) * Properly handle config options for URLs with upper case letters #3584 (@bk2204) * Detect Cygwin-style pipe TTYs as TTYs #3582 (@bk2204) * Set push remote for pre-push #3579 (@bk2204) * Switch from manually running go generate to using dh-golang to run it #3549 (@ssgelm) * Install worktree hooks in the proper directory #3547 (@bk2204) * Avoid nil pointer dereference on download failure #3537 (@bk2204) * Avoid nil pointer dereference on unexpected failure #3534 (@bk2204) ### Misc * Update gitobj to v1.3.1 #3716 (@bk2204) * Use default line endings for core.autocrlf=input #3712 (@bk2204) * Fix CircleCI #3710 (@bk2204) * Vendor in gitobj v1.3.0 #3685 (@bk2204) * Update CONTRIBUTING #3673 (@bk2204) * Fix typo in t-locks.sh #3666 (@TBK) * Make 'locked_at' required in JSON schema #3655 (@slonopotamus) * Document a new batch error code #3639 (@bk2204) * Fix invalid JSON in LFS locking API documentation #3650 (@slonopotamus) * Fix invalid JSON in locking protocol docs #3644 (@slonopotamus) * Various release updates #3623 (@bk2204) * tq/adapterbase: fix typo enableHrefRerite to enableHrefRewrite #3594 (@h-hirokawa) * Use git-lfs version of go-ntlm #3588 (@bk2204) * Log test server standard error to log file #3577 (@bk2204) * Don't set -extldflags unless LDFLAGS has a value #3545 (@hartzell) * Retry on oversize file #3518 (@bk2204) * Fix asset uploading during releases #3538 (@bk2204) ## 2.7.0 (15 February 2019) This release adds better support for large files on 32-bit systems, adds attribute macros, fixes several file descriptor leaks, improves compatibility with Git's configuration parsing, and includes numerous other bug fixes and modifications. 
We would like to extend a special thanks to the following open-source contributors: * @andyneff and @torbjoernk for updating our release targets * @zkry for work on rate-limiting * @Foxboron for work on reproducible builds * @mstrap for adding a release target for Linux arm64 * @keiko713, @Erwyn, and @mloskot for improving our documentation * @QuLogic for fixing our tests under SELinux * @saracen and @steffengodskesen for improving our output handling * @mbsulliv for finding and fixing a bug where we ran out of file descriptors ### Features * Add sles 15 support #1055 #3515 (@andyneff) * docs/man/git-lfs-config.5.ronn: document GIT_LFS_SKIP_SMUDGE #3509 (@ttaylorr) * commands/command_pointer.go: introduce `--check` option #3501 (@ttaylorr) * Makefile additions for reproducible builds and asmflags #3444 (@Foxboron) * locking: add flag to control modification of ignored files #3409 (@bk2204) * build package for Ubuntu 18.10 aka Cosmic #3402 (@torbjoernk) * Add support for retries with delays (ex. rate limiting) #3449 (@zkry) * Trim embedded paths out of binaries #3443 (@bk2204) * Ensure 32-bit Git LFS binaries can handle files larger than 4 GiB #3426 (@bk2204) * Support attribute macros #3391 (@bk2204) * tasklog: don't log progress status when stdout is not a tty #3349 (@steffengodskesen) * locking: cache JSON response from server #3253 (@mstrap) * tq: enable transfer debugging when GIT_CURL_VERBOSE is set #3341 (@bk2204) ### Bugs * .circleci: don't use 'brew prune' #3514 (@ttaylorr) * t/t-smudge.sh: remove unnecessary test #3513 (@ttaylorr) * docs/man: fix inconsistency in 'git-lfs-ls-files(1)' #3496 (@ttaylorr) * lfshttp: close body on redirect #3479 (@bk2204) * status: handle deleted files gracefully #3482 (@bk2204) * Fix hang in prune with too few file descriptors #3460 (@bk2204) * Fix parameter name on List Locks API Documentation #3477 (@Erwyn) * TST: Trim security context when checking permissions. #3476 (@QuLogic) * command/env: ensure we honor lfs.url #3470 (@bk2204) * Fix swapped case sensitivity in patterns #3433 (@bk2204) * core.sharedRepository improvements for directories #3417 (@bk2204) * Update the doc of whitelisted .lfsconfig keys #3423 (@keiko713) * Rewrite URL config-matching #3392 (@PastelMobileSuit) * git: close blob objects when finished #3379 (@bk2204) * Avoid hang in repos cloned with --shared or --reference #3383 (@bk2204) * commands/command_status.go: require a working copy #3378 (@ttaylorr) * Fix test server API #3377 (@bk2204) * vendor: don't remove necessary dependencies #3356 (@ttaylorr) * filepathfilter: don't say file is both accepted and rejected #3360 (@bk2204) * Support pushInsteadOf aliases when determining endpoints #3353 (@bk2204) * Close attributes file #3347 (@mbsulliv) * Fix humanize's FormatByteRate() to work with 0s duration #3340 (@saracen) ### Misc * Release automation #3510 (@bk2204) * docs/man: update `git-lfs-fetch(1)` manpage #3488 (@ttaylorr) * Update Cobra #3483 (@bk2204) * Run go generate only on Windows #3480 (@bk2204) * docs/man/git-lfs-migrate: make examples less confusing #3424 (@bk2204) * Modify logic of 'migrate info' to process extensionless files #3458 (@zkry) * Improve error message on missing object #3398 (@bk2204) * docs/man: suggest using Git configuration for LFS keys #3394 (@bk2204) * Document default value of migrate info --top= #3387 (@mloskot) * Clarify minimum git version #3327 (@carlwgeorge) ## 2.6.1 (3 December 2018) This release contains miscellaneous bug fixes since v2.6.0. 
Most notably, release v2.6.1 restores support for alternate repositories, which was accidentally broken in v2.6.0. ### Bugs * git: close blob objects when finished #3379 (@bk2204) * Avoid hang in repos cloned with --shared or --reference #3383 (@bk2204) * vendor: don't remove necessary dependencies #3356 (@ttaylorr) ## 2.6.0 (1 November, 2018) This release adds better support for redirecting network calls from a Git LFS API server to one that requires a different authentication mode, builds Git LFS on Go 1.11, and numerous other bug fixes and modifications. We would like to extend a special thanks to the following open-source contributors: * @andyneff for updating our release targets * @gtsiolis: for removing the deprecated `git lfs clone` from the listing of supported Git LFS commands * @jsantell for fixing a formatting issue in the INCLUDE AND EXCLUDE man page section * @mmlb for adding a release target for Linux arm64 * @skashyap7 for adding the 'git lfs track -n' * @Villemoes: for modernizing the Git LFS installation procedure on Debian. ### Features * commands: list explicitly excluded patterns separately #3320 (@bk2204) * Uninstall improvements #3326 (@bk2204) * config: honor GIT_AUTHOR_DATE and GIT_COMMITTER_DATE #3314 (@bk2204) * Add new `.netrc` credential helper #3307 (@PastelMobileSuit) * Honor umask and core.sharedRepository #3304 (@bk2204) * Support listing only filename tracked by git lfs using --name (-n) option #3271 (@skashyap7) * all: use Go 1.11.1 in CI #3298 (@ttaylorr) * lfsapi/tq: Have DoWithAuth() caller determine URL Access Mode #3293 (@PastelMobileSuit) * commands: undeprecate checkout #3303 (@bk2204) * Checkout options for conflicts #3296 (@bk2204) * Makefile: build source tarballs for release #3283 (@bk2204) * Encrypted SSL key support #3270 (@bk2204) * Add support for core.sshCommand #3235 (@bk2204) * gitobj-based Object Scanner #3236 (@bk2204) * README.md: new core team members #3217 (@ttaylorr) * Add build and releases for linux arm64 #3196 (@mmlb) * Update packagecloud.rb #3210 (@andyneff) * all: use Go modules instead of Glide #3208 (@ttaylorr) * all: use Go 1.11 in CI #3203 (@ttaylorr) ### Bugs * Fix formatting of INCLUDE AND EXCLUDE (REFS) #3330 (@jsantell) * go.sum: add missing entries #3319 (@bk2204) * Ensure correct syntax for commit headers in lfs migrate import #3313 (@bk2204) * Clean up trailing whitespace #3299 (@bk2204) * commands: unambiguously resolve remote references #3285 (@ttaylorr) * Expand custom transfer args by using the shell #3259 (@bk2204) * Canonicalize paths properly on Windows #3277 (@bk2204) * debian/prerm: add --system flag #3272 (@Villemoes) * t: make testsuite run under git rebase -x #3262 (@bk2204) * git/gitattr: parse 'set' attributes #3255 (@ttaylorr) * t: avoid panic in lfstest-customadapter #3243 (@bk2204) * t: avoid using shell variables in printf's first argument #3242 (@bk2204) * lfsapi: handle SSH hostnames and aliases without users #3230 (@bk2204) * commands/command_ls_files.go: ignore index with argument #3219 (@ttaylorr) * commands/command_migrate_import.go: install hooks #3227 (@ttaylorr) * t: mark test sources as .PHONY #3228 (@ttaylorr) * Pass GIT_SSH_COMMAND to the shell #3199 (@bk2204) * Tidy misformatted files #3202 (@bk2204) * config: expand core.hooksPath #3212 (@ttaylorr) * locks: manage write permissions of ignored files #3190 (@ttaylorr) ### Misc * CONTRIBUTING.md: :nail_care: #3325 (@ttaylorr) * Update CONTRIBUTING #3317 (@bk2204) * go.mod: depend on tagged gitobj #3311 (@ttaylorr) * RFC: SSH protocol 
#3290 (@bk2204) * Remove `git lfs clone` command from man #3301 (@gtsiolis) * ROADMAP.md: use GitHub issues instead #3286 (@ttaylorr) * docs: add note about closing release milestone #3274 (@bk2204) * CI improvements #3268 (@bk2204) * docs/howto: document our release process #3261 (@ttaylorr) * Create new lfshttp package #3244 (@PastelMobileSuit) * CONTRIBUTING: update required go version #3232 (@PastelMobileSuit) * go.mod: use latest github.com/olekukonko/ts #3223 (@ttaylorr) * go.mod: pin github.com/git-lfs/wildmatch to v1.0.0 #3218 (@ttaylorr) * Update README.md #3193 (@srl295) ## 2.5.2 (17 September, 2018) ### Bugs * config: Treat [host:port]:path URLs correctly #3226 (@saschpe) * tq: Always provide a Content-Type when uploading files #3201 (@bk2204) * commands/track: Properly `lfs track` files with escaped characters in their name #3192 (@leonid-s-usov) ### Misc * packagecloud.rb: remove older versions #3210 (@andyneff) ## 2.5.1 (2 August, 2018) This release contains miscellaneous bug fixes since v2.5.0. Most notably, release v2.5.1 allows a user to disable automatic Content-Type detection (released in v2.5.0) via `git config lfs.contenttype false` for hosts that do not support it. ### Features * tq: make Content-Type detection disable-able #3163 (@ttaylorr) ### Bugs * Makefile: add explicit rule for commands/mancontent_gen.go #3160 (@jj1bdx) * script/install.sh: mark as executable #3155 (@ttaylorr) * config: add origin to remote list #3152 (@PastelMobileSuit) ### Misc * docs/man/mangen.go: don't show non-fatal output without --verbose #3168 (@ttaylorr) * LICENSE.md: update copyright year #3156 (@IMJ355) * Makefile: silence some output #3164 (@ttaylorr) * Makefile: list prerequisites for resource.syso #3153 (@ttaylorr) ## 2.5.0 (26 July, 2018) This release adds three new migration modes, updated developer ergonomics, and a handful of bug fixes to Git LFS. We would like to extend a special thanks to the following open-source contributors: * @calavera for fixing a broken Go test and adding support for custom Content-Type headers in #3137 and #3138. * @cbuehlmann for adding support for encoded character names in filepaths via #3093. * @larsxschneider for changing the default value of lfs.allowincompletepush in #3109. * @NoEffex for supporting TTL in SSH-based authentication tokens via #2867. * @ssgelm for adding 'go generate' to our Debian packages via #3083. ### Features * Makefile: replace many scripts with make targets #3144 (@ttaylorr) * {.travis,appveyor}.yml: upgrade to Go 1.10.3 #3146 (@ttaylorr) * t: run tests using prove #3125 (@ttaylorr) * commands/migrate: infer wildmatches with --fixup #3114 (@ttaylorr) * Retry SSH resolution 5 times #2934 (@stanhu) * Implement `migrate export` subcommand #3084 (@PastelMobileSuit) * Add `--no-rewrite` flag to `migrate import` command #3029 (@PastelMobileSuit) ### Bugs * t: fix contains_same_elements() fn #3145 (@PastelMobileSuit) * commands: warn if working copy is dirty #3124 (@ttaylorr) * Ensure provided remote takes precedence over configured pushRemote #3139 (@PastelMobileSuit) * Fix proxy unit tests. 
#3138 (@calavera) * commands/command_migrate.go: loosen meaning of '--everything' #3121 (@ttaylorr) * lfsapi: don't query askpass for given creds #3126 (@PastelMobileSuit) * config/git_fetcher.go: mark 'lfs.allowincompletepush' as safe #3113 (@ttaylorr) * fs: support multiple object alternates #3116 (@ttaylorr) * commands/checkout: checkout over read-only files #3120 (@ttaylorr) * test/testhelpers.sh: look for 64 character SHA-256's #3119 (@ttaylorr) * config/config.go: case-insensitive error search #3098 (@ttaylorr) * Encoded characters in pathnames #3093 (@cbuehlmann) * Support default TTL for authentication tokens acquired via SSH #2867 (@NoEffex) * commands/status.go: relative paths outside of root #3080 (@ttaylorr) * Run `go generate` on commands in deb build #3083 (@ssgelm) * lfsapi: prefer proxying from gitconfig before environment #3062 (@ttaylorr) * commands/track: respect global- and system-level gitattributes #3076 (@ttaylorr) * git/git.go: pass --multiple to git-fetch(1) when appropriate #3063 (@ttaylorr) * commands/checkout: fix inaccurate messaging #3055 (@ttaylorr) * commands/migrate: do not migrate empty commits #3054 (@ttaylorr) * git/odb: retain trailing newlines in commit messages #3053 (@ttaylorr) ### Misc * Set original file content type on basic upload. #3137 (@calavera) * README.md: Git for Windows ships LFS by default #3112 (@larsxschneider) * change lfs.allowincompletepush default from true to false #3109 (@larsxschneider) * *: replace git/odb with vendored copy #3108 (@ttaylorr) * test/test-ls-files.sh: skip on CircleCI #3101 (@ttaylorr) * lfsapi/ssh.go: use zero-value sentinels #3099 (@ttaylorr) * README.md: add link to installation wiki page #3075 (@ttaylorr) * docs/man/git-lfs.1.ronn: update casing and missing commands #3059 (@ttaylorr) * commands/checkout: mark 'git lfs checkout' as deprecated #3056 (@ttaylorr) ## 2.4.2 (28 May, 2018) ### Bugs * lfsapi: re-authenticate HTTP redirects when needed #3028 (@ttaylorr) * lfsapi: allow unknown keywords in netrc file(s) #3027 (@ttaylorr) ## 2.4.1 (18 May, 2018) This release fixes a handful of bugs found and fixed since v2.4.0. In particular, Git LFS no longer panic()'s after invalid API responses, can correctly run 'fetch' on SHAs instead of references, migrates symbolic links correctly, and avoids writing to `$HOME/.gitconfig` more than is necessary. We would like to extend a "thank you" to the following contributors for their gracious patches: - @QuLogic fixed an issue with running tests that require credentials - @patrickmarlier made it possible for 'git lfs migrate import' to work correctly with symbolic links. - @zackse fixed an inconsistency in `CONTRIBUTING.md` - @zanglang fixed an inconsistency in `README.md` Git LFS would not be possible without generous contributions from the open-source community. For these, and many more: thank you! ### Features * script/packagecloud.rb: release on Ubuntu Bionic #2961 (@ttaylorr) ### Bugs * lfsapi: canonicalize extra HTTP headers #3010 (@ttaylorr) * commands/lock: follow symlinks before locking #2996 (@ttaylorr) * lfs/attribute.go: remove default value from upgradeables #2994 (@ttaylorr) * git: include SHA1 in ref-less revisions #2982 (@ttaylorr) * Do not migrate the symlinks to LFS objects. 
#2983 (@patrickmarlier) * commands/uninstall: do not log about global hooks with --local #2976 (@ttaylorr) * commands/run.go: exit 127 on unknown sub-command #2969 (@ttaylorr) * commands/{un,}track: perform "prefix-agnostic" comparisons #2955 (@ttaylorr) * commands/migrate: escape paths before .gitattributes #2933 (@ttaylorr) * commands/ls-files: do not accept '--all' after '--' #2932 (@ttaylorr) * tq: prevent uint64 underflow with invalid API response #2902 (@ttaylorr) ### Misc * test/test-env: skip comparing GIT_EXEC_PATH #3015 (@ttaylorr) * remove reference to CLA from contributor's guide #2997 (@zackse) * .gitattributes link is broken #2985 (@zanglang) * commands: make --version a synonym for 'version' #2968, #3017 (@ttaylorr) * test: ensure that git-mergetool(1) works with large files #2939 (@ttaylorr) * README.md: note the correct PackageCloud URL #2960 (@ttaylorr) * README.md: mention note about `git lfs track` retroactively #2948 (@ttaylorr) * README.md: reorganize into Core Team, Alumni #2941 (@ttaylorr) * README.md: :nail_care: #2942 (@ttaylorr) * circle.yml: upgrade to 'version: 2' syntax #2928 (@ttaylorr) * Use unique repo name for tests that require credentials. #2901 (@QuLogic) ## 2.4.0 (2 March, 2018) This release introduces a rewrite of the underlying file matching engine, expands the API to include relevant refspecs for individual requests, standardizes the progress output among commands, and more. Please note: in the next MAJOR release (v3.0.0) the semantic meaning behind `--include` and `--exclude` flags will change. As the details of exactly which existing patterns will no longer function as before become known, we will indicate them here. Any `--include` or `--exclude` patterns used in v2.3.0 or earlier are expected to work as they did previously in this release. This release would not be possible without the open-source community. Specifically, we would like to thank: - @larsxschneider: for contributing fixes to the filter operation in `git lfs fsck`, and `git lfs prune`, as well as the bug report leading to the filepathfilter changes. - @yfronto: for adding new Linux release targets. - @stffabi: for adding support for NTLM with SSPI on Windows. - @jeffreydwalter: for fixing memory alignment issues with `sync/atomic` on 32-bit architectures. - @b4mboo: for adding an LFS configuration key to the list of safe configuration options. Without the aforementioned individuals, this release would not have been possible. Thank you!
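To illustrate the wildmatch-style patterns that the `--include` and `--exclude` flags now accept (the patterns and paths below are hypothetical examples, not taken from the release itself):

```
# Fetch PSD files anywhere in the tree, but skip anything under docs/:
$ git lfs fetch --include="**/*.psd" --exclude="docs/**"

# The same patterns work with the new ls-files flags:
$ git lfs ls-files --include="textures/**"
```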
### Features * __Support wildmatch-compliant options in `--include`, `--exclude`__ * filepathfilter: implement using wildmatch #2875 (@ttaylorr) * test: add wildmatch migration tests #2888 (@larsxschneider, @ttaylorr) * __Expand the specification to include relevant refspecs__ * verify locks against each ref being pushed #2706 (@technoweenie) * Batch send refspec take 2 #2809 (@technoweenie) * Run 1 TransferQueue per uploaded ref #2806 (@technoweenie) * Locks/verify: full refspec #2722 (@technoweenie) * send remote refspec for the other lock commands #2773 (@technoweenie) * __Standardize progress meter output and implementation__ * tq: standardized progress meter formatting #2811 (@ttaylorr) * commands/fetch: unify formatting #2758 (@ttaylorr) * commands/prune: unify formatting #2757 (@ttaylorr) * progress: use git/githistory/log package for formatting #2732 (@ttaylorr) * progress: remove `*progress.Meter` #2762 (@ttaylorr) * tasklog: teach `*Logger` how to enqueue new `*SimpleTask`'s #2767 (@ttaylorr) * progress: remove spinner.go #2759 (@ttaylorr) * __Teach new flags, functionality to `git lfs ls-files`__ * commands: teach '--all' to `git lfs ls-files` #2796 (@ttaylorr) * commands/ls-files: show cached, tree-less LFS objects #2795 (@ttaylorr) * commands/ls-files: add --include, --exclude #2793 (@ttaylorr) * commands/ls-files: add '--size' flag #2764 (@ttaylorr) * __Add new flags, functionality to `git lfs migrate`__ * commands/migrate: support '^'-prefix refspec in arguments #2785 (@ttaylorr) * commands/migrate: add '--skip-fetch' for offline migrations #2738 (@ttaylorr) * git: prefer sending revisions over STDIN than arguments #2739 (@ttaylorr) * __Release to new operating systems__ * release lfs for ubuntu/artful too #2704 (@technoweenie) * Adding Mint Sylvia to packagecloud.rb script #2829 (@yfronto) * __New functionality in package `lfsapi`__ * NTLM authentication with SSPI on windows #2871 (@stffabi) * lfsapi/auth: teach DoWithAuth to respect http.extraHeaders #2733 (@ttaylorr) * add support for url-specific proxies #2651 (@technoweenie) * __Code cleanup in git.Config, package `localstorage`__ * Tracked remote #2700 (@technoweenie) * Replace git.Config #2692 (@technoweenie) * Replace localstorage #2689 (@technoweenie) * Remove last global config #2687 (@technoweenie) * Git config refactor #2676 (@technoweenie) ### Bugs * all: fix 32-bit alignment issues with `sync/atomic` #2883 (@ttaylorr) * all: memory alignment issues on 32-bit systems. #2880 (@jeffreydwalter) * command/migrate: don't migrate remote references in bare repositories #2769 (@ttaylorr) * commands/ls-files: behave correctly before initial commit #2794 (@ttaylorr) * commands/migrate: allow for ambiguous references in migrations #2734 (@ttaylorr) * commands: fill in missing printf arg #2678 (@technoweenie) * config: Add `lfs.locksverify` to safe keys. 
#2797 (@b4mboo) * don't replace pointers with objects if clean filter is not configured #2626 (@technoweenie) * fsck: attach a filter to exclude unfetched items from fsck #2847 (@larsxschneider) * git/githistory: copy entries from cache, elsewhere #2884 (@ttaylorr) * git/githistory: migrate annotated tags correctly #2780 (@ttaylorr) * git/odb: don't print extra newline after commit message #2784 (@ttaylorr) * git/odb: extract identifiers from commits verbatim #2751 (@wsprent) * git/odb: implement parsing for annotated `*Tag`'s #2778 (@ttaylorr) * git/odb: retain newlines when parsing commit messages #2786 (@ttaylorr) * lfs: PointerScanner is nil after error, so don't close #2699 (@technoweenie) * lfsapi: Cred helper improvements #2695 (@technoweenie) * lfsapi: retry requests changing access from none IF Auth header is empty #2621 (@technoweenie) * prune: always prune excluded paths #2851 (@larsxschneider) * status: fix incorrect formatting with unpushed objects #2746 (@ttaylorr) * tasklog: don't drop updates in PercentageTask #2755 (@ttaylorr) * test: Fix integration test early exit #2735 (@technoweenie) * test: generate random repo names with fs-safe characters #2698 (@technoweenie) ### Misc * all: Nitpicks #2821 (@technoweenie) * all: introduce package 'tlog' #2747 (@ttaylorr) * all: remove CLA #2870 (@MikeMcQuaid) * build: Specify the embedded Windows icon as part of versioninfo.json #2770 (@sschuberth) * config,test: Testlib no global config #2709 (@mathstuf) * config: add PushRemote() for checking `branch.*.pushRemote` and `remote.pushDefault` first #2715 (@technoweenie) * docs: Added documentation for git-lfs-ls-files' `*/-` output. #2719 (@bilke) * docs: Uninstall man page improvements #2730 (@dpursehouse) * docs: Update usage info for post-checkout #2830 (@proinsias) * docs: add 'git lfs prune' to main man page #2849 (@larsxschneider) * docs: use consistent casing for Git #2850 (@larsxschneider) * git/githistory: have `*RefUpdater` hold `*odb.ObjectDatabase` reference #2779 (@ttaylorr) * progress: move CopyCallback (& related) to package 'tools' #2749 (@ttaylorr) * progress: move `*progressLogger` implementation to package 'tools' #2750 (@ttaylorr) * refspec docs #2820 (@technoweenie) * script/test: run 'go tool vet' during testing #2788 (@ttaylorr) * tasklog: introduce `*SimpleTask` #2756 (@ttaylorr) * test: Ignore comment attr lines #2708 (@mathstuf) * test: Wait longer for test lfs server to start. 
#2716 (@QuLogic) * test: ensure commented attr lines are ignored #2736 (@ttaylorr) * tools/humanize: add 'FormatByteRate' to format transfer speed #2810 (@ttaylorr) * vendor: update 'xeipuuv/gojsonpointer' #2846 (@ttaylorr) ## 2.3.4 (18 October, 2017) ### Features * 'git lfs install' updates filters with 'skip-smudge' option #2673 (@technoweenie) ### Bugs * FastWalkGitRepo: limit number of concurrent goroutines #2672 (@technoweenie) * handle scenario where multiple configuration values exist in ~/.gitconfig #2659 (@shiftkey) ## 2.3.3 (9 October, 2017) ### Bugs * invoke lfs for 'git update-index', fixing 'status' issues #2647 (@technoweenie) * cache http credential helper output by default #2648 (@technoweenie) ## 2.3.2 (3 October, 2017) ### Features * bump default activity timeout from 10s -> 30s #2632 (@technoweenie) ### Bugs * ensure files are marked readonly after unlocking by ID #2642 (@technoweenie) * add files to index with path relative to current dir #2641 (@technoweenie) * better Netrc errors #2633 (@technoweenie) * only use askpass if credential.helper is not configured #2637 (@technoweenie) * convert backslash to slash when writing to .gitattributes #2625 (@technoweenie) ### Misc * only copy req headers if there are git-configured extra headers #2622 (@technoweenie) * update tracerx to add timestamps #2620 (@rubyist) ## 2.3.1 (27 September, 2017) ### Features * add support for SSH_ASKPASS #2609 (@technoweenie) * `git lfs migrate --verbose` option #2610 (@technoweenie) * Support standalone custom transfer based on API URL prefix match #2590 (@sprohaska) ### Bugs * Improve invalid URL error messages #2614 (@technoweenie) * Fix double counting progress bug #2608 (@technoweenie) * trim whitespace from GIT_ASKPASS provided passwords #2607 (@technoweenie) * remove mmap usage in Packfile reader #2600 (@technoweenie) * `git lfs clone`: don't fetch for unborn repositories #2598 (@shiftkey) ### Misc * Windows Installer fixes: * Show proper icon in add/remove programs list #2585 (@shiftkey) * Make the Inno Setup installer script explicitly check for the binaries #2588 (@sschuberth) * Improve compile-win-installer-unsigned.bat a bit #2586 (@sschuberth) * Update migrate docs example for multiple file types #2596 (@technoweenie) ## 2.3.0 (14 September, 2017) Git LFS v2.3.0 includes performance optimizations for the `git-lfs-migrate(1)` and `git-clone(1)` commands, new features, bug-fixes, and more. This release was made possible by contributors to Git LFS. Specifically: - @aleb: added support for "standalone" transfer agents, for using `rsync(1)` and similar with Git LFS. - @bozaro: added support for custom `.git/lfs/objects` directories via the `lfs.storage` configuration option. - @larsxschneider: fixed a recursive process leak when shelling out to Git, added new features to `git lfs ls-files`, extra information in error messages used for debugging, documentation changes and more. - @mathstuf: contributed a documentation change clarifying LFS's handling of empty pointer files. - @rudineirk and @andyneff: updated our release process to build packages for fedora/26. - @ssgelm: ensured that LFS is able to be released on Ubuntu Universe. To everyone who has contributed to this or previous releases of Git LFS: Thank you! 
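As a quick example of the new `lfs.storage` option contributed by @bozaro (the path below is hypothetical):

```
# Keep LFS objects on a larger volume instead of under .git/lfs:
$ git config --local lfs.storage /mnt/big-volume/lfs-storage
```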
### Features * git/odb/pack: improve `git lfs migrate` performance * git/odb/pack: introduce packed object reassembly #2550 #2551 #2552 #2553 #2554 (@ttaylorr) * git/odb/pack: teach packfile index entry lookups #2420 #2421 #2422 #2423 #2437 #2441 #2461 (@ttaylorr) * git/{odb,githistory}: don't write unchanged objects #2541 (@ttaylorr) * commands: improve `git clone` performance with 'delay' capability #2511 #2469 #2468 #2471 #2467 #2476 #2483 (@ttaylorr) * commands: mark `git lfs clone` as deprecated #2526 (@ttaylorr) * commands: enable `lfs.allowincompletepush` by default #2574 (@technoweenie) * commands: teach '--everything' to `git lfs migrate` #2558 (@ttaylorr) * commands: teach `git lfs ls-files` a '--debug' option #2540 (@larsxschneider) * commands,lfs: warn on 4gb size conversion during clean #2510 #2507 #2459 (@ttaylorr) * lfsapi/creds: teach about GIT_ASKPASS and core.askpass #2500 #2578 (@ttaylorr) * commands/status: indicate missing objects #2438 (@ttaylorr) * Allow using custom transfer agents directly #2429 (@aleb) * Add `lfs.storage` parameter for overriding LFS storage location #2023 (@bozaro) * lfsapi: enable credential caching by default #2508 (@ttaylorr) * commands/install: teach `--manual` to `git-lfs-install(1)` #2410 (@ttaylorr) ### Bugs * migrate: fix migrations with subdirectories in '--include' or '--exclude' #2485 (@ttaylorr) * commands/migrate: fix hardlinking issue when different filesystem is mounted at `/tmp` #2566 (@ttaylorr) * commands: make `git lfs migrate` fetch ref updates before migrating #2538 (@ttaylorr) * commands: remove '--above=1mb' default from `git lfs migrate info` #2460 (@ttaylorr) * filepathfilter: fix `HasPrefix()` when no '--include' filters present #2579 (@technoweenie) * git/githistory/log: fix race condition with `git/githistory/log` tests #2495 (@ttaylorr) * git/odb: fix closing object database test #2457 (@ttaylorr) * git/githistory: only update local refs after migrations #2559 (@ttaylorr) * locking: fix unlocking files not removing write flag #2514 (@ttaylorr) * locks: fix unlocking files in a symlinked directory #2505 (@ttaylorr) * commands: teach `git lfs unlock` to ignore status errs in appropriate conditions #2475 (@ttaylorr) * git: expand `GetAttributePaths` check to include non-LFS lockables #2528 (@ttaylorr) * fix multiple `git update-index` invocations #2531 (@larsxschneider) * tools: fix SSH credential cacher expiration #2530 (@ttaylorr) * lfsapi: fix read/write race condition in credential cacher #2493 (@ttaylorr) * lfs: fix cleaning contents larger than 1024 bytes over stdin #2488 (@ttaylorr) * fsck only scans current version of objects #2049 (@TheJare) * progress: fix writing updates to `$GIT_LFS_PROGRESS` #2465 (@ttaylorr) * commands/track: resolve symlinks before comparing attr paths #2463 (@ttaylorr) * test: ensure that empty pointers are empty #2458 (@ttaylorr) * git/githistory/log: prevent 'NaN' showing up in `*PercentageTask` #2455 (@ttaylorr) * tq: teach Batch() API to retry itself after io.EOF's #2516 (@ttaylorr) ### Misc * script/packagecloud: release LFS on Fedora/26 #2443 #2509 (@rudineirk, @andyneff) * git/githistory: change "Rewriting commits" when not updating refs #2577 (@ttaylorr) * commands: print IP addresses in error logs #2570 (@larsxschneider) * commands: print current time in UTC to error logs #2571 (@larsxschneider) * commands: Disable lock verification when using a standalone custom-tr… #2499 (@aleb) * docs/man: update `git lfs migrate` documentation with EXAMPLES #2580 (@technoweenie) * docs/man: 
recommend global per-host locking config #2546 (@larsxschneider) * commands: use transfer queue's batch size instead of constant #2529 (@ttaylorr) * add function to invoke Git with disabled LFS filters #2453 (@larsxschneider) * config: warn on unsafe keys in `.lfsconfig` #2502 (@ttaylorr) * glide: remove unused dependencies #2501 (@ttaylorr) * script/build: pass '-{ld,gc}flags' to compiler, if given #2462 (@ttaylorr) * spec: mention that an empty file is its own LFS pointer #2449 (@mathstuf) * Update to latest version of github.com/pkg/errors #2426 (@ssgelm) * Update gitignore to add some temp files that get created when building debs #2425 (@ssgelm) * lfs: indent contents of `git lfs install`, `update` #2392 (@ttaylorr) * tq: increase default `lfs.concurrenttransfers` to 8 #2506 (@ttaylorr) ## 2.2.1 (10 July, 2017) ### Bugs * git lfs status --json only includes lfs files #2374 (@asottile) * git/odb: remove temporary files after migration #2388 (@ttaylorr) * git/githistory: fix hanging on empty set of commits #2383 (@ttaylorr) * migrate: don't checkout HEAD on bare repositories #2389 (@ttaylorr) * git/odb: prevent cross-volume link error when saving objects #2382 (@ttaylorr) * commands: only pass --jobs to `git clone` if set #2369 (@technoweenie) ### Misc * lfs: trace hook install, uninstall, upgrade #2393 (@ttaylorr) * vendor: remove github.com/cheggaaa/pb #2386 (@ttaylorr) * Use FormatBytes from git-lfs/tools/humanize instead of cheggaaa/pb #2377 (@ssgelm) ## 2.2.0 (27 June, 2017) Git LFS v2.2.0 includes bug fixes, minor features, and a brand new `migrate` command. The `migrate` command rewrites commits, converting large files from Git blobs to LFS objects. The most common use case is fixing a `git push` that was rejected for containing large blobs:

```
$ git push origin master
# ...
remote: error: file a.psd is 1.2 gb; this exceeds github's file size limit of 100.00 mb
to github.com:ttaylorr/demo.git
! [remote rejected] master -> master (pre-receive hook declined)
error: failed to push some refs to 'git@github.com:ttaylorr/demo.git'

$ git lfs migrate info
*.psd   1.2 GB   27/27 files(s)  100%

$ git lfs migrate import --include="*.psd"
migrate: Sorting commits: ..., done
migrate: Rewriting commits: 100% (810/810), done
  master  f18bb746d44e8ea5065fc779bb1acdf3cdae7ed8 -> 35b0fe0a7bf3ae6952ec9584895a7fb6ebcd498b
migrate: Updating refs: ..., done

$ git push origin
Git LFS: (1 of 1 files) 1.2 GB / 1.2 GB
# ...
To github.com:ttaylorr/demo.git
 * [new branch]      master -> master
```

The `migrate` command has detailed options described in the `git-lfs-migrate(1)` man page. Keep in mind that this is the first pass at such a command, so we expect there to be bugs and performance issues (especially on long git histories). Future updates to the command will be focused on improvements to allow full LFS transitions on large repositories.
### Features * commands: add git-lfs-migrate(1) 'import' subcommand #2353 (@ttaylorr) * commands: add git-lfs-migrate(1) 'info' subcommand #2313 (@ttaylorr) * Implement status --json #2311 (@asottile) * commands/uploader: allow incomplete pushes #2199 (@ttaylorr) ### Bugs * Retry on timeout or temporary errors #2312 (@jakub-m) * commands/uploader: don't verify locks if verification is disabled #2278 (@ttaylorr) * Fix tools.TranslateCygwinPath() on MSYS #2277 (@raleksandar) * commands/clone: add new flags since Git 2.9 #2251, #2252 (@ttaylorr) * Make pull return non-zero error code when some downloads failed #2237 (@seth2810) * tq/basic_download: guard against nil HTTP response #2227 (@ttaylorr) * Bugfix: cannot push to scp style URL #2198 (@jiangxin) * support lfs.<url>.* values where url does not include .git #2192 (@technoweenie) * commands: fix logged error not interpolating format qualifiers #2228 (@ttaylorr) * commands/help: print helptext to stdout for consistency with Git #2210 (@ttaylorr) ### Misc * Minor cleanups in help index #2248 (@dpursehouse) * Add git-lfs-lock and git-lfs-unlock to help index #2232 (@dpursehouse) * packagecloud: add Debian 9 entry to formatted list #2211 (@ttaylorr) * Update Xenial to use stretch packages #2212 (@andyneff) ## 2.1.1 (19 May, 2017) Git LFS v2.1.1 ships with bug fixes and a security patch fixing a remote code execution vulnerability exploitable by setting an SSH remote via your repository's `.lfsconfig` to contain the string "-oProxyCommand". This vulnerability is only exploitable if an attacker has write access to your repository, or you clone a repository with a `.lfsconfig` file containing that string. ### Bugs * Make pull return non-zero error code when some downloads failed #2245 (@seth2810, @technoweenie) * lfsapi: support cross-scheme redirection #2243 (@ttaylorr) * sanitize ssh options parsed from ssh:// url #2242 (@technoweenie) * filepathfilter: interpret as .gitignore syntax #2238 (@technoweenie) * tq/basic_download: guard against nil HTTP response #2229 (@ttaylorr) * commands: fix logged error not interpolating format qualifiers #2230 (@ttaylorr) ### Misc * release: backport Debian 9-related changes #2244 (@ssgelm, @andyneff, @ttaylorr) * Add git-lfs-lock and git-lfs-unlock to help index #2240 (@dpursehouse) * config: allow multiple environments when calling config.Unmarshal #2224 (@ttaylorr) ## 2.1.0 (28 April, 2017) ### Features * commands/track: teach --no-modify-attrs #2175 (@ttaylorr) * commands/status: add blob info to each entry #2070 (@ttaylorr) * lfsapi: improve HTTP request/response stats #2184 (@technoweenie) * all: support URL-style configuration lookups (@ttaylorr) * commands: support URL-style lookups for `lfs.{url}.locksverify` #2162 (@ttaylorr) * lfsapi: support URL-style lookups for `lfs.{url}.access` #2161 (@ttaylorr) * lfsapi/certs: use `*config.URLConfig` to do per-host config lookup #2160 (@ttaylorr) * lfsapi: support for http.<url>.extraHeader #2159 (@ttaylorr) * config: add prefix to URLConfig type #2158 (@ttaylorr) * config: remove dependency on lfsapi package #2156 (@ttaylorr) * config: support multi-value lookup on URLConfig #2154 (@ttaylorr) * lfsapi: initial httpconfig type #1912 (@technoweenie, @ttaylorr) * lfsapi,tq: relative expiration support #2130 (@ttaylorr) ### Bugs * commands: include error in `LoggedError()` #2179 (@ttaylorr) * commands: cross-platform log formatting to files #2178 (@ttaylorr) * locks: cross-platform path normalization #2139 (@ttaylorr) * commands,locking: don't disable locking for auth
errors during verify #2110 (@ttaylorr) * commands/status: show partially staged files twice #2067 (@ttaylorr) ### Misc * all: build on Go 1.8.1 #2145 (@ttaylorr) * Polish custom-transfers.md #2171 (@sprohaska) * commands/push: Fix typo in comment #2170 (@sprohaska) * config: support multi-valued config entries #2152 (@ttaylorr) * smudge: use localstorage temp directory, not system #2140 (@ttaylorr) * locking: send locks limit to server #2107 (@ttaylorr) * lfs: extract `DiffIndexScanner` #2035 (@ttaylorr) * status: use DiffIndexScanner to populate results #2042 (@ttaylorr) ## 2.0.2 (29 March, 2017) ### Features * ssh auth and credential helper caching #2094 (@ttaylorr) * commands,tq: specialized logging for missing/corrupt objects #2085 (@ttaylorr) * commands/clone: install repo-level hooks after `git lfs clone` #2074 (@ttaylorr) * debian: Support building on armhf and arm64 #2089 (@p12tic) ### Bugs * commands,locking: don't disable locking for auth errors during verify #2111 (@ttaylorr) * commands: show real error while cleaning #2096 (@ttaylorr) * lfsapi/auth: optionally prepend an empty scheme to Git remote URLs #2092 (@ttaylorr) * tq/verify: authenticate verify requests if required #2084 (@ttaylorr) * commands/{,un}track: correctly escape '#' and ' ' characters #2079 (@ttaylorr) * tq: use initialized lfsapi.Client instances in transfer adapters #2048 (@ttaylorr) ### Misc * locking: send locks limit to server #2109 (@ttaylorr) * docs: update configuration documentation #2097 #2019 #2102 (@terrorobe) * docs: update locking API documentation #2099 #2101 (@dpursehouse) * fixed table markdown in README.md #2095 (@ZaninAndrea) * remove the the duplicate work #2098 (@grimreaper) ## 2.0.1 (6 March, 2017) ### Misc * tq: fallback to `_links` if present #2007 (@ttaylorr) ## 2.0.0 (1 March, 2017) Git LFS v2.0.0 brings a number of important bug fixes, some new features, and a lot of internal refactoring. It also completely removes old APIs that were deprecated in Git LFS v0.6. ### Locking File Locking is a brand new feature that lets teams communicate when they are working on files that are difficult to merge. Users are not able to edit or push changes to any files that are locked by other users. While the feature has been in discussion for a year, we are releasing a basic Locking implementation to solicit feedback from the community. ### Transfer Queue LFS 2.0 introduces a new Git Scanner, which walks a range of Git commits looking for LFS objects to transfer. The Git Scanner is now asynchronous, initiating large uploads or downloads in the Transfer Queue immediately once an LFS object is found. Previously, the Transfer Queue waited until all of the Git commits had been scanned before initiating the transfer. The Transfer Queue also automatically retries failed uploads and downloads more often. ### Deprecations Git LFS v2.0.0 also drops support for the legacy API introduced in v0.5.0. If you're still using LFS servers on the old API, you'll have to stick to v1.5.6.
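A minimal workflow with the new locking commands described above might look like this, assuming your Git LFS server supports the locking API (the file name is hypothetical):

```
# Lock a hard-to-merge file so other users cannot modify or push it:
$ git lfs lock images/banner.psd

# See which files are locked, and by whom (add --json for scripts):
$ git lfs locks

# Release the lock once your changes are pushed:
$ git lfs unlock images/banner.psd
```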
### Features * Mid-stage locking support #1769 (@sinbad) * Define lockable files, make read-only in working copy #1870 (@sinbad) * Check that files are not uncommitted before unlock #1896 (@sinbad) * Fix `lfs unlock --force` on a missing file #1927 (@technoweenie) * locking: teach pre-push hook to check for locks #1815 (@ttaylorr) * locking: add `--json` flag #1814 (@ttaylorr) * Implement local lock cache, support querying it #1760 (@sinbad) * support for client certificates pt 2 #1893 (@technoweenie) * Fix clash between progress meter and credential helper #1886 (@technoweenie) * Teach uninstall cmd about --local and --system #1887 (@technoweenie) * Add `--skip-repo` option to `git lfs install` & use in tests #1868 (@sinbad) * commands: convert push, pre-push to use async gitscanner #1812 (@ttaylorr) * tq: prioritize transferring retries before new items #1758 (@ttaylorr) ### Bugs * ensure you're in the correct directory when installing #1793 (@technoweenie) * locking: make API requests relative to repository, not root #1818 (@ttaylorr) * Teach 'track' about CRLF #1914 (@technoweenie) * Teach 'track' how to handle empty lines in .gitattributes #1921 (@technoweenie) * Closing stdout pipe before function return #1861 (@monitorjbl) * Custom transfer terminate #1847 (@sinbad) * Fix Install in root problems #1727 (@technoweenie) * cat-file batch: read all of the bytes #1680 (@technoweenie) * Fixed file paths on cygwin. #1820, #1965 (@creste, @ttaylorr) * tq: decrement uploaded bytes in basic_upload before retry #1958 (@ttaylorr) * progress: fix never reading bytes with sufficiently small files #1955 (@ttaylorr) * tools: fix truncating string fields between balanced quotes in GIT_SSH_COMMAND #1962 (@ttaylorr) * commands/smudge: treat empty pointers as empty files #1954 (@ttaylorr) ### Misc * all: build using Go 1.8 #1952 (@ttaylorr) * Embed the version information into the Windows executable #1689 (@sschuberth) * Add more meta-data to the Windows installer executable #1752 (@sschuberth) * docs/api: object size must be positive #1779 (@ttaylorr) * build: omit DWARF tables by default #1937 (@ttaylorr) * Add test to prove set operator [] works in filter matching #1768 (@sinbad) * test: add ntlm integration test #1840 (@technoweenie) * lfs/tq: completely remove legacy support #1686 (@ttaylorr) * remove deprecated features #1679 (@technoweenie) * remove legacy api support #1629 (@technoweenie) ## 1.5.6 (16 February, 2017) ### Bugs * Spool malformed pointers to avoid deadlock #1932 (@ttaylorr) ## 1.5.5 (12 January, 2017) ### Bugs * lfs: only buffer first 1k when creating a CleanPointerError #1856 (@ttaylorr) ## 1.5.4 (27 December, 2016) ### Bugs * progress: guard negative padding width, panic in `strings.Repeat` #1807 (@ttaylorr) * commands,lfs: handle malformed pointers #1805 (@ttaylorr) ### Misc * script/packagecloud: release LFS on fedora/25 #1798 (@ttaylorr) * backport filepathfilter to v1.5.x #1782 (@technoweenie) ## 1.5.3 (5 December, 2016) ### Bugs * Support LFS installations at filesystem root #1732 (@technoweenie) * git: parse filter process header values containing '=' properly #1733 (@larsxschneider) * Fix SSH endpoint parsing #1738 (@technoweenie) ### Misc * build: release on Go 1.7.4 #1741 (@ttaylorr) ## 1.5.2 (22 November, 2016) ### Features * Release LFS on Fedora 24 #1685 (@technoweenie) ### Bugs * filter-process: fix reading 1024 byte files #1708 (@ttaylorr) * Support long paths on Windows #1705 (@technoweenie) ### Misc * filter-process: exit with error if we detect an unknown command
from Git #1707 (@ttaylorr) * vendor: remove contentaddressable lib #1706 (@technoweenie) ## 1.5.1 (18 November, 2016) ### Bugs * cat-file --batch parser errors on non-lfs git blobs #1680 (@technoweenie) ## 1.5.0 (17 November, 2016) ### Features * Filter Protocol Support #1617 (@ttaylorr, @larsxschneider) * Fast directory walk #1616 (@sinbad) * Allow usage of proxies even when contacting localhost #1605 (@chalstrick) ### Bugs * start reading off the Watch() channel before sending any input #1671 (@technoweenie) * wait for remote ref commands to exit before returning #1656 (@jjgod, @technoweenie) ### Misc * rewrite new catfilebatch implementation for upcoming gitscanner pkg #1650 (@technoweenie) * refactor testutils.FileInput so it's a little more clear #1666 (@technoweenie) * Update the lfs track docs #1642 (@technoweenie) * Pre push tracing #1638 (@technoweenie) * Remove `AllGitConfig()` #1634 (@technoweenie) * README: set minimal required Git version to 1.8.5 #1636 (@larsxschneider) * 'smudge --info' is deprecated in favor of 'ls-files' #1631 (@technoweenie) * travis-ci: test GitLFS with ancient Git version #1626 (@larsxschneider) ## 1.4.4 (24 October, 2016) ### Bugs * transfer: more descriptive "expired at" errors #1603 (@ttaylorr) * commands,lfs/tq: Only send unique OIDs to the Transfer Queue #1600 (@ttaylorr) * Expose the result message in case of an SSH authentication error #1599 (@sschuberth) ### Misc * AppVeyor: Do not build branches with open pull requests #1594 (@sschuberth) * Update .mailmap #1593 (@dpursehouse) ## 1.4.3 (17 October, 2016) ### Bugs * lfs/tq: use extra arguments given to tracerx.Printf #1583 (@ttaylorr) * api: correctly print legacy API warning to Stderr #1582 (@ttaylorr) ### Misc * Test storage retries #1585 (@ttaylorr) * Test legacy check retries behavior #1584 (@ttaylorr) * docs: Fix a link to the legacy API #1579 (@sschuberth) * Add a .mailmap file #1577 (@sschuberth) * Add a large wizard image to the Windows installer #1575 (@sschuberth) * Appveyor badge #1574 (@ttaylorr) ## 1.4.2 (10 October, 2016) v1.4.2 brings a number of bug fixes and usability improvements to LFS. This release also adds support for multiple retries within the transfer queue, making transfers much more reliable. To enable this feature, see the documentation for `lfs.transfer.maxretries` in `git-lfs-config(5)`. We'd also like to extend a special thank-you to @sschuberth who undertook the process of making LFS's test run on Windows through AppVeyor. Now all pull requests run tests on macOS, Linux, and Windows. 
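For example, allowing up to three attempts per object can be configured with `git config` (the key is the one documented in `git-lfs-config(5)`; the value `3` here is only illustrative):

```ShellSession
$ git config lfs.transfer.maxretries 3
```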
### Features * lfs: warn on usage of the legacy API #1564 (@ttaylorr) * use filepath.Clean() when comparing filenames to include/exclude patterns #1565 (@technoweenie) * lfs/transfer_queue: support multiple retries per object #1505, #1528, #1535, #1545 (@ttaylorr) * Automatically upgrade old filters instead of requiring --force #1497 (@sinbad) * Allow lfs.pushurl in .lfsconfig #1489 (@technoweenie) ### Bugs * Use "sha256sum" on Windows #1566 (@sschuberth) * git: ignore non-root wildcards #1563 (@ttaylorr) * Teach status to recognize multiple files with identical contents #1550 (@ttaylorr) * Status initial commit #1540 (@sinbad) * Make path comparison robust against Windows short / long path issues #1523 (@sschuberth) * Allow fetch to run without a remote configured #1507 (@sschuberth) ### Misc * travis: run tests on Go 1.7.1 #1568 (@ttaylorr) * Enable running tests on AppVeyor CI #1567 (@sschuberth) * Travis: Only install git if not installed yet #1557 (@sschuberth) * Windows test framework fixes #1522 (@sschuberth) * Simplify getting the absolute Git root directory #1518 (@sschuberth) * Add icons to the Windows installer #1504 (@sschuberth) * docs/man: reference git-lfs-pointer(1) in clean documentation #1503 (@ttaylorr) * Make AppVeyor CI for Windows work again #1506 (@sschuberth) * commands: try out RegisterCommand() #1495 (@technoweenie) ## 1.4.1 (26 August, 2016) ### Features * retry if file download failed #1454 (@larsxschneider) * Support wrapped clone in current directory #1478 (@ttaylorr) ### Misc * Test `RetriableReader` #1482 (@ttaylorr) ## 1.4.0 (19 August, 2016) ### Features * Install LFS at the system level when packaged #1460 (@javabrett) * Fetch remote urls #1451 (@technoweenie) * add object Authenticated property #1452 (@technoweenie) * add support for `url.*.insteadof` in git config #1117, #1443 (@artagnon, @technoweenie) ### Bugs * fix --include bug when multiple files have same lfs content #1458 (@technoweenie) * check the git version is ok in some key commands #1461 (@technoweenie) * fix duplicate error reporting #1445, #1453 (@dpursehouse, @technoweenie) * transfer/custom: encode "event" as lowercase #1441 (@ttaylorr) ### Misc * docs/man: note GIT_LFS_PROGRESS #1469 (@ttaylorr) * Reword the description of HTTP 509 status #1467 (@dpursehouse) * Update fetch include/exclude docs for pattern matching #1455 (@ralfthewise) * config-next: API changes to the `config` package #1425 (@ttaylorr) * errors-next: Contextualize error messages #1463 (@ttaylorr, @technoweenie) * scope commands to not leak instances of themselves #1434 (@technoweenie) * Transfer manifest #1430 (@technoweenie) ## 1.3.1 (2 August 2016) ### Features * lfs/hook: teach `lfs.Hook` about `core.hooksPath` #1409 (@ttaylorr) ### Bugs * distinguish between empty include/exclude paths #1411 (@technoweenie) * Fix sslCAInfo config lookup when host in config doesn't have a trailing slash #1404 (@dakotahawkins) ### Misc * Use commands.Config instead of config.Config #1390 (@technoweenie) ## 1.3.0 (21 July 2016) ### Features * use proxy from git config #1173, #1358 (@jonmagic, @LizzHale, @technoweenie) * Enhanced upload/download of LFS content: #1265 #1279 #1297 #1303 #1367 (@sinbad) * Resumable downloads using HTTP range headers * Resumable uploads using [tus.io protocol](http://tus.io) * Pluggable [custom transfer adapters](https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md) * In git 2.9+, run "git lfs pull" in submodules after "git lfs clone" #1373 (@sinbad) * cmd,doc,test: teach `git lfs track
--{no-touch,verbose,dry-run}` #1344 (@ttaylorr) * Retry transfers with expired actions #1350 (@ttaylorr) * Safe track patterns #1346 (@ttaylorr) * Add checkout --unstaged flag #1262 (@orivej) * cmd/clone: add include/exclude via flags and config #1321 (@ttaylorr) * Improve progress reporting when files skipped #1296 (@sinbad) * Experimental file locking commands #1236, #1259, #1256, #1386 (@ttaylorr) * Implement support for GIT_SSH_COMMAND #1260 (@pdf) * Recognize include/exclude filters from config #1257 (@ttaylorr) ### Bugs * Fix bug in Windows installer under Win32. #1200 (@teo-tsirpanis) * Updated request.GetAuthType to handle multi-value auth headers #1379 (@VladimirKhvostov) * Windows fixes #1374 (@sinbad) * Handle artifactory responses #1371 (@ttaylorr) * use `git rev-list --stdin` instead of passing each remote ref #1359 (@technoweenie) * docs/man: move "logs" subcommands from OPTIONS to COMMANDS #1335 (@ttaylorr) * test/zero-len: update test for git v2.9.1 #1369 (@ttaylorr) * Unbreak building httputil on OpenBSD #1360 (@jasperla) * WIP transferqueue race fix #1255 (@technoweenie) * Safety check to `commands.requireStdin` #1349 (@ttaylorr) * Removed CentOS 5 from dockers. Fixed #1295. #1298 (@javabrett) * Fix 'git lfs fetch' with a sha1 ref #1323 (@omonnier) * Ignore HEAD ref when fetching with --all #1310 (@ttaylorr) * Return a fully remote ref to reduce chances of ref clashes #1248 (@technoweenie) * Fix reporting of `git update-index` errors in `git lfs checkout` and `git lfs pull` #1400 (@technoweenie) ### Misc * Added Linux Mint Sarah to package cloud script #1384 (@andyneff) * travis-ci: require successful tests against upcoming Git core release #1372 (@larsxschneider) * travis-ci: add a build job to test against upcoming versions of Git #1361 (@larsxschneider) * Create Makefiles for building with gccgo #1222 (@zeldin) * README: add @ttaylorr to core team #1332 (@ttaylorr) * Enforced a minimum gem version of 1.0.4 for packagecloud-ruby #1292 (@javabrett) * I think this should be "Once installed" and not "One installed", but … #1305 (@GabLeRoux) * script/test: propagate extra args to go test #1324 (@omonnier) * Add `lfs.basictransfersonly` option to disable non-basic transfer adapters #1299 (@sinbad) * Debian build vendor test excludes #1291 (@javabrett) * gitignore: ignore lfstest-\* files #1271 (@ttaylorr) * Disable gojsonschema test, causes failures when firewalls block it #1274 (@sinbad) * test: use noop credential helper for auth tests #1267 (@ttaylorr) * get git tests passing when run outside of repository #1229 (@technoweenie) * Package refactor no.1 #1226 (@sinbad) * vendor: vendor dependencies in vendor/ using Glide #1243 (@ttaylorr) ## 1.2.1 (2 June 2016) ### Features * Add missing config details to `env` command #1217 (@sinbad) * Allow smudge filter to return 0 on download failure #1213 (@sinbad) * Add `git lfs update --manual` option & promote it on hook install fail #1182 (@sinbad) * Pass `git lfs clone` flags through to `git clone` correctly, respect some options #1160 (@sinbad) ### Bugs * Clean trailing `/` from include/exclude paths #1278 (@ttaylorr) * Fix problems with user prompts in `git lfs clone` #1185 (@sinbad) * Fix failure to return non-zero exit code when lfs install/update fails to install hooks #1178 (@sinbad) * Fix missing man page #1149 (@javabrett) * fix concurrent map read and map write #1179 (@technoweenie) ### Misc * Allow additional fields on request & response schema #1276 (@sinbad) * Fix installer error on win32.
#1198 (@teo-tsirpanis) * Applied same -ldflags -X name value -> name=value fix #1193 (@javabrett) * add instructions to install from MacPorts #1186 (@skymoo) * Add xenial repo #1170 (@graingert) ## 1.2.0 (14 April 2016) ### Features * netrc support #715 (@rubyist) * `git lfs clone` command #988 (@sinbad) * Support self-signed certs #1067 (@sinbad) * Support sslverify option for specific hosts #1081 (@sinbad) * Stop transferring duplicate objects on major push or fetch operations on multiple refs. #1128 (@technoweenie) * Touch existing git tracked files when tracked in LFS so they are flagged as modified #1104 (@sinbad) * Support for git reference clones #1007 (@jlehtnie) ### Bugs * Fix clean/smudge filter string for files starting with - #1083 (@epriestley) * Fix silent failure to push LFS objects when ref matches a filename in the working copy #1096 (@epriestley) * Fix problems with using LFS in symlinked folders #818 (@sinbad) * Fix git lfs push silently misbehaving on ambiguous refs; fail like git push instead #1118 (@sinbad) * Whitelist `lfs.*.access` config in local ~/.lfsconfig #1122 (@rjbell4) * Only write the encoded pointer information to Stdout #1105 (@sschuberth) * Use hardcoded auth from remote or lfs config when accessing the storage api #1136 (@technoweenie, @jonmagic) * SSH should be called more strictly with command as one argument #1134 (@sinbad) ## 1.1.2 (1 March, 2016) * Fix Base64 issues with `?` #989 (@technoweenie) * Fix zombie git proc issue #1012 (@rlaakkol) * Fix problems with files containing unicode characters #1016 (@technoweenie) * Fix panic in `git cat-file` parser #1006 (@technoweenie) * Display error messages in non-fatal errors #1028 #1039 #1042 (@technoweenie) * Fix concurrent map access in progress meter (@technoweenie) ## 1.1.1 (4 February, 2016) ### Features * Add copy-on-write support for Linux BTRFS filesystem #952 (@bozaro) * convert `git://` remotes to LFS servers automatically #964 (@technoweenie) * Fix `git lfs track` handling of absolute paths. #975 (@technoweenie) * Allow tunable http client timeouts #977 (@technoweenie) ### Bugs * Suppress git config warnings for non-LFS keys #861 (@technoweenie) * Fix fallthrough when `git-lfs-authenticate` returns an error #909 (@sinbad) * Fix progress bar issue #883 (@pokehanai) * Support `remote.name.pushurl` config #949 (@sinbad) * Fix handling of `GIT_DIR` and `GIT_WORK_TREE` #963, #971 (@technoweenie) * Fix handling of zero length files #966 (@nathanhi) * Guard against invalid remotes passed to `push` and `pre-push` #974 (@technoweenie) * Fix race condition in `git lfs pull` #972 (@technoweenie) ### Extra * Add server API test tool #868 (@sinbad) * Redo windows installer with innosetup #875 (@strich) * Pre-built packages are built with Go v1.5.3 ## 1.1.0 (18 November, 2015) * NTLM auth support #820 (@WillHipschman, @technoweenie) * Add `prune` command #742 (@sinbad) * Use .lfsconfig instead of .gitconfig #837 (@technoweenie) * Rename "init" command #838 (@technoweenie) * Raise error if credentials are needed #842 (@technoweenie) * Support git repos in symlinked directories #818 (@sinbad, @difro, @jiangxin) * Fix "git lfs env" to show correct SSH remote info #828 (@jiangxin) ## 1.0.2 (28 October, 2015) * Fix issue with 'git lfs smudge' and the batch API. #795 (@technoweenie) * Fix race condition in the git scanning code. 
#801 (@technoweenie) ## 1.0.1 (23 October, 2015) * Downcase git config keys (prevents Auth loop) #690 (@WillHipschman) * Show more info for unexpected http responses #710 (@rubyist) * Use separate stdout/stderr buffers for `git-lfs-authenticate` #718 (@bozaro) * Use LoggedError instead of Panic if update-index fails in checkout #735 (@sinbad) * `smudge` command exits with non-zero if the download fails #732 (@rubyist) * Use `git rev-parse` to find the git working dir #692 (@sinbad) * Improved default remote behaviour & validation for fetch/pull #713 (@sinbad) * Make fetch return error code when 1+ downloads failed #734 (@sinbad) * Improve lfs.InRepo() detection in `init`/`update` #756 (@technoweenie) * Teach smudge to use the batch api #711 (@rubyist) * Fix not setting global attribute when needed to b/c of local state #765 (@sinbad) * Fix clone fail when fetch is excluded globally #770 (@sinbad) * Fix for partial downloads problem #763 (@technoweenie) * Get integration tests passing on Windows #771 (@sinbad) ### Security * Whitelist the valid keys read from .gitconfig #760 (@technoweenie) This prevents unsafe git configuration values from being used by Git LFS. ## v1.0 (1 October, 2015) * Manual reference is integrated into the "help" options #665 @sinbad * Fix `ls-files` when run from an empty repository #668 @Aorjoa * Support listing duplicate files in `ls-files` #681 @Aorjoa @technoweenie * `update` and `init` commands can install the pre-push hook in bare repositories #671 @technoweenie * Add `GIT_LFS_SKIP_SMUDGE` and `init --skip-smudge` #679 @technoweenie ## v0.6.0 (10 September, 2015) This is the first release that uses the new Batch API by default, while still falling back to the Legacy API automatically. Also, new fetch/checkout/push commands have been added. Run `git lfs update` in any local repositories to make sure all config settings are updated. ### Fetch * Rename old `git lfs fetch` command to `git lfs pull`. #527 (@sinbad) * Add `git lfs checkout` #527 #543 #551 #566 (@sinbad) * Add `git lfs fetch` for _just_ downloading objects. #527 (@sinbad) * Add `remote` arg, and default to tracking remote instead of "origin". #583 (@sinbad) * Support fetching multiple refs #542 (@sinbad) * Add `--include` and `--exclude` flag for `git lfs fetch` #573 (@sinbad) * Add `--recent` flag for downloading recent files outside of the current checkout. #610 (@sinbad) * Add `--all` option for download all objects from the server. #633 (@sinbad) * Fix error handling while `git update-index` is running. #570 (@rubyist) See [git-lfs-fetch(1)](https://github.com/git-lfs/git-lfs/blob/v0.6.0/docs/man/git-lfs-fetch.1.ronn), [git-lfs-checkout(1)](https://github.com/git-lfs/git-lfs/blob/v0.6.0/docs/man/git-lfs-checkout.1.ronn), and [git-lfs-pull(1)](https://github.com/git-lfs/git-lfs/blob/v0.6.0/docs/man/git-lfs-pull.1.ronn) for details. ### Push * Support pushing multiple branches in the pre-push hook. #635 (@sinbad) * Fix pushing objects from a branch that's not HEAD. #608 (@sinbad) * Check server for objects before failing push because local is missing. #581 (@sinbad) * Filter out commits from remote refs when pushing. #578 (@billygor) * Support pushing all objects to the server, regardless of the remote ref. #646 (@technoweenie) * Fix case where pre-push git hook exits with 0. #582 (@sinbad) See [git-lfs-push(1)](https://github.com/git-lfs/git-lfs/blob/v0.6.0/docs/man/git-lfs-push.1.ronn) for details. ### API Clients * Fix some race conditions in the Batch API client. 
#577 #637 (@sinbad, @rubyist) * Support retries in the Batch API client. #595 (@rubyist) * Fix hanging batch client in certain error conditions. #594 (@rubyist) * Treat 401 responses as errors in the Legacy API client. #634 (@rubyist) * Fix bug in the Legacy API client when the object already exists on the server. #572 (@billygor) ### Credentials * Fix how git credentials are checked in certain edge cases. #611 #650 #652 (@technoweenie) * Send URI user to git credentials. #626 (@sinbad) * Support git credentials with useHttpPath enabled. #554 (@clareliguori) ### Installation * Docker images and scripts for building and testing linux packages. #511 #526 #555 #603 (@andyneff, @ssgelm) * Create Windows GUI installer. #642 (@technoweenie) * Binary releases use Go 1.5, which includes fix for Authorization when the request URL includes just the username. [golang/go#11399](https://github.com/golang/go/issues/11399) ### Misc * Documented Git config values used by Git LFS in [git-lfs-config(5)](https://github.com/git-lfs/git-lfs/blob/v0.6.0/docs/man/git-lfs-config.5.ronn). #610 (@sinbad) * Experimental support for Git worktrees (in Git 2.5+) #546 (@sinbad) * Experimental extension support. #486 (@ryansimmen) ## v0.5.4 (30 July, 2015) * Ensure `git lfs uninit` cleans your git config thoroughly. #530 (@technoweenie) * Fix issue with asking `git-credentials` for auth details after getting them from the SSH command. #534 (@technoweenie) ## v0.5.3 (23 July, 2015) * `git lfs fetch` bugs #429 (@rubyist) * Push can crash on 32 bit architectures #450 (@rubyist) * Improved SSH support #404, #464 (@sinbad, @technoweenie) * Support 307 redirects with relative url #442 (@sinbad) * Fix `init` issues when upgrading #446 #451 #452 #465 (@technoweenie, @rubyist) * Support chunked Transfer-Encoding #386 (@ryansimmen) * Fix issue with pushing deleted objects #461 (@technoweenie) * Teach `git lfs push` how to send specific objects #449 (@larsxschneider) * Update error message when attempting to push objects that don't exist in `.git/lfs/objects` #447 (@technoweenie) * Fix bug in HTTP client when response body is nil #472 #488 (@rubyist, @technoweenie) * `-crlf` flag in gitattributes is deprecated #475 (@technoweenie) * Improvements to the CentOS and Debian build and package scripts (@andyneff, @ssgelm) ## v0.5.2 (19 June, 2015) * Add `git lfs fetch` command for downloading objects. 
#285 (@rubyist) * Fix `git lfs track` issues when run outside of a git repository #312, #323 (@michael-k, @Aorjoa) * Fix `git lfs track` for paths with spaces in them #327 (@technoweenie) * Fix `git lfs track` by writing relative paths to .gitattributes #356 (@michael-k) * Fix `git lfs untrack` so it doesn't remove entries incorrectly from .gitattributes #398 (@michael-k) * Fix `git lfs clean` bug with zero length files #346 (@technoweenie) * Add `git lfs fsck` #373 (@zeroshirts, @michael-k) * The Git pre-push warns if Git LFS is not installed #339 (@rubyist) * Fix Content-Type header sent by the HTTP client #329 (@joerg) * Improve performance tracing while scanning refs #311 (@michael-k) * Fix detection of LocalGitDir and LocalWorkingDir #312 #354 #361 (@michael-k) * Fix inconsistent file mode bits for directories created by Git LFS #364 (@michael-k) * Optimize shell execs #377, #382, #391 (@bozaro) * Collect HTTP transfer stats #366, #400 (@rubyist) * Support GIT_DIR and GIT_WORK_TREE #370 (@michael-k) * Hide Git application window in Windows #381 (@bozaro) * Add support for configured URLs containing credentials per RFC1738 #408 (@ewbankkit, @technoweenie) * Add experimental support for batch API calls #285 (@rubyist) * Improve linux build instructions for CentOS and Debian. #299 #309 #313 #332 (@jsh, @ssgelm, @andyneff) ## v0.5.1 (30 April, 2015) * Fix Windows install.bat script. #223 (@PeterDaveHello) * Fix bug where `git lfs clean` will clean Git LFS pointers too #271 (@technoweenie) * Better timeouts for the HTTP client #215 (@Mistobaan) * Concurrent uploads through `git lfs push` #258 (@rubyist) * Fix `git lfs smudge` behavior with zero-length file in `.git/lfs/objects` #267 (@technoweenie) * Separate out pre-push hook behavior from `git lfs push` #263 (@technoweenie) * Add diff/merge properties to .gitattributes #265 (@technoweenie) * Respect `GIT_TERMINAL_PROMPT ` #257 (@technoweenie) * Fix CLI progress bar output #185 (@technoweenie) * Fail fast in `clean` and `smudge` commands when run without STDIN #264 (@technoweenie) * Fix shell quoting in pre-push hook. #235 (@mhagger) * Fix progress bar output during file uploads. #185 (@technoweenie) * Change `remote.{name}.lfs_url` to `remote.{name}.lfsurl` #237 (@technoweenie) * Swap `git config` order. #245 (@technoweenie) * New `git lfs pointer` command for generating and comparing pointers #246 (@technoweenie) * Follow optional "href" property from git-lfs-authenticate SSH command #247 (@technoweenie) * `.git/lfs/objects` spec clarifications: #212 (@rtyley), #244 (@technoweenie) * man page updates: #228 (@mhagger) * pointer spec clarifications: #246 (@technoweenie) * Code comments for the untrack command: #225 (@thekafkaf) ## v0.5.0 (10 April, 2015) * Initial public release git-lfs-3.6.1/CODE-OF-CONDUCT.md000066400000000000000000000162771472372047300155470ustar00rootroot00000000000000# Git LFS Code of Conduct This code of conduct outlines our expectations for participants within the Git LFS community, as well as steps to reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all and expect our code of conduct to be honored. Anyone who violates this code of conduct may be banned from the community. Our open source community strives to: * **Be friendly and patient.** * **Be welcoming**: We strive to be a community that welcomes and supports people of all backgrounds and identities. 
This includes, but is not limited to members of any race, ethnicity, culture, national origin, colour, immigration status, social and economic class, educational level, sex, sexual orientation, gender identity and expression, age, size, family status, political belief, religion, and mental and physical ability. * **Be considerate**: Your work will be used by other people, and you in turn will depend on the work of others. Any decision you take will affect users and colleagues, and you should take those consequences into account when making decisions. Remember that we're a world-wide community, so you might not be communicating in someone else's primary language. * **Be respectful**: Not all of us will agree all the time, but disagreement is no excuse for poor behavior and poor manners. We might all experience some frustration now and then, but we cannot allow that frustration to turn into a personal attack. It’s important to remember that a community where people feel uncomfortable or threatened is not a productive one. * **Be careful in the words that we choose**: we are a community of professionals, and we conduct ourselves professionally. Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behavior aren't acceptable. * **Try to understand why we disagree**: Disagreements, both social and technical, happen all the time. It is important that we resolve disagreements and differing views constructively. Remember that we’re different. The strength of our community comes from its diversity, people from a wide range of backgrounds. Different people have different perspectives on issues. Being unable to understand why someone holds a viewpoint doesn’t mean that they’re wrong. Don’t forget that it is human to err and blaming each other doesn’t get us anywhere. Instead, focus on helping to resolve issues and learning from mistakes. ## Definitions Harassment includes, but is not limited to: - Offensive comments related to gender, gender identity and expression, sexual orientation, disability, mental illness, neuro(a)typicality, physical appearance, body size, race, age, regional discrimination, political or religious affiliation - Unwelcome comments regarding a person’s lifestyle choices and practices, including those related to food, health, parenting, drugs, and employment - Deliberate misgendering. This includes deadnaming or persistently using a pronoun that does not correctly reflect a person's gender identity. 
You must address people by the name they give you when not addressing them by their username or handle - Physical contact and simulated physical contact (eg, textual descriptions like “*hug*” or “*backrub*”) without consent or after a request to stop - Threats of violence, both physical and psychological - Incitement of violence towards any individual, including encouraging a person to commit suicide or to engage in self-harm - Deliberate intimidation - Stalking or following - Harassing photography or recording, including logging online activity for harassment purposes - Sustained disruption of discussion - Unwelcome sexual attention, including gratuitous or off-topic sexual images or behaviour - Pattern of inappropriate social contact, such as requesting/assuming inappropriate levels of intimacy with others - Continued one-on-one communication after requests to cease - Deliberate “outing” of any aspect of a person’s identity without their consent except as necessary to protect others from intentional abuse - Publication of non-harassing private communication Our open source community prioritizes marginalized people’s safety over privileged people’s comfort. We will not act on complaints regarding: - ‘Reverse’ -isms, including ‘reverse racism,’ ‘reverse sexism,’ and ‘cisphobia’ - Reasonable communication of boundaries, such as “leave me alone,” “go away,” or “I’m not discussing this with you” - Refusal to explain or debate social justice concepts - Communicating in a ‘tone’ you don’t find congenial - Criticizing racist, sexist, cissexist, or otherwise oppressive behavior or assumptions ### Diversity Statement We encourage everyone to participate and are committed to building a community for all. Although we will fail at times, we seek to treat everyone both as fairly and equally as possible. Whenever a participant has made a mistake, we expect them to take responsibility for it. If someone has been harmed or offended, it is our responsibility to listen carefully and respectfully, and do our best to right the wrong. Although this list cannot be exhaustive, we explicitly honor diversity in age, gender, gender identity or expression, culture, ethnicity, language, national origin, political beliefs, profession, race, religion, sexual orientation, socioeconomic status, and technical ability. We will not tolerate discrimination based on any of the protected characteristics above, including participants with disabilities. ### Reporting Issues If you experience or witness unacceptable behavior—or have any other concerns—please report it by contacting us via opensource@github.com. All reports will be handled with discretion. In your report please include: - Your contact information. - Names (real, nicknames, or pseudonyms) of any individuals involved. If there are additional witnesses, please include them as well. Your account of what occurred, and if you believe the incident is ongoing. If there is a publicly available record (e.g. a mailing list archive or a public IRC logger), please include a link. - Any additional information that may be helpful. After filing a report, a representative will contact you personally, review the incident, follow up with any additional questions, and make a decision as to how to respond. If the person who is harassing you is part of the response team, they will recuse themselves from handling your incident. If the complaint originates from a member of the response team, it will be handled by a different member of the response team. 
We will respect confidentiality requests for the purpose of protecting victims of abuse. ### Attribution & Acknowledgements We all stand on the shoulders of giants across many open source communities. We'd like to thank the communities and projects that established code of conducts and diversity statements as our inspiration: * [Django](https://www.djangoproject.com/conduct/reporting/) * [Python](https://www.python.org/community/diversity/) * [Ubuntu](http://www.ubuntu.com/about/about-ubuntu/conduct) * [Contributor Covenant](http://contributor-covenant.org/) * [Geek Feminism](http://geekfeminism.org/about/code-of-conduct/) * [Citizen Code of Conduct](http://citizencodeofconduct.org/) git-lfs-3.6.1/CONTRIBUTING.md000066400000000000000000000171371472372047300153410ustar00rootroot00000000000000## Contributing to Git Large File Storage Hi there! We're thrilled that you'd like to contribute to this project. Your help is essential for making it the best it can be. Contributions to this project are [released](https://help.github.com/articles/github-terms-of-service/#6-contributions-under-repository-license) to the public under the [project's open source license](LICENSE.md). This project adheres to the [Open Code of Conduct](./CODE-OF-CONDUCT.md). By participating, you are expected to uphold this code. ## Feature Requests Feature requests are welcome, but will have a much better chance of being accepted if they meet the first principles for the project. Git LFS is intended for end users, not Git experts. It should fit into the standard workflow as much as possible, and require little client configuration. * Large objects are pushed to Git LFS servers during git push. * Large objects are downloaded during git checkout. * Git LFS servers are linked to Git remotes by default. Git hosts can support users without requiring them to set up anything extra. Users can access different Git LFS servers like they can with different Git remotes. * Upload and download requests should use the same form of authentication built into Git: SSH through public keys, and HTTPS through Git credential helpers. * Git LFS servers use a JSON API designed around progressive enhancement. Servers can simply host off cloud storage, or implement more efficient methods of transferring data. Since the focus for the project is on end users, we're generally hesitant about introducing new features that make data loss easy or are prone to misuse. However, we're not necessarily opposed to adding generally applicable customizability or features for advanced users if they don't conflict with other project goals. ## Project Management The Git LFS project is managed completely through this open source project. The [milestones][] show the high level items that are prioritized for future work. Suggestions for major features should be submitted as a pull request that adds a markdown file to `docs/proposals` discussing the feature. This gives the community time to discuss it before a lot of code has been written. [milestones]: https://github.com/git-lfs/git-lfs/milestones The Git LFS teams mark issues and pull requests with the following labels: * `bug` - An issue describing a bug. * `enhancement` - An issue for a possible new feature. * `review` - A pull request ready to be reviewed. * `release` - A checklist issue showing items marked for an upcoming release. ## Branching strategy In general, contributors should develop on branches based off of `main` and pull requests should be to `main`. ## Submitting a pull request 1. 
[Fork][fork] and clone the repository 1. Configure and install the dependencies: `make` 1. Make sure the tests pass on your machine: `make test` 1. Create a new branch based on `main`: `git checkout -b <my-branch-name> main` 1. Make your change, add tests, and make sure the tests still pass 1. Push to your fork and [submit a pull request][pr] from your branch to `main` 1. Pat yourself on the back and wait for your pull request to be reviewed Here are a few things you can do that will increase the likelihood of your pull request being accepted: * Follow the [style guide][style] where possible. * Write tests. * Update documentation as necessary. Commands have [man pages](./docs/man). * Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests. * Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html). * Explain the rationale for your change in the pull request. You can often use part of a good commit message as a starting point. ## Discussions [Our discussions](https://github.com/git-lfs/git-lfs/discussions) are the perfect place to ask a question if you're not sure on something, provide feedback that isn't a bug report or feature request, or learn about use cases or best practices with Git LFS. There's even a search box to help you see if someone has already answered your question! You can also check [the FAQ](https://github.com/git-lfs/git-lfs/blob/main/docs/man/git-lfs-faq.adoc) to see if your question is well known and already has an easy answer. ## Issues If you think you've found a bug or have an issue, we'd love to hear about it! Here are some tips for getting your question answered as quickly as possible: * It's helpful if your issue includes the output of `git lfs env`, plus any relevant information about platform or configuration (e.g., container or CI usage, Cygwin, WSL, or non-Basic authentication). * Take a look at the [troubleshooting](https://github.com/git-lfs/git-lfs/wiki/Troubleshooting) and [FAQ](https://github.com/git-lfs/git-lfs/wiki/FAQ) pages on the wiki. We update them from time to time with information on how to track down problems. If it seems relevant, include any information you've learned by following those steps. * If you're having problems with GitHub's server-side LFS support, it's best to reach out to [GitHub's support team](https://github.com/contact) to get help. We aren't able to address GitHub-specific issues in this project, but the GitHub support team will do their best to help you out. * If you see an old issue that's closed as fixed, but you're still experiencing the problem on your system, please open a new issue. The problem you're seeing is likely different, at least in the way it works internally, and we can help best when we have a new issue with all the information. ## Building ### Prerequisites Git LFS depends on having a working Go development environment. We officially support the latest version of Go, although we try not to break backwards compatibility with older versions if it's possible to avoid doing so. On RHEL etc. e.g.
Red Hat Enterprise Linux Server release 7.2 (Maipo), you will need the minimum packages installed to build Git LFS: ```ShellSession $ sudo yum install gcc $ sudo yum install perl-Digest-SHA ``` In order to run the RPM build `rpm/build_rpms.bsh` you will also need to: ```ShellSession $ sudo yum install ruby-devel ``` (note on an AWS instance you may first need to `sudo yum-config-manager --enable rhui-REGION-rhel-server-optional`) ### Building Git LFS The easiest way to download Git LFS for making changes is `git clone`: ```ShellSession $ git clone git@github.com:git-lfs/git-lfs.git $ cd git-lfs ``` From here, run `make` to build Git LFS in the `./bin` directory. Before submitting changes, be sure to run the Go tests and the shell integration tests: ```ShellSession $ make test # runs just the Go tests $ cd t && make test # runs the shell tests in ./t $ script/cibuild # runs everything, with verbose debug output ``` ## Updating 3rd party packages 1. Update `go.mod` and `go.sum`. You can ensure the latter is up-to-date by using Go tools instead of manually editing `go.mod` (e.g., `go get gopath@version`) or by running `go mod tidy` or `make go.sum`. 1. Commit the change. 1. Submit a pull request. ## Releasing If you are the current maintainer, see [the release howto](./docs/howto/release-git-lfs.md) for how to perform a release. ## Resources - [Contributing to Open Source on GitHub](https://guides.github.com/activities/contributing-to-open-source/) - [Using Pull Requests](https://help.github.com/articles/using-pull-requests/) - [GitHub Help](https://help.github.com) [fork]: https://github.com/git-lfs/git-lfs/fork [pr]: https://github.com/git-lfs/git-lfs/compare [style]: https://github.com/golang/go/wiki/CodeReviewComments git-lfs-3.6.1/INSTALLING.md000066400000000000000000000057721472372047300150760ustar00rootroot00000000000000# Installing on Linux using packagecloud [packagecloud](https://packagecloud.io) hosts [`git-lfs` packages](https://packagecloud.io/github/git-lfs) for popular Linux distributions with apt/deb and yum/rpm based package-managers. Installing from packagecloud is reasonably straightforward and involves two steps: ## 1. Adding the packagecloud repository packagecloud provides scripts to automate the process of configuring the package repository on your system, importing signing-keys etc. These scripts must be run as root (e.g. via `sudo`), and you should review them first. The scripts are: * apt/deb repositories: https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh * yum/rpm repositories: https://packagecloud.io/install/repositories/github/git-lfs/script.rpm.sh The scripts check your Linux distribution and version, and use those parameters to create the best repository URL. If you are running one of the distributions listed for the latest version of Git LFS at [packagecloud](https://packagecloud.io/github/git-lfs) e.g. `debian/jessie`, `el/7`, you can run the script without parameters: apt/deb repos: `curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo bash` yum/rpm repos: `curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.rpm.sh | sudo bash` If you are running a distribution which does not match exactly a repository uploaded for Git LFS, but for which there is a repository for a compatible upstream distribution, you can either run the script with some additional parameters, or run it and then manually-correct the resulting repository URLs.
See [#1074](https://github.com/git-lfs/git-lfs/issues/1074) for details. If you are running LinuxMint 17.1 Rebecca, which is downstream of Ubuntu Trusty and Debian Jessie, you can run: `curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | os=debian dist=jessie sudo -E bash` The `os` and `dist` variables passed-in will override what would be detected for your system and force the selection of the upstream distribution's repository. You may also be able to run the following to automatically detect the dist for Ubuntu based distributions such as Pop OS: ``` (. /etc/lsb-release && curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo env os=ubuntu dist="${DISTRIB_CODENAME}" bash) ``` ## 2. Installing packages With the packagecloud repository configured for your system, you can install Git LFS: * apt/deb: `sudo apt-get install git-lfs` * yum/rpm: `sudo yum install git-lfs` ## A note about proxies Several of the commands above assume internet access and use `sudo`. If your host is behind a proxy-server that is required for internet access, you may depend on environment-variables `http_proxy` or `https_proxy` being set, and these might not survive the switch to root with `sudo`, which resets environment by-default. To get around this, you can run `sudo` with the `-E` switch, `sudo -E ...`, which retains environment variables. git-lfs-3.6.1/LICENSE.md000066400000000000000000000054301472372047300145050ustar00rootroot00000000000000MIT License Copyright (c) 2014-2021 GitHub, Inc. and Git LFS contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Portions of the subprocess and tools directories are copied from Go and are under the following license: Copyright (c) 2010 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Note that Git LFS uses components from other Go modules (included in `vendor/`) which are under different licenses. See those LICENSE files for details. git-lfs-3.6.1/Makefile000066400000000000000000000661511472372047300145500ustar00rootroot00000000000000# GIT_LFS_SHA is the '--short'-form SHA1 of the current revision of Git LFS. GIT_LFS_SHA ?= $(shell env -u GIT_TRACE git rev-parse --short HEAD) # VERSION is the longer-form describe output of the current revision of Git LFS, # used for identifying intermediate releases. # # If Git LFS is being built for a published release, VERSION and GIT_LFS_SHA # should be identical. VERSION ?= $(shell env -u GIT_TRACE git describe HEAD) # PREFIX is VERSION without the leading v, for use in archive prefixes. PREFIX ?= $(patsubst v%,git-lfs-%,$(VERSION)) # GO is the name of the 'go' binary used to compile Git LFS. GO ?= go # GOTOOLCHAIN is an environment variable which, when set to 'local', # prevents Go from downloading and running non-local versions of itself. export GOTOOLCHAIN = local # GO_TEST_EXTRA_ARGS are extra arguments given to invocations of 'go test'. # # Examples include: # # make test GO_TEST_EXTRA_ARGS=-v # make test GO_TEST_EXTRA_ARGS='-run TestMyExample' GO_TEST_EXTRA_ARGS = # BUILTIN_LD_FLAGS are the internal flags used to pass to the linker. By default # the config.GitCommit variable is always set via this variable, and # DWARF-stripping is enabled unless DWARF=YesPlease. BUILTIN_LD_FLAGS = ifneq ("$(VENDOR)","") BUILTIN_LD_FLAGS += -X 'github.com/git-lfs/git-lfs/v3/config.Vendor=$(VENDOR)' endif BUILTIN_LD_FLAGS += -X github.com/git-lfs/git-lfs/v3/config.GitCommit=$(GIT_LFS_SHA) ifneq ("$(DWARF)","YesPlease") BUILTIN_LD_FLAGS += -s BUILTIN_LD_FLAGS += -w endif # EXTRA_LD_FLAGS are given by the caller, and are passed to the Go linker after # BUILTIN_LD_FLAGS are processed. By default the system LDFLAGS are passed. ifdef LDFLAGS EXTRA_LD_FLAGS ?= -extldflags ${LDFLAGS} endif # LD_FLAGS is the union of the above two BUILTIN_LD_FLAGS and EXTRA_LD_FLAGS. LD_FLAGS = $(BUILTIN_LD_FLAGS) $(EXTRA_LD_FLAGS) # BUILTIN_GC_FLAGS are the internal flags used to pass compiler. BUILTIN_GC_FLAGS = # EXTRA_GC_FLAGS are the caller-provided flags to pass to the compiler. EXTRA_GC_FLAGS = # GC_FLAGS are the union of the above two BUILTIN_GC_FLAGS and EXTRA_GC_FLAGS. GC_FLAGS = $(BUILTIN_GC_FLAGS) $(EXTRA_GC_FLAGS) # RONN is the name of the 'ronn' program used to generate man pages. RONN ?= ronn # RONN_EXTRA_ARGS are extra arguments given to the $(RONN) program when invoked. RONN_EXTRA_ARGS ?= # ASCIIDOCTOR is the name of the 'asciidoctor' program used to generate man pages. ASCIIDOCTOR ?= asciidoctor # ASCIIDOCTOR_EXTRA_ARGS are extra arguments given to the $(ASCIIDOCTOR) program when invoked. 
ASCIIDOCTOR_EXTRA_ARGS ?= -a reproducible # GREP is the name of the program used for regular expression matching, or # 'grep' if unset. GREP ?= grep # XARGS is the name of the program used to turn stdin into program arguments, or # 'xargs' if unset. XARGS ?= xargs # GOIMPORTS is the name of the program formatter used before compiling. GOIMPORTS ?= goimports # GOIMPORTS_EXTRA_OPTS are the default options given to the $(GOIMPORTS) # program. GOIMPORTS_EXTRA_OPTS ?= -w -l # TAR is the tar command, either GNU or BSD (libarchive) tar. TAR ?= tar TAR_XFORM_ARG ?= $(shell $(TAR) --version | grep -q 'GNU tar' && echo '--xform' || echo '-s') TAR_XFORM_CMD ?= $(shell $(TAR) --version | grep -q 'GNU tar' && echo 's') # CERT_SHA1 is the SHA-1 hash of the Windows code-signing cert to use. The # actual signature is made with SHA-256. CERT_SHA1 ?= 30a531ed3a246d3d07a4273adaef31552bf6473a # CERT_FILE is the PKCS#12 file holding the certificate. CERT_FILE ?= # CERT_PASS is the password for the certificate. It must not contain # double-quotes. CERT_PASS ?= # CERT_ARGS are additional arguments to pass when signing Windows binaries. ifneq ("$(CERT_FILE)$(CERT_PASS)","") CERT_ARGS ?= -f "$(CERT_FILE)" -p "$(CERT_PASS)" else CERT_ARGS ?= -sha1 $(CERT_SHA1) endif # DARWIN_CERT_ID is a portion of the common name of the signing certificate. DARWIN_CERT_ID ?= # DARWIN_KEYCHAIN_ID is the name of the keychain (with suffix) where the # certificate is located. DARWIN_KEYCHAIN_ID ?= CI.keychain export DARWIN_DEV_USER DARWIN_DEV_PASS DARWIN_DEV_TEAM # SOURCES is a listing of all .go files in this and child directories, excluding # that in vendor. SOURCES = $(shell find . -type f -name '*.go' | grep -v vendor) # MSGFMT is the GNU gettext msgfmt binary. MSGFMT ?= msgfmt # PO is a list of all the po (gettext source) files. PO = $(wildcard po/*.po) # MO is a list of all the mo (gettext compiled) files to be built. MO = $(patsubst po/%.po,po/build/%.mo,$(PO)) # XGOTEXT is the string extractor for gotext. XGOTEXT ?= xgotext # CODESIGN is the macOS signing tool. CODESIGN ?= codesign # SIGNTOOL is the Windows signing tool. SIGNTOOL ?= signtool.exe # FORCE_LOCALIZE forces localization to be run if set to non-empty. FORCE_LOCALIZE ?= # PKGS is a listing of packages that are considered to be a part of Git LFS, and # are used in package-specific commands, such as the 'make test' targets. For # example: # # make test # run 'go test' in all packages # make PKGS='config git/githistory' test # run 'go test' in config and # # git/githistory # # By default, it is a listing of all packages in Git LFS. When new packages (or # sub-packages) are created, they should be added here. ifndef PKGS PKGS = PKGS += commands PKGS += config PKGS += creds PKGS += errors PKGS += filepathfilter PKGS += fs PKGS += git PKGS += git/gitattr PKGS += git/githistory PKGS += git PKGS += lfs PKGS += lfsapi PKGS += lfshttp PKGS += locking PKGS += ssh PKGS += subprocess PKGS += tasklog PKGS += tools PKGS += tools/humanize PKGS += tools/kv PKGS += tr PKGS += tq endif # X is the platform-specific extension for Git LFS binaries. It is automatically # set to .exe on Windows, and the empty string on all other platforms. It may be # overridden. # # BUILD_MAIN is the main ".go" file that contains func main() for Git LFS. On # macOS and other non-Windows platforms, it is required that a specific # entrypoint be given, hence the below conditional.
On Windows, it is required # that an entrypoint not be given so that goversioninfo can successfully embed # the resource.syso file (for more, see below). # # BSDTAR is BSD (libarchive) tar. ifeq ($(OS),Windows_NT) X ?= .exe BUILD_MAIN ?= BSDTAR ?= C:/Windows/system32/tar.exe else X ?= BUILD_MAIN ?= ./git-lfs.go BSDTAR ?= $(shell $(TAR) --version | grep -q 'GNU tar' && echo bsdtar || echo $(TAR)) endif # BUILD is a macro used to build a single binary of Git LFS using the above # LD_FLAGS and GC_FLAGS. # # It takes three arguments: # # $(1) - a valid GOOS value, or empty-string # $(2) - a valid GOARCH value, or empty-string # $(3) - an optional program extension. If $(3) is given as '-foo', then the # program will be written to bin/git-lfs-foo. # # It uses BUILD_MAIN as defined above to specify the entrypoint for building Git # LFS. BUILD = GOOS=$(1) GOARCH=$(2) \ $(GO) build \ -ldflags="$(LD_FLAGS)" \ -gcflags="$(GC_FLAGS)" \ -trimpath \ -o ./bin/git-lfs$(3) $(BUILD_MAIN) # BUILD_TARGETS is the set of all platforms and architectures that Git LFS is # built for. BUILD_TARGETS = \ bin/git-lfs-darwin-amd64 \ bin/git-lfs-darwin-arm64 \ bin/git-lfs-linux-arm \ bin/git-lfs-linux-arm64 \ bin/git-lfs-linux-amd64 \ bin/git-lfs-linux-ppc64le \ bin/git-lfs-linux-riscv64 \ bin/git-lfs-linux-s390x \ bin/git-lfs-linux-loong64 \ bin/git-lfs-linux-386 \ bin/git-lfs-freebsd-amd64 \ bin/git-lfs-freebsd-386 \ bin/git-lfs-windows-amd64.exe \ bin/git-lfs-windows-386.exe \ bin/git-lfs-windows-arm64.exe # mangen is a shorthand for ensuring that commands/mancontent_gen.go is kept # up-to-date with the contents of docs/man/*.ronn. .PHONY : mangen mangen : commands/mancontent_gen.go # commands/mancontent_gen.go is generated by running 'go generate' on package # 'commands' of Git LFS. It depends upon the contents of the 'docs' directory # and converts those manpages into code. commands/mancontent_gen.go : $(wildcard docs/man/*.adoc) GOOS= GOARCH= $(GO) generate github.com/git-lfs/git-lfs/v3/commands # trgen is a shorthand for ensuring that tr/tr_gen.go is kept up-to-date with # the contents of po/build/*.mo. .PHONY : trgen trgen : tr/tr_gen.go # tr/tr_gen.go is generated by running 'go generate' on package # 'tr' of Git LFS. It depends upon the contents of the 'po' directory # and converts the .mo files. tr/tr_gen.go : $(MO) GOOS= GOARCH= $(GO) generate github.com/git-lfs/git-lfs/v3/tr po/build: mkdir -p po/build # These targets build the MO files. po/build/%.mo: po/%.po po/build ifeq ($(FORCE_LOCALIZE),) if command -v $(MSGFMT) >/dev/null 2>&1; \ then \ $(MSGFMT) -o $@ $<; \ fi else $(MSGFMT) -o $@ $< endif po/i-reverse.po: po/default.pot script/gen-i-reverse $< $@ po/default.pot: if command -v $(XGOTEXT) >/dev/null 2>&1; \ then \ $(XGOTEXT) -in . -exclude .git,.github,vendor -out po -v; \ fi # Targets 'all' and 'build' build binaries of Git LFS for the above release # matrix. .PHONY : all build all build : $(BUILD_TARGETS) # The following bin/git-lfs-% targets make a single binary compilation of Git # LFS for a specific operating system and architecture pair. # # They function by translating target names into arguments for the above BUILD # builtin, and appending the appropriate suffix to the build target. # # On Windows, they also depend on the resource.syso target, which installs and # embeds the versioninfo into the binary. 
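#
# For example, a single entry from the build matrix below can be built on its
# own (an illustrative invocation, not a new target):
#
#   make bin/git-lfs-linux-arm64
#
# or, alternatively, via the default bin/git-lfs target with explicit
# GOOS/GOARCH values:
#
#   make GOOS=linux GOARCH=arm64 bin/git-lfs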
bin/git-lfs-darwin-amd64 : $(SOURCES) mangen trgen $(call BUILD,darwin,amd64,-darwin-amd64) bin/git-lfs-darwin-arm64 : $(SOURCES) mangen trgen $(call BUILD,darwin,arm64,-darwin-arm64) bin/git-lfs-linux-arm : $(SOURCES) mangen trgen GOARM=5 $(call BUILD,linux,arm,-linux-arm) bin/git-lfs-linux-arm64 : $(SOURCES) mangen trgen $(call BUILD,linux,arm64,-linux-arm64) bin/git-lfs-linux-amd64 : $(SOURCES) mangen trgen $(call BUILD,linux,amd64,-linux-amd64) bin/git-lfs-linux-ppc64le : $(SOURCES) mangen trgen $(call BUILD,linux,ppc64le,-linux-ppc64le) bin/git-lfs-linux-riscv64 : $(SOURCES) mangen trgen $(call BUILD,linux,riscv64,-linux-riscv64) bin/git-lfs-linux-loong64 : $(SOURCES) mangen trgen $(call BUILD,linux,loong64,-linux-loong64) bin/git-lfs-linux-s390x : $(SOURCES) mangen trgen $(call BUILD,linux,s390x,-linux-s390x) bin/git-lfs-linux-386 : $(SOURCES) mangen trgen $(call BUILD,linux,386,-linux-386) bin/git-lfs-freebsd-amd64 : $(SOURCES) mangen trgen $(call BUILD,freebsd,amd64,-freebsd-amd64) bin/git-lfs-freebsd-386 : $(SOURCES) mangen trgen $(call BUILD,freebsd,386,-freebsd-386) bin/git-lfs-windows-amd64.exe : resource.syso $(SOURCES) mangen trgen $(call BUILD,windows,amd64,-windows-amd64.exe) bin/git-lfs-windows-386.exe : resource.syso $(SOURCES) mangen trgen $(call BUILD,windows,386,-windows-386.exe) bin/git-lfs-windows-arm64.exe : resource.syso $(SOURCES) mangen trgen $(call BUILD,windows,arm64,-windows-arm64.exe) # .DEFAULT_GOAL sets the operating system-appropriate Git LFS binary as the # default output of 'make'. .DEFAULT_GOAL := bin/git-lfs$(X) # bin/git-lfs targets the default output of Git LFS on non-Windows operating # systems, and respects the build knobs as above. bin/git-lfs : $(SOURCES) fmt mangen trgen $(call BUILD,$(GOOS),$(GOARCH),) # bin/git-lfs.exe targets the default output of Git LFS on Windows systems, and # respects the build knobs as above. bin/git-lfs.exe : $(SOURCES) resource.syso mangen trgen $(call BUILD,$(GOOS),$(GOARCH),.exe) # resource.syso installs the 'goversioninfo' command and uses it in order to # generate a binary that has information included necessary to create the # Windows installer. # # Generating a new resource.syso is a pure function of the contents in the # prerequisites listed below. resource.syso : \ versioninfo.json script/windows-installer/git-lfs-logo.bmp \ script/windows-installer/git-lfs-logo.ico \ script/windows-installer/git-lfs-wizard-image.bmp $(GO) generate # RELEASE_TARGETS is the set of all release artifacts that we generate over a # particular release. They each have a corresponding entry in BUILD_TARGETS as # above. # # Unlike BUILD_TARGETS above, each of the below create a compressed directory # containing the matching binary, as well as the contents of RELEASE_INCLUDES # below. 
# # To build a specific release, execute the following: # # make bin/releases/git-lfs-darwin-amd64-$(git describe HEAD).tar.gz # # To build a specific release with a custom VERSION suffix, run the following: # # make VERSION=my-version bin/releases/git-lfs-darwin-amd64-my-version.tar.gz RELEASE_TARGETS = \ bin/releases/git-lfs-darwin-amd64-$(VERSION).zip \ bin/releases/git-lfs-darwin-arm64-$(VERSION).zip \ bin/releases/git-lfs-linux-arm-$(VERSION).tar.gz \ bin/releases/git-lfs-linux-arm64-$(VERSION).tar.gz \ bin/releases/git-lfs-linux-amd64-$(VERSION).tar.gz \ bin/releases/git-lfs-linux-ppc64le-$(VERSION).tar.gz \ bin/releases/git-lfs-linux-riscv64-$(VERSION).tar.gz \ bin/releases/git-lfs-linux-s390x-$(VERSION).tar.gz \ bin/releases/git-lfs-linux-loong64-$(VERSION).tar.gz \ bin/releases/git-lfs-linux-386-$(VERSION).tar.gz \ bin/releases/git-lfs-freebsd-amd64-$(VERSION).tar.gz \ bin/releases/git-lfs-freebsd-386-$(VERSION).tar.gz \ bin/releases/git-lfs-windows-amd64-$(VERSION).zip \ bin/releases/git-lfs-windows-386-$(VERSION).zip \ bin/releases/git-lfs-windows-arm64-$(VERSION).zip \ bin/releases/git-lfs-$(VERSION).tar.gz # RELEASE_INCLUDES are the names of additional files that are added to each # release artifact. RELEASE_INCLUDES = README.md CHANGELOG.md man # release is a phony target that builds all of the release artifacts, and then # shows the SHA 256 signature of each. # # To build all of the release binaries for a given Git LFS release: # # make release .PHONY : release release : $(RELEASE_TARGETS) shasum -a 256 $(RELEASE_TARGETS) # bin/releases/git-lfs-%-$(VERSION).tar.gz generates a gzip-compressed TAR of # the non-Windows and non-macOS release artifacts. # # It includes all of RELEASE_INCLUDES, as well as script/install.sh. bin/releases/git-lfs-%-$(VERSION).tar.gz : \ $(RELEASE_INCLUDES) bin/git-lfs-% script/install.sh @mkdir -p bin/releases $(TAR) $(TAR_XFORM_ARG) '$(TAR_XFORM_CMD)!bin/git-lfs-.*!$(PREFIX)/git-lfs!' \ $(TAR_XFORM_ARG) '$(TAR_XFORM_CMD)!script/!$(PREFIX)/!' \ $(TAR_XFORM_ARG) '$(TAR_XFORM_CMD)!\(.*\)\.md!$(PREFIX)/\1.md!' \ $(TAR_XFORM_ARG) '$(TAR_XFORM_CMD)!man!$(PREFIX)/man!' \ --posix -czf $@ $^ # bin/releases/git-lfs-darwin-$(VERSION).zip generates a ZIP compression of all # of the macOS release artifacts. # # It includes all of the RELEASE_INCLUDES, as well as script/install.sh. bin/releases/git-lfs-darwin-%-$(VERSION).zip : \ $(RELEASE_INCLUDES) bin/git-lfs-darwin-% script/install.sh @mkdir -p bin/releases $(BSDTAR) --format zip \ -s '!bin/git-lfs-.*!$(PREFIX)/git-lfs!' \ -s '!script/!$(PREFIX)/!' \ -s '!\(.*\)\.md!$(PREFIX)/\1.md!' \ -s '!man!$(PREFIX)/man!' \ -cf $@ $^ # bin/releases/git-lfs-windows-$(VERSION).zip generates a ZIP compression of all # of the Windows release artifacts. # # It includes all of the RELEASE_INCLUDES, and converts LF-style line endings to # CRLF in the non-binary components of the artifact. bin/releases/git-lfs-windows-%-$(VERSION).zip : $(RELEASE_INCLUDES) bin/git-lfs-windows-%.exe @mkdir -p bin/releases # Windows's bsdtar doesn't support -s, so do the same thing as for Darwin, but # by hand. temp=$$(mktemp -d); \ file="$$PWD/$@" && \ mkdir -p "$$temp/$(PREFIX)/man" && \ cp -r $^ "$$temp/$(PREFIX)" && \ (cd "$$temp" && $(BSDTAR) --format zip -cf "$$file" $(PREFIX)) && \ $(RM) -r "$$temp" # bin/releases/git-lfs-$(VERSION).tar.gz generates a tarball of the source code. # # This is useful for third parties who wish to have a bit-for-bit identical # source archive to download and verify cryptographically. 
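#
# As an illustrative check (not a rule in this Makefile), the digest of the
# resulting source archive can be recomputed with the same tool the 'release'
# target uses:
#
#   shasum -a 256 bin/releases/git-lfs-$(git describe HEAD).tar.gz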
bin/releases/git-lfs-$(VERSION).tar.gz :
	git archive -o $@ --prefix=$(PREFIX)/ --format tar.gz $(VERSION)

# release-linux is a target that builds Linux packages. It must be run on a
# system with Docker that can run Linux containers.
.PHONY : release-linux
release-linux:
	./docker/run_dockers.bsh

# release-windows-stage-1 is a target that builds the Windows Git LFS binaries
# and prepares them for signing. It must be run on a Windows machine under Git
# Bash.
.PHONY : release-windows-stage-1
release-windows-stage-1: tmp/stage1

# After this stage completes, the binaries in tmp/stage1 will be signed.
tmp/stage1:
	$(RM) -r tmp/stage1
	@mkdir -p tmp/stage1
	@# Using these particular filenames is required for the Inno Setup script to
	@# work properly.
	$(MAKE) -B GOOS=windows X=.exe GOARCH=amd64 && cp ./bin/git-lfs.exe ./git-lfs-x64.exe
	$(MAKE) -B GOOS=windows X=.exe GOARCH=386 && cp ./bin/git-lfs.exe ./git-lfs-x86.exe
	$(MAKE) -B GOOS=windows X=.exe GOARCH=arm64 && cp ./bin/git-lfs.exe ./git-lfs-arm64.exe
	mv git-lfs-x64.exe git-lfs-x86.exe git-lfs-arm64.exe tmp/stage1

# release-windows-stage-2 is a target that builds the InnoSetup installer and
# prepares it for signing. It must be run on a Windows machine under Git Bash.
.PHONY : release-windows-stage-2
release-windows-stage-2: tmp/stage2

# After this stage completes, the binaries in tmp/stage2 will be signed.
tmp/stage2: tmp/stage1
	cp tmp/stage1/*.exe .
	@# The git-lfs-windows-*.exe file will be named according to the version
	@# number in the versioninfo.json, not according to $(VERSION).
	iscc.exe script/windows-installer/inno-setup-git-lfs-installer.iss
	mv git-lfs-windows-*.exe git-lfs-windows.exe
	$(RM) -r tmp/stage2
	@mkdir -p tmp/stage2
	cp git-lfs-windows.exe tmp/stage2

# release-windows-stage-3 is a target that produces an archive from signed
# Windows binaries from the previous stages. It must be run on a Windows
# machine under Git Bash.
.PHONY : release-windows-stage-3
release-windows-stage-3: bin/releases/git-lfs-windows-assets-$(VERSION).tar.gz

bin/releases/git-lfs-windows-assets-$(VERSION).tar.gz : tmp/stage1 tmp/stage2
	mv tmp/stage1/git-lfs-x64.exe git-lfs-windows-amd64.exe
	mv tmp/stage1/git-lfs-x86.exe git-lfs-windows-386.exe
	mv tmp/stage1/git-lfs-arm64.exe git-lfs-windows-arm64.exe
	mv tmp/stage2/git-lfs-windows.exe git-lfs-windows.exe
	@# We use tar because Git Bash doesn't include zip.
	$(TAR) -czf $@ git-lfs-windows-amd64.exe git-lfs-windows-386.exe git-lfs-windows-arm64.exe git-lfs-windows.exe
	$(RM) git-lfs-windows-amd64.exe git-lfs-windows-386.exe git-lfs-windows-arm64.exe git-lfs-windows.exe

# release-windows-rebuild takes the archive produced by release-windows-stage-3
# and incorporates the signed binaries into the existing zip archives.
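#
# For example (a sketch, assuming the signed asset archive for this VERSION
# already exists under bin/releases):
#
#   make VERSION=my-version release-windows-rebuild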
.PHONY : release-windows-rebuild
release-windows-rebuild: bin/releases/git-lfs-windows-assets-$(VERSION).tar.gz
	temp=$$(mktemp -d); \
	file="$$PWD/$^"; \
	root="$$PWD" && \
	( \
		tar -C "$$temp" -xzf "$$file" && \
		for i in 386 amd64 arm64; do \
			temp2="$$(mktemp -d)" && \
			$(BSDTAR) -C "$$temp2" -xf "$$root/bin/releases/git-lfs-windows-$$i-$(VERSION).zip" && \
			rm -f "$$temp2/$(PREFIX)/"git-lfs*.exe && \
			cp "$$temp/git-lfs-windows-$$i.exe" "$$temp2/$(PREFIX)/git-lfs.exe" && \
			(cd "$$temp2" && $(BSDTAR) --format=zip -cf "$$root/bin/releases/git-lfs-windows-$$i-$(VERSION).zip" $(PREFIX)) && \
			rm -fr "$$temp2"; \
		done && \
		cp "$$temp/git-lfs-windows.exe" bin/releases/git-lfs-windows-$(VERSION).exe \
	); \
	status="$$?"; [ -n "$$temp" ] && $(RM) -r "$$temp"; exit "$$status"

# release-darwin is a target that builds and signs Darwin (macOS) binaries. It
# must be run on a macOS machine with a suitable version of Xcode.
#
# You may sign with a different certificate by specifying DARWIN_CERT_ID.
.PHONY : release-darwin
release-darwin: bin/releases/git-lfs-darwin-amd64-$(VERSION).zip bin/releases/git-lfs-darwin-arm64-$(VERSION).zip
	for i in $^; do \
		temp=$$(mktemp -d) && \
		root=$$(pwd -P) && \
		( \
			$(BSDTAR) -C "$$temp" -xf "$$i" && \
			$(CODESIGN) --keychain $(DARWIN_KEYCHAIN_ID) -s "$(DARWIN_CERT_ID)" --force --timestamp -vvvv --options runtime "$$temp/$(PREFIX)/git-lfs" && \
			$(CODESIGN) -dvvv "$$temp/$(PREFIX)/git-lfs" && \
			(cd "$$temp" && $(BSDTAR) --format zip -cf "$$root/$$i" "$(PREFIX)") && \
			$(CODESIGN) --keychain $(DARWIN_KEYCHAIN_ID) -s "$(DARWIN_CERT_ID)" --force --timestamp -vvvv --options runtime "$$i" && \
			$(CODESIGN) -dvvv "$$i" && \
			jq -e ".notarize.path = \"$$i\" | .apple_id.username = \"$(DARWIN_DEV_USER)\"" script/macos/manifest.json > "$$temp/manifest.json"; \
			for j in 1 2 3; \
			do \
				script/notarize "$$i" && break; \
			done; \
		); \
		status="$$?"; [ -n "$$temp" ] && $(RM) -r "$$temp"; [ "$$status" -eq 0 ] || exit "$$status"; \
	done

.PHONY : release-write-certificate
release-write-certificate:
	@echo "Writing certificate to $(CERT_FILE)"
	@echo "$$CERT_CONTENTS" | base64 --decode >"$$CERT_FILE"
	@printf 'Wrote %d bytes (SHA256 %s) to certificate file\n' $$(wc -c <"$$CERT_FILE") $$(shasum -ba 256 "$$CERT_FILE" | cut -d' ' -f1)

# release-import-certificate imports the given certificate into the macOS
# keychain "CI". It is not generally recommended to run this on a user system,
# since it creates a new keychain and modifies the keychain search path.
.PHONY : release-import-certificate
release-import-certificate:
	@[ -n "$(CI)" ] || { echo "Don't run this target by hand." >&2; false; }
	@echo "Creating CI keychain"
	security create-keychain -p default CI.keychain
	security set-keychain-settings CI.keychain
	security unlock-keychain -p default CI.keychain
	@echo "Importing certificate from $(CERT_FILE)"
	@security import "$$CERT_FILE" -f pkcs12 -k CI.keychain -P "$$CERT_PASS" -A
	@echo "Verifying import and setting permissions"
	security list-keychains -s CI.keychain
	security default-keychain -s CI.keychain
	security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k default CI.keychain
	security find-identity -vp codesigning CI.keychain

# TEST_TARGETS is a list of all phony test targets. Each one of them corresponds
# to a specific kind or subset of tests to run.
TEST_TARGETS := test-bench test-verbose test-race
.PHONY : $(TEST_TARGETS) test
$(TEST_TARGETS) : test

# test-bench runs all Go benchmark tests, and nothing more.
test-bench : GO_TEST_EXTRA_ARGS=-run=__nothing__ -bench=.
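
# For example, to run only the benchmarks in a single package (a sketch; any
# PKGS value accepted by the "test" target below also works here):
#
#   make PKGS=config test-bench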
# test-verbose runs all Go tests in verbose mode.
test-verbose : GO_TEST_EXTRA_ARGS=-v

# test-race runs all Go tests in race-detection mode.
test-race : GO_TEST_EXTRA_ARGS=-race

# test runs the Go tests with GO_TEST_EXTRA_ARGS in all specified packages,
# given by the PKGS variable.
#
# For example, a caller can invoke the race-detection tests in just the config
# package by running:
#
#   make PKGS=config test-race
#
# Or in a series of packages, like:
#
#   make PKGS="config lfsapi tools/kv" test-race
#
# And so on.
test : fmt $(.DEFAULT_GOAL)
	( \
		unset GIT_DIR; unset GIT_WORK_TREE; unset XDG_CONFIG_HOME; unset XDG_RUNTIME_DIR; \
		tempdir="$$(mktemp -d)"; \
		export HOME="$$tempdir"; \
		export GIT_CONFIG_NOSYSTEM=1; \
		$(GO) test -count=1 $(GO_TEST_EXTRA_ARGS) $(addprefix ./,$(PKGS)); \
		RET=$$?; \
		chmod -R u+w "$$tempdir"; \
		rm -fr "$$tempdir"; \
		exit $$RET; \
	)

# integration is a shorthand for running 'make' in the 't' directory.
.PHONY : integration
integration : bin/git-lfs$(X)
	make -C t test

# go.sum is a lockfile based on the contents of go.mod.
go.sum : go.mod
	$(GO) mod verify >/dev/null

# vendor updates the go.sum file, and installs vendored dependencies into
# the vendor/ sub-tree, removing sub-packages that are unused by Git LFS as
# well as test code.
.PHONY : vendor
vendor : go.mod
	$(GO) mod vendor -v

# fmt runs goimports over all files in Git LFS (as defined by $(SOURCES)
# above), and rewrites their contents in place with formatted output.
#
# If $(GOIMPORTS) does not exist, or isn't otherwise executable, this recipe
# still performs the linting sequence, but gracefully skips over running a
# non-existent command.
.PHONY : fmt
ifeq ($(shell test -x "`command -v $(GOIMPORTS)`"; echo $$?),0)
fmt : $(SOURCES) | lint
	@$(GOIMPORTS) $(GOIMPORTS_EXTRA_OPTS) $?;
else
fmt : $(SOURCES) | lint
	@echo "git-lfs: skipping fmt, no goimports found at \`$(GOIMPORTS)\` ..."
endif

# lint ensures that all dependencies outside of the standard library are
# vendored in via vendor (see above).
.PHONY : lint
lint : $(SOURCES)
	@! $(GO) list -f '{{ join .Deps "\n" }}' . \
	| $(XARGS) $(GO) list -f \
		'{{ if and (not .Standard) (not .Module) }} \
			{{ .ImportPath }} \
		{{ end }}' \
	| $(GREP) -v "github.com/git-lfs/git-lfs" \
	| $(GREP) "."

# MAN_ROFF_TARGETS is a list of all ROFF-style targets in the man pages.
MAN_ROFF_TARGETS = man/man1/git-lfs-checkout.1 \
	man/man1/git-lfs-clean.1 \
	man/man1/git-lfs-clone.1 \
	man/man1/git-lfs-completion.1 \
	man/man5/git-lfs-config.5 \
	man/man1/git-lfs-dedup.1 \
	man/man1/git-lfs-env.1 \
	man/man1/git-lfs-ext.1 \
	man/man7/git-lfs-faq.7 \
	man/man1/git-lfs-fetch.1 \
	man/man1/git-lfs-filter-process.1 \
	man/man1/git-lfs-fsck.1 \
	man/man1/git-lfs-install.1 \
	man/man1/git-lfs-lock.1 \
	man/man1/git-lfs-locks.1 \
	man/man1/git-lfs-logs.1 \
	man/man1/git-lfs-ls-files.1 \
	man/man1/git-lfs-merge-driver.1 \
	man/man1/git-lfs-migrate.1 \
	man/man1/git-lfs-pointer.1 \
	man/man1/git-lfs-post-checkout.1 \
	man/man1/git-lfs-post-commit.1 \
	man/man1/git-lfs-post-merge.1 \
	man/man1/git-lfs-pre-push.1 \
	man/man1/git-lfs-prune.1 \
	man/man1/git-lfs-pull.1 \
	man/man1/git-lfs-push.1 \
	man/man1/git-lfs-smudge.1 \
	man/man1/git-lfs-standalone-file.1 \
	man/man1/git-lfs-status.1 \
	man/man1/git-lfs-track.1 \
	man/man1/git-lfs-uninstall.1 \
	man/man1/git-lfs-unlock.1 \
	man/man1/git-lfs-untrack.1 \
	man/man1/git-lfs-update.1 \
	man/man1/git-lfs.1

# MAN_HTML_TARGETS is a list of all HTML-style targets in the man pages.
MAN_HTML_TARGETS = man/html/git-lfs-checkout.1.html \
	man/html/git-lfs-clean.1.html \
	man/html/git-lfs-clone.1.html \
	man/html/git-lfs-completion.1.html \
	man/html/git-lfs-config.5.html \
	man/html/git-lfs-dedup.1.html \
	man/html/git-lfs-env.1.html \
	man/html/git-lfs-ext.1.html \
	man/html/git-lfs-faq.7.html \
	man/html/git-lfs-fetch.1.html \
	man/html/git-lfs-filter-process.1.html \
	man/html/git-lfs-fsck.1.html \
	man/html/git-lfs-install.1.html \
	man/html/git-lfs-lock.1.html \
	man/html/git-lfs-locks.1.html \
	man/html/git-lfs-logs.1.html \
	man/html/git-lfs-ls-files.1.html \
	man/html/git-lfs-merge-driver.1.html \
	man/html/git-lfs-migrate.1.html \
	man/html/git-lfs-pointer.1.html \
	man/html/git-lfs-post-checkout.1.html \
	man/html/git-lfs-post-commit.1.html \
	man/html/git-lfs-post-merge.1.html \
	man/html/git-lfs-pre-push.1.html \
	man/html/git-lfs-prune.1.html \
	man/html/git-lfs-pull.1.html \
	man/html/git-lfs-push.1.html \
	man/html/git-lfs-smudge.1.html \
	man/html/git-lfs-standalone-file.1.html \
	man/html/git-lfs-status.1.html \
	man/html/git-lfs-track.1.html \
	man/html/git-lfs-uninstall.1.html \
	man/html/git-lfs-unlock.1.html \
	man/html/git-lfs-untrack.1.html \
	man/html/git-lfs-update.1.html \
	man/html/git-lfs.1.html

# man generates all ROFF- and HTML-style manpage targets.
.PHONY : man
man : $(MAN_ROFF_TARGETS) $(MAN_HTML_TARGETS)

# man/% generates ROFF-style man pages from the corresponding .adoc file.
man/man1/%.1 man/man5/%.5 man/man7/%.7 : docs/man/%.adoc
	@mkdir -p man/man1 man/man5 man/man7
	$(ASCIIDOCTOR) $(ASCIIDOCTOR_EXTRA_ARGS) -b manpage -o $@ $^

# man/%.html generates HTML-style man pages from the corresponding .adoc file.
man/html/%.1.html man/html/%.5.html man/html/%.7.html : docs/man/%.adoc
	@mkdir -p man/html
	$(ASCIIDOCTOR) $(ASCIIDOCTOR_EXTRA_ARGS) -b html5 -o $@ $^
git-lfs-3.6.1/README.md000066400000000000000000000235021472372047300143600ustar00rootroot00000000000000# Git Large File Storage

[![CI status][ci_badge]][ci_url]

[ci_badge]: https://github.com/git-lfs/git-lfs/workflows/CI/badge.svg
[ci_url]: https://github.com/git-lfs/git-lfs/actions?query=workflow%3ACI

[Git LFS](https://git-lfs.github.com) is a command line extension and
[specification](docs/spec.md) for managing large files with Git.

The client is written in Go, with pre-compiled binaries available for Mac,
Windows, Linux, and FreeBSD. Check out the
[website](https://git-lfs.github.com) for an overview of features.

## Getting Started

### Installing

#### On Linux

Debian and RPM packages are available from packagecloud, see the
[Linux installation instructions](INSTALLING.md).

#### On macOS

[Homebrew](https://brew.sh) bottles are distributed and can be installed via
`brew install git-lfs`.

#### On Windows

Git LFS is included in the distribution of
[Git for Windows](https://gitforwindows.org/). Alternatively, you can install
a recent version of Git LFS from the [Chocolatey](https://chocolatey.org/)
package manager.

#### From binary

[Binary packages](https://github.com/git-lfs/git-lfs/releases) are available
for Linux, macOS, Windows, and FreeBSD. The binary packages include a script
which will:

- Install Git LFS binaries onto the system `$PATH`. On Windows in particular,
  you may need to restart your command shell so any change to `$PATH` will
  take effect and Git can locate the Git LFS binary.
- Run `git lfs install` to perform required global configuration changes.

```ShellSession
$ ./install.sh
```

Note that Debian and RPM packages are built for multiple Linux distributions
and versions for both amd64 and i386.
For arm64, only Debian packages are built and only for recent versions due to
the cost of building in emulation.

#### From source

- Ensure you have the latest version of Go, GNU make, and a standard
  Unix-compatible build environment installed.
- On Windows, install `goversioninfo` with
  `go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest`.
- Run `make`.
- Place the `git-lfs` binary, which can be found in `bin`, on your system’s
  executable `$PATH` or equivalent.
- Git LFS requires global configuration changes once per machine. This can be
  done by running: `git lfs install`

#### Verifying releases

Releases are signed with the OpenPGP key of one of the core team members. To
get these keys, you can run the following command, which will print them to
standard output:

```ShellSession
$ curl -L https://api.github.com/repos/git-lfs/git-lfs/tarball/core-gpg-keys | tar -Ozxf -
```

Once you have the keys, you can download the `sha256sums.asc` file and verify
the file you want like so:

```ShellSession
$ gpg -d sha256sums.asc | grep git-lfs-linux-amd64-v2.10.0.tar.gz | shasum -a 256 -c
```

For the convenience of distributors, we also provide a wider variety of signed
hashes in the `hashes.asc` file. Those hashes are in the tagged BSD format, but
can be verified with Perl's `shasum` or the GNU hash utilities, just like the
ones in `sha256sums.asc`.

## Example Usage

To begin using Git LFS within a Git repository that is not already configured
for Git LFS, you can indicate which files you would like Git LFS to manage.
This can be done by running the following _from within a Git repository_:

```bash
$ git lfs track "*.psd"
```

(Where `*.psd` is the pattern of filenames that you wish to track. You can read
more about this pattern syntax
[here](https://git-scm.com/docs/gitattributes)).

> *Note:* the quotation marks surrounding the pattern are important to
> prevent the glob pattern from being expanded by the shell.

After any invocation of `git-lfs-track(1)` or `git-lfs-untrack(1)`, you _must
commit changes to your `.gitattributes` file_. This can be done by running:

```bash
$ git add .gitattributes
$ git commit -m "track *.psd files using Git LFS"
```

You can now interact with your Git repository as usual, and Git LFS will take
care of managing your large files. For example, changing a file named `my.psd`
(tracked above via `*.psd`):

```bash
$ git add my.psd
$ git commit -m "add psd"
```

> _Tip:_ if you have large files already in your repository's history, `git lfs
> track` will _not_ track them retroactively. To migrate existing large files
> in your history to use Git LFS, use `git lfs migrate`. For example:
>
> ```
> $ git lfs migrate import --include="*.psd" --everything
> ```
>
> **Note that this will rewrite history and change all of the Git object IDs in
> your repository, just like the export version of this command.**
>
> For more information, read
> [`git-lfs-migrate(1)`](https://github.com/git-lfs/git-lfs/blob/main/docs/man/git-lfs-migrate.adoc).

You can confirm that Git LFS is managing your PSD file:

```bash
$ git lfs ls-files
3c2f7aedfb * my.psd
```

Once you've made your commits, push your files to the Git remote:

```bash
$ git push origin main
Uploading LFS objects: 100% (1/1), 810 B, 1.2 KB/s
# ...
To https://github.com/git-lfs/git-lfs-test
   67fcf6a..47b2002  main -> main
```

Note: Git LFS requires at least Git 1.8.2 on Linux or 1.8.5 on macOS.
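
To check the Git and Git LFS versions you have installed (for example, to
confirm the requirement above), you can run:

```ShellSession
$ git version
$ git lfs version
```
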
### Uninstalling

If you've decided that Git LFS isn't right for you, you can convert your
repository back to a plain Git repository with `git lfs migrate` as well. For
example:

```ShellSession
$ git lfs migrate export --include="*.psd" --everything
```

**Note that this will rewrite history and change all of the Git object IDs in
your repository, just like the import version of this command.**

If there's some reason that things aren't working out for you, please let us
know in an issue, and we'll definitely try to help or get it fixed.

## Limitations

Git LFS maintains a list of currently known limitations, which you can find
and edit [here](https://github.com/git-lfs/git-lfs/wiki/Limitations).

Git LFS source code utilizes Go modules in its build system, and therefore this
project contains a `go.mod` file with a defined Go module path. However, we do
not maintain a stable Go language API or ABI, as Git LFS is intended to be used
solely as a compiled binary utility. Please do not import the `git-lfs` module
into other Go code and do not rely on it as a source code dependency.

## Need Help?

You can get help on specific commands directly:

```bash
$ git lfs help <subcommand>
```

The [official documentation](docs) has command references and specifications
for the tool. There's also a
[FAQ](https://github.com/git-lfs/git-lfs/blob/main/docs/man/git-lfs-faq.adoc)
shipped with Git LFS which answers some common questions.

If you have a question on how to use Git LFS, aren't sure about something, or
are looking for input from others on tips about best practices or use cases,
feel free to
[start a discussion](https://github.com/git-lfs/git-lfs/discussions).

You can always [open an issue](https://github.com/git-lfs/git-lfs/issues), and
one of the Core Team members will respond to you. Please be sure to include:

1. The output of `git lfs env`, which displays helpful information about your
   Git repository that is useful in debugging.
2. Any failed commands re-run with `GIT_TRACE=1` in the environment, which
   displays additional information pertaining to why a command crashed.

## Contributing

See [CONTRIBUTING.md](CONTRIBUTING.md) for info on working on Git LFS and
sending patches. Related projects are listed on the
[Implementations wiki page](https://github.com/git-lfs/git-lfs/wiki/Implementations).

See also [SECURITY.md](SECURITY.md) for info on how to submit reports of
security vulnerabilities.

## Core Team

These are the humans that form the Git LFS core team, which runs the project.

In alphabetical order:

| [@chrisd8088][chrisd8088-user] | [@larsxschneider][larsxschneider-user] |
| :---: | :---: |
| [![][chrisd8088-img]][chrisd8088-user] | [![][larsxschneider-img]][larsxschneider-user] |
| [PGP 088335A9][chrisd8088-pgp] | [PGP A5795889][larsxschneider-pgp] |

[chrisd8088-img]: https://avatars1.githubusercontent.com/u/28857117?s=100&v=4
[larsxschneider-img]: https://avatars1.githubusercontent.com/u/477434?s=100&v=4
[chrisd8088-user]: https://github.com/chrisd8088
[larsxschneider-user]: https://github.com/larsxschneider
[chrisd8088-pgp]: https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x86cd3297749375bcf8206715f54fe648088335a9
[larsxschneider-pgp]: https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xaa3b3450295830d2de6db90caba67be5a5795889

### Alumni

These are the humans that have in the past formed the Git LFS core team, or
have otherwise contributed a significant amount to the project. Git LFS would
not be possible without them.

In alphabetical order:

| [@andyneff][andyneff-user] | [@bk2204][bk2204-user] | [@PastelMobileSuit][PastelMobileSuit-user] | [@rubyist][rubyist-user] | [@sinbad][sinbad-user] | [@technoweenie][technoweenie-user] | [@ttaylorr][ttaylorr-user] |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| [![][andyneff-img]][andyneff-user] | [![][bk2204-img]][bk2204-user] | [![][PastelMobileSuit-img]][PastelMobileSuit-user] | [![][rubyist-img]][rubyist-user] | [![][sinbad-img]][sinbad-user] | [![][technoweenie-img]][technoweenie-user] | [![][ttaylorr-img]][ttaylorr-user] |

[andyneff-img]: https://avatars1.githubusercontent.com/u/7596961?v=3&s=100
[bk2204-img]: https://avatars1.githubusercontent.com/u/497054?s=100&v=4
[PastelMobileSuit-img]: https://avatars2.githubusercontent.com/u/37254014?s=100&v=4
[rubyist-img]: https://avatars1.githubusercontent.com/u/143?v=3&s=100
[sinbad-img]: https://avatars1.githubusercontent.com/u/142735?v=3&s=100
[technoweenie-img]: https://avatars3.githubusercontent.com/u/21?v=3&s=100
[ttaylorr-img]: https://avatars2.githubusercontent.com/u/443245?s=100&v=4
[andyneff-user]: https://github.com/andyneff
[bk2204-user]: https://github.com/bk2204
[PastelMobileSuit-user]: https://github.com/PastelMobileSuit
[sinbad-user]: https://github.com/sinbad
[rubyist-user]: https://github.com/rubyist
[technoweenie-user]: https://github.com/technoweenie
[ttaylorr-user]: https://github.com/ttaylorr
git-lfs-3.6.1/SECURITY.md000066400000000000000000000055401472372047300146740ustar00rootroot00000000000000## Security

Git LFS is a public, open-source project supported by GitHub and a broad
community of other organizations and individual contributors. The Git LFS
community takes the security of our project seriously, including all of the
source code repositories managed through our GitHub
[organization](https://github.com/git-lfs).

If you believe you have found a security vulnerability in any Git LFS client
software repository, please report it to us as described below.

If you believe you have found a security vulnerability in a Git LFS API
service, please report it to the relevant hosting company (e.g., Atlassian,
GitLab, GitHub, etc.) by following their preferred security report process.

## Reporting Security Issues

*Please do not report security vulnerabilities through public GitHub issues.*

If you believe you have found a security vulnerability in the Git LFS client
software, including any of our Go modules such as
[gitobj](https://github.com/git-lfs/gitobj) or
[pktline](https://github.com/git-lfs/pktline), please report it by email to
one of the Git LFS
[core team members](https://github.com/git-lfs/git-lfs#core-team).

Email addresses for core team members may be found either on their personal
GitHub pages or simply by searching through the Git history for this project;
all commits from core team members should have their email address in the
`Author` Git log field.

If possible, encrypt your message with the core team member's PGP key. These
may be located by searching a public keyserver or from the team member
[list](https://github.com/git-lfs/git-lfs#core-team) on our home page.

If you do not receive a timely response (generally within 24 hours of the
first working day after your submission), please follow up by email with them
and another core team member as well.

Please include the requested information listed below (as much as you can
provide) to help us better understand the nature and scope of the possible
issue:

* Type of issue (e.g. buffer overflow, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue

This information will help us triage your report more quickly.

We also recommend reviewing our [guidelines](CONTRIBUTING.md) for contributors
and our [Open Code of Conduct](CODE-OF-CONDUCT.md).

Note that because the Git LFS client is a public open-source project, it is
not enrolled in any bug bounty programs; however, implementations of the Git
LFS API service may be, depending on the hosting provider.

## Preferred Languages

We prefer all communications to be in English.
git-lfs-3.6.1/commands/000077500000000000000000000000001472372047300147005ustar00rootroot00000000000000git-lfs-3.6.1/commands/command_checkout.go000066400000000000000000000112671472372047300205410ustar00rootroot00000000000000package commands

import (
	"fmt"
	"os"

	"github.com/git-lfs/git-lfs/v3/errors"
	"github.com/git-lfs/git-lfs/v3/filepathfilter"
	"github.com/git-lfs/git-lfs/v3/git"
	"github.com/git-lfs/git-lfs/v3/lfs"
	"github.com/git-lfs/git-lfs/v3/tasklog"
	"github.com/git-lfs/git-lfs/v3/tq"
	"github.com/git-lfs/git-lfs/v3/tr"
	"github.com/spf13/cobra"
)

var (
	checkoutTo     string
	checkoutBase   bool
	checkoutOurs   bool
	checkoutTheirs bool
)

func checkoutCommand(cmd *cobra.Command, args []string) {
	setupRepository()
	stage, err := whichCheckout()
	if err != nil {
		Exit(tr.Tr.Get("Error parsing args: %v", err))
	}

	if checkoutTo != "" && stage != git.IndexStageDefault {
		if len(args) != 1 {
			Exit(tr.Tr.Get("--to requires exactly one Git LFS object file path"))
		}
		checkoutConflict(rootedPaths(args)[0], stage)
		return
	} else if checkoutTo != "" || stage != git.IndexStageDefault {
		Exit(tr.Tr.Get("--to and exactly one of --theirs, --ours, and --base must be used together"))
	}

	ref, err := git.CurrentRef()
	if err != nil {
		Panic(err, tr.Tr.Get("Could not checkout"))
	}

	singleCheckout := newSingleCheckout(cfg.Git, "")
	if singleCheckout.Skip() {
		fmt.Println(tr.Tr.Get("Cannot checkout LFS objects, Git LFS is not installed."))
		return
	}

	var totalBytes int64
	var pointers []*lfs.WrappedPointer
	logger := tasklog.NewLogger(os.Stdout,
		tasklog.ForceProgress(cfg.ForceProgress()),
	)
	meter := tq.NewMeter(cfg)
	meter.Direction = tq.Checkout
	meter.Logger = meter.LoggerFromEnv(cfg.Os)
	logger.Enqueue(meter)
	chgitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) {
		if err != nil {
			LoggedError(err, tr.Tr.Get("Scanner error: %s", err))
			return
		}

		totalBytes += p.Size
		meter.Add(p.Size)
		meter.StartTransfer(p.Name)
		pointers = append(pointers, p)
	})

	chgitscanner.Filter = filepathfilter.New(rootedPaths(args), nil, filepathfilter.GitIgnore)

	if err := chgitscanner.ScanLFSFiles(ref.Sha, nil); err != nil {
		ExitWithError(err)
	}
	meter.Start()

	for _, p := range pointers {
		singleCheckout.Run(p)

		// Not strictly correct (this loop is not parallel), but we
		// don't have a callback, and it's just local; plus there's only
		// one slot in the channel, so it'll block and stay close to the
		// real progress.
		meter.TransferBytes("checkout", p.Name, p.Size, totalBytes, int(p.Size))
		meter.FinishTransfer(p.Name)
	}

	meter.Finish()
	singleCheckout.Close()
}

func checkoutConflict(file string, stage git.IndexStage) {
	singleCheckout := newSingleCheckout(cfg.Git, "")
	if singleCheckout.Skip() {
		fmt.Println(tr.Tr.Get("Cannot checkout LFS objects, Git LFS is not installed."))
		return
	}

	ref, err := git.ResolveRef(fmt.Sprintf(":%d:%s", stage, file))
	if err != nil {
		Exit(tr.Tr.Get("Could not checkout (are you not in the middle of a merge?): %v", err))
	}

	scanner, err := git.NewObjectScanner(cfg.GitEnv(), cfg.OSEnv())
	if err != nil {
		Exit(tr.Tr.Get("Could not create object scanner: %v", err))
	}

	if !scanner.Scan(ref.Sha) {
		Exit(tr.Tr.Get("Could not find object %q", ref.Sha))
	}

	ptr, err := lfs.DecodePointer(scanner.Contents())
	if err != nil {
		Exit(tr.Tr.Get("Could not decode pointer for object %q: %v", ref.Sha, err))
	}

	p := &lfs.WrappedPointer{Name: file, Pointer: ptr}

	if err := singleCheckout.RunToPath(p, checkoutTo); err != nil {
		Exit(tr.Tr.Get("Error checking out %v to %q: %v", ref.Sha, checkoutTo, err))
	}
	singleCheckout.Close()
}

func whichCheckout() (stage git.IndexStage, err error) {
	seen := 0
	stage = git.IndexStageDefault

	if checkoutBase {
		seen++
		stage = git.IndexStageBase
	}
	if checkoutOurs {
		seen++
		stage = git.IndexStageOurs
	}
	if checkoutTheirs {
		seen++
		stage = git.IndexStageTheirs
	}

	if seen > 1 {
		return 0, errors.New(tr.Tr.Get("at most one of --base, --theirs, and --ours is allowed"))
	}
	return stage, nil
}

// rootedPaths converts the parameters (which are filters) into patterns
// relative to the root of the repository, in case this is being executed in a
// sub-folder.
func rootedPaths(args []string) []string {
	pathConverter, err := lfs.NewCurrentToRepoPatternConverter(cfg)
	if err != nil {
		Panic(err, tr.Tr.Get("Could not checkout"))
	}

	rootedpaths := make([]string, 0, len(args))
	for _, arg := range args {
		rootedpaths = append(rootedpaths, pathConverter.Convert(arg))
	}
	return rootedpaths
}

func init() {
	RegisterCommand("checkout", checkoutCommand, func(cmd *cobra.Command) {
		cmd.Flags().StringVar(&checkoutTo, "to", "", "Checkout a conflicted file to this path")
		cmd.Flags().BoolVar(&checkoutOurs, "ours", false, "Checkout our version of a conflicted file")
		cmd.Flags().BoolVar(&checkoutTheirs, "theirs", false, "Checkout their version of a conflicted file")
		cmd.Flags().BoolVar(&checkoutBase, "base", false, "Checkout the base version of a conflicted file")
	})
}
git-lfs-3.6.1/commands/command_clean.go000066400000000000000000000062571472372047300200170ustar00rootroot00000000000000package commands

import (
	"io"
	"os"

	"github.com/git-lfs/git-lfs/v3/errors"
	"github.com/git-lfs/git-lfs/v3/lfs"
	"github.com/git-lfs/git-lfs/v3/tools"
	"github.com/git-lfs/git-lfs/v3/tr"
	"github.com/spf13/cobra"
)

// clean cleans an object read from the given `io.Reader`, "from", and writes
// out a corresponding pointer to the `io.Writer`, "to". If there were any
// errors encountered along the way, they will be returned immediately if the
// error is non-fatal, otherwise they will halt using the built-in
// `commands.Panic`.
//
// If fileSize is given as a non-negative (>= 0) integer, that value is used
// with preference to os.Stat(fileName).Size(). If it is given as negative, the
// value from the `os.Stat` call will be used instead.
//
// If the object read from "from" is _already_ a clean pointer, then it will be
// written out verbatim to "to", without trying to make it a pointer again.
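//
// For example, this is (a sketch of) how cleanCommand below invokes it,
// reading the object from standard input and writing the pointer to standard
// output:
//
//	gitfilter := lfs.NewGitFilter(cfg)
//	ptr, err := clean(gitfilter, os.Stdout, os.Stdin, fileName, -1)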
func clean(gf *lfs.GitFilter, to io.Writer, from io.Reader, fileName string, fileSize int64) (*lfs.Pointer, error) {
	var cb tools.CopyCallback
	var file *os.File

	if len(fileName) > 0 {
		stat, err := os.Stat(fileName)
		if err == nil && stat != nil {
			if fileSize < 0 {
				fileSize = stat.Size()
			}

			localCb, localFile, err := gf.CopyCallbackFile("clean", fileName, 1, 1)
			if err != nil {
				Error(err.Error())
			} else {
				cb = localCb
				file = localFile
			}
		}
	}

	cleaned, err := gf.Clean(from, fileName, fileSize, cb)
	if file != nil {
		file.Close()
	}

	if cleaned != nil {
		defer cleaned.Teardown()
	}

	if errors.IsCleanPointerError(err) {
		// If the contents read from the working directory were _already_
		// a pointer, we'll get a `CleanPointerError`, with the context
		// containing the bytes that we should write back out to Git.
		_, err = to.Write(errors.GetContext(err, "bytes").([]byte))
		return nil, err
	}

	if err != nil {
		ExitWithError(errors.Wrap(err, tr.Tr.Get("Error cleaning Git LFS object")))
	}

	tmpfile := cleaned.Filename
	mediafile, err := gf.ObjectPath(cleaned.Oid)
	if err != nil {
		Panic(err, tr.Tr.Get("Unable to get local media path."))
	}

	if stat, _ := os.Stat(mediafile); stat != nil {
		if stat.Size() != cleaned.Size && len(cleaned.Pointer.Extensions) == 0 {
			Exit("%s\n%s\n%s", tr.Tr.Get("Files don't match:"), mediafile, tmpfile)
		}
		Debug("%s exists", mediafile)
	} else {
		if err := os.Rename(tmpfile, mediafile); err != nil {
			Panic(err, tr.Tr.Get("Unable to move %s to %s", tmpfile, mediafile))
		}

		Debug(tr.Tr.Get("Writing %s", mediafile))
	}

	_, err = lfs.EncodePointer(to, cleaned.Pointer)
	return cleaned.Pointer, err
}

func cleanCommand(cmd *cobra.Command, args []string) {
	requireStdin(tr.Tr.Get("This command should be run by the Git 'clean' filter"))
	setupRepository()
	installHooks(false)

	var fileName string
	if len(args) > 0 {
		fileName = args[0]
	}

	gitfilter := lfs.NewGitFilter(cfg)
	ptr, err := clean(gitfilter, os.Stdout, os.Stdin, fileName, -1)
	if err != nil {
		Error(err.Error())
	}

	if ptr != nil && possiblyMalformedObjectSize(ptr.Size) {
		Error(tr.Tr.Get("Possibly malformed conversion on Windows, see `git lfs help smudge` for more details."))
	}
}

func init() {
	RegisterCommand("clean", cleanCommand, nil)
}
git-lfs-3.6.1/commands/command_clone.go000066400000000000000000000156021472372047300200310ustar00rootroot00000000000000package commands

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/git-lfs/git-lfs/v3/subprocess"
	"github.com/git-lfs/git-lfs/v3/tr"

	"github.com/git-lfs/git-lfs/v3/git"
	"github.com/git-lfs/git-lfs/v3/tools"
	"github.com/spf13/cobra"
)

var (
	cloneFlags git.CloneFlags

	cloneSkipRepoInstall bool
)

func cloneCommand(cmd *cobra.Command, args []string) {
	requireGitVersion()

	if git.IsGitVersionAtLeast("2.15.0") {
		// TRANSLATORS: Individual lines should not exceed 80
		// characters, and any additional lines in the first message
		// should be indented to align with the first line's text
		// following the warning prefix and punctuation.
msg := fmt.Sprintf("%s\n\n%s", tr.Tr.Get("WARNING: `git lfs clone` is deprecated and will not be updated\n with new flags from `git clone`"), tr.Tr.Get("`git clone` has been updated in upstream Git to have comparable\nspeeds to `git lfs clone`.")) fmt.Fprintln(os.Stderr, msg) } // We pass all args to git clone err := git.CloneWithoutFilters(cloneFlags, args) if err != nil { Exit("%s\n%v", tr.Tr.Get("Error(s) during clone:"), err) } // now execute pull (need to be inside dir) cwd, err := tools.Getwd() if err != nil { Exit(tr.Tr.Get("Unable to derive current working dir: %v", err)) } // Either the last argument was a relative or local dir, or we have to // derive it from the clone URL clonedir, err := filepath.Abs(args[len(args)-1]) if err != nil || !tools.DirExists(clonedir) { // Derive from clone URL instead base := path.Base(args[len(args)-1]) if strings.HasSuffix(base, ".git") { base = base[:len(base)-4] } clonedir, _ = filepath.Abs(base) if !tools.DirExists(clonedir) { Exit(tr.Tr.Get("Unable to find clone dir at %q", clonedir)) } } err = os.Chdir(clonedir) if err != nil { Exit(tr.Tr.Get("Unable to change directory to clone dir %q: %v", clonedir, err)) } // Make sure we pop back to dir we started in at the end defer os.Chdir(cwd) setupRepository() // Support --origin option to clone if len(cloneFlags.Origin) > 0 { cfg.SetRemote(cloneFlags.Origin) } if ref, err := git.CurrentRef(); err == nil { includeArg, excludeArg := getIncludeExcludeArgs(cmd) filter := buildFilepathFilter(cfg, includeArg, excludeArg, true) if cloneFlags.NoCheckout || cloneFlags.Bare { // If --no-checkout or --bare then we shouldn't check out, just fetch instead fetchRef(ref.Name, filter) } else { pull(filter) err := postCloneSubmodules(args) if err != nil { Exit(tr.Tr.Get("Error performing `git lfs pull` for submodules: %v", err)) } } } if !cloneSkipRepoInstall { // If --skip-repo wasn't given, install repo-level hooks while // we're still in the checkout directory. 
if err := installHooks(false); err != nil { ExitWithError(err) } } } func postCloneSubmodules(args []string) error { // In git 2.9+ the filter option will have been passed through to submodules // So we need to lfs pull inside each if !git.IsGitVersionAtLeast("2.9.0") { // In earlier versions submodules would have used smudge filter return nil } // Also we only do this if --recursive or --recurse-submodules was provided if !cloneFlags.Recursive && !cloneFlags.RecurseSubmodules { return nil } // Use `git submodule foreach --recursive` to cascade into nested submodules // Also good to call a new instance of git-lfs rather than do things // inside this instance, since that way we get a clean env in that subrepo cmd, err := subprocess.ExecCommand("git", "submodule", "foreach", "--recursive", "git lfs pull") if err != nil { return err } cmd.Stderr = os.Stderr cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout return cmd.Run() } func init() { RegisterCommand("clone", cloneCommand, func(cmd *cobra.Command) { cmd.PreRun = nil // Mirror all git clone flags cmd.Flags().StringVarP(&cloneFlags.TemplateDirectory, "template", "", "", "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Local, "local", "l", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Shared, "shared", "s", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.NoHardlinks, "no-hardlinks", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Quiet, "quiet", "q", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.NoCheckout, "no-checkout", "n", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Progress, "progress", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Bare, "bare", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Mirror, "mirror", "", false, "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.Origin, "origin", "o", "", "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.Branch, "branch", "b", "", "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.Upload, "upload-pack", "u", "", "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.Reference, "reference", "", "", "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.ReferenceIfAble, "reference-if-able", "", "", "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Dissociate, "dissociate", "", false, "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.SeparateGit, "separate-git-dir", "", "", "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.Depth, "depth", "", "", "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Recursive, "recursive", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.RecurseSubmodules, "recurse-submodules", "", false, "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.Config, "config", "c", "", "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.SingleBranch, "single-branch", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.NoSingleBranch, "no-single-branch", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Verbose, "verbose", "v", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Ipv4, "ipv4", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Ipv6, "ipv6", "", false, "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.ShallowSince, "shallow-since", "", "", "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.ShallowExclude, 
"shallow-exclude", "", "", "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.ShallowSubmodules, "shallow-submodules", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.NoShallowSubmodules, "no-shallow-submodules", "", false, "See 'git clone --help'") cmd.Flags().Int64VarP(&cloneFlags.Jobs, "jobs", "j", -1, "See 'git clone --help'") cmd.Flags().StringVarP(&includeArg, "include", "I", "", "Include a list of paths") cmd.Flags().StringVarP(&excludeArg, "exclude", "X", "", "Exclude a list of paths") cmd.Flags().BoolVar(&cloneSkipRepoInstall, "skip-repo", false, "Skip LFS repo setup") }) } git-lfs-3.6.1/commands/command_dedup.go000066400000000000000000000104501472372047300200260ustar00rootroot00000000000000package commands import ( "os" "path/filepath" "sync/atomic" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) var ( dedupFlags = struct { test bool }{} dedupStats = &struct { totalProcessedCount int64 totalProcessedSize int64 }{} ) func dedupTestCommand(*cobra.Command, []string) { setupRepository() if supported, err := tools.CheckCloneFileSupported(cfg.TempDir()); err != nil || !supported { if err == nil { err = errors.New(tr.Tr.Get("Unknown reason")) } Exit(tr.Tr.Get("This system does not support de-duplication: %s", err)) } if len(cfg.Extensions()) > 0 { Exit(tr.Tr.Get("This platform supports file de-duplication, however, Git LFS extensions are configured and therefore de-duplication can not be used.")) } Print(tr.Tr.Get("OK: This platform and repository support file de-duplication.")) } func dedupCommand(cmd *cobra.Command, args []string) { if dedupFlags.test { dedupTestCommand(cmd, args) return } setupRepository() if gitDir, err := git.GitDir(); err != nil { ExitWithError(err) } else if supported, err := tools.CheckCloneFileSupported(gitDir); err != nil || !supported { Exit(tr.Tr.Get("This system does not support de-duplication.")) } if len(cfg.Extensions()) > 0 { Exit(tr.Tr.Get("This platform supports file de-duplication, however, Git LFS extensions are configured and therefore de-duplication can not be used.")) } if dirty, err := git.IsWorkingCopyDirty(); err != nil { ExitWithError(err) } else if dirty { Exit(tr.Tr.Get("Working tree is dirty. Please commit or reset your change.")) } // We assume working tree is clean. gitScanner := lfs.NewGitScanner(config.New(), func(p *lfs.WrappedPointer, err error) { if err != nil { Exit(tr.Tr.Get("Could not scan for Git LFS tree: %s", err)) return } if success, err := dedup(p); err != nil { // TRANSLATORS: Leading spaces should be included on // the second line so the format specifier aligns with // with the first format specifier on the first line. Error(tr.Tr.Get("Skipped: %s (Size: %d)\n %s", p.Name, p.Size, err)) } else if !success { Error(tr.Tr.Get("Skipped: %s (Size: %d)", p.Name, p.Size)) } else if success { Print(tr.Tr.Get("Success: %s (Size: %d)", p.Name, p.Size)) atomic.AddInt64(&dedupStats.totalProcessedCount, 1) atomic.AddInt64(&dedupStats.totalProcessedSize, p.Size) } }) if err := gitScanner.ScanTree("HEAD", nil); err != nil { ExitWithError(err) } // TRANSLATORS: The second and third strings should have the colons // aligned in a column. 
Print("\n\n%s\n %s\n %s", tr.Tr.Get("Finished successfully."), tr.Tr.GetN( "De-duplicated size: %d byte", "De-duplicated size: %d bytes", int(dedupStats.totalProcessedSize), dedupStats.totalProcessedSize), tr.Tr.Get(" count: %d", dedupStats.totalProcessedCount)) } // dedup executes // Precondition: working tree MUST clean. We can replace working tree files from mediafile safely. func dedup(p *lfs.WrappedPointer) (success bool, err error) { // PRECONDITION, check ofs object exists or skip this file. if !cfg.LFSObjectExists(p.Oid, p.Size) { // Not exists, // Basically, this is not happens because executing 'git status' in `git.IsWorkingCopyDirty()` recover it. return false, errors.New(tr.Tr.Get("Git LFS object file does not exist")) } // DO de-dup // Gather original state originalStat, err := os.Stat(p.Name) if err != nil { return false, err } // Do clone srcFile := cfg.Filesystem().ObjectPathname(p.Oid) if srcFile == os.DevNull { return true, nil } dstFile := filepath.Join(cfg.LocalWorkingDir(), p.Name) // Clone the file. This overwrites the destination if it exists. if ok, err := tools.CloneFileByPath(dstFile, srcFile); err != nil { return false, err } else if !ok { return false, errors.Errorf(tr.Tr.Get("unknown clone file error")) } // Recover original state if err := os.Chmod(dstFile, originalStat.Mode()); err != nil { return false, err } return true, nil } func init() { RegisterCommand("dedup", dedupCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&dedupFlags.test, "test", "t", false, "test") }) } git-lfs-3.6.1/commands/command_env.go000066400000000000000000000032661472372047300175240ustar00rootroot00000000000000package commands import ( "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) func envCommand(cmd *cobra.Command, args []string) { config.ShowConfigWarnings = true gitV, err := git.Version() if err != nil { gitV = tr.Tr.Get("Error getting Git version: %s", err.Error()) } Print(config.VersionDesc) Print(gitV) Print("") defaultRemote := "" if cfg.IsDefaultRemote() { defaultRemote = cfg.Remote() endpoint := getAPIClient().Endpoints.Endpoint("download", defaultRemote) if len(endpoint.Url) > 0 { access := getAPIClient().Endpoints.AccessFor(endpoint.Url) Print("Endpoint=%s (auth=%s)", endpoint.Url, access.Mode()) if len(endpoint.SSHMetadata.UserAndHost) > 0 { Print(" SSH=%s:%s", endpoint.SSHMetadata.UserAndHost, endpoint.SSHMetadata.Path) } } } for _, remote := range cfg.Remotes() { if remote == defaultRemote { continue } remoteEndpoint := getAPIClient().Endpoints.Endpoint("download", remote) remoteAccess := getAPIClient().Endpoints.AccessFor(remoteEndpoint.Url) Print("Endpoint (%s)=%s (auth=%s)", remote, remoteEndpoint.Url, remoteAccess.Mode()) if len(remoteEndpoint.SSHMetadata.UserAndHost) > 0 { Print(" SSH=%s:%s", remoteEndpoint.SSHMetadata.UserAndHost, remoteEndpoint.SSHMetadata.Path) } } for _, env := range lfs.Environ(cfg, getTransferManifest(), oldEnv) { Print(env) } for _, key := range []string{"filter.lfs.process", "filter.lfs.smudge", "filter.lfs.clean"} { value, _ := cfg.Git.Get(key) Print("git config %s = %q", key, value) } } func init() { RegisterCommand("env", envCommand, nil) } git-lfs-3.6.1/commands/command_ext.go000066400000000000000000000016001472372047300175220ustar00rootroot00000000000000package commands import ( "fmt" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) func extCommand(cmd 
 *cobra.Command, args []string) {
	printAllExts()
}

func extListCommand(cmd *cobra.Command, args []string) {
	n := len(args)
	if n == 0 {
		printAllExts()
		return
	}

	for _, key := range args {
		ext := cfg.Extensions()[key]
		printExt(ext)
	}
}

func printAllExts() {
	extensions, err := cfg.SortedExtensions()
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, ext := range extensions {
		printExt(ext)
	}
}

func printExt(ext config.Extension) {
	Print(tr.Tr.Get("Extension: %s", ext.Name))
	Print(` clean = %s
 smudge = %s
 priority = %d`, ext.Clean, ext.Smudge, ext.Priority)
}

func init() {
	RegisterCommand("ext", extCommand, func(cmd *cobra.Command) {
		cmd.AddCommand(NewCommand("list", extListCommand))
	})
}
git-lfs-3.6.1/commands/command_fetch.go000066400000000000000000000273521472372047300200230ustar00rootroot00000000000000package commands

import (
	"fmt"
	"os"
	"time"

	"github.com/git-lfs/git-lfs/v3/filepathfilter"
	"github.com/git-lfs/git-lfs/v3/git"
	"github.com/git-lfs/git-lfs/v3/lfs"
	"github.com/git-lfs/git-lfs/v3/tasklog"
	"github.com/git-lfs/git-lfs/v3/tq"
	"github.com/git-lfs/git-lfs/v3/tr"
	"github.com/rubyist/tracerx"
	"github.com/spf13/cobra"
)

var (
	fetchRecentArg bool
	fetchAllArg    bool
	fetchPruneArg  bool
)

func getIncludeExcludeArgs(cmd *cobra.Command) (include, exclude *string) {
	includeFlag := cmd.Flag("include")
	excludeFlag := cmd.Flag("exclude")
	if includeFlag.Changed {
		include = &includeArg
	}
	if excludeFlag.Changed {
		exclude = &excludeArg
	}

	return
}

func fetchCommand(cmd *cobra.Command, args []string) {
	setupRepository()

	var refs []*git.Ref

	if len(args) > 0 {
		// Remote is first arg
		if err := cfg.SetValidRemote(args[0]); err != nil {
			Exit(tr.Tr.Get("Invalid remote name %q: %s", args[0], err))
		}
	}

	if len(args) > 1 {
		resolvedrefs, err := git.ResolveRefs(args[1:])
		if err != nil {
			Panic(err, tr.Tr.Get("Invalid ref argument: %v", args[1:]))
		}
		refs = resolvedrefs
	} else if !fetchAllArg {
		ref, err := git.CurrentRef()
		if err != nil {
			Panic(err, tr.Tr.Get("Could not fetch"))
		}
		refs = []*git.Ref{ref}
	}

	success := true
	include, exclude := getIncludeExcludeArgs(cmd)
	fetchPruneCfg := lfs.NewFetchPruneConfig(cfg.Git)

	if fetchAllArg {
		if fetchRecentArg {
			Exit(tr.Tr.Get("Cannot combine --all with --recent"))
		}
		if include != nil || exclude != nil {
			Exit(tr.Tr.Get("Cannot combine --all with --include or --exclude"))
		}
		if len(cfg.FetchIncludePaths()) > 0 || len(cfg.FetchExcludePaths()) > 0 {
			Print(tr.Tr.Get("Ignoring global include / exclude paths to fulfil --all"))
		}

		if len(args) > 1 {
			refShas := make([]string, 0, len(refs))
			for _, ref := range refs {
				refShas = append(refShas, ref.Sha)
			}
			success = fetchRefs(refShas)
		} else {
			success = fetchAll()
		}
	} else { // !all
		filter := buildFilepathFilter(cfg, include, exclude, true)

		// Fetch refs sequentially per arg order; duplicates in later refs will be ignored
		for _, ref := range refs {
			Print("fetch: %s", tr.Tr.Get("Fetching reference %s", ref.Refspec()))
			s := fetchRef(ref.Sha, filter)
			success = success && s
		}

		if fetchRecentArg || fetchPruneCfg.FetchRecentAlways {
			s := fetchRecent(fetchPruneCfg, refs, filter)
			success = success && s
		}
	}

	if fetchPruneArg {
		verify := fetchPruneCfg.PruneVerifyRemoteAlways
		verifyUnreachable := fetchPruneCfg.PruneVerifyUnreachableAlways
		// assume false for options not available in fetch
		prune(fetchPruneCfg, verify, verifyUnreachable, false, false, false)
	}

	if !success {
		c := getAPIClient()
		e := c.Endpoints.Endpoint("download", cfg.Remote())
		Exit(tr.Tr.Get("error: failed to fetch some objects from '%s'", e.Url))
	}
}

func pointersToFetchForRef(ref string,
filter *filepathfilter.Filter) ([]*lfs.WrappedPointer, error) { var pointers []*lfs.WrappedPointer var multiErr error tempgitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) { if err != nil { if multiErr != nil { multiErr = fmt.Errorf("%v\n%v", multiErr, err) } else { multiErr = err } return } pointers = append(pointers, p) }) tempgitscanner.Filter = filter if err := tempgitscanner.ScanTree(ref, nil); err != nil { return nil, err } return pointers, multiErr } // Fetch all binaries for a given ref (that we don't have already) func fetchRef(ref string, filter *filepathfilter.Filter) bool { pointers, err := pointersToFetchForRef(ref, filter) if err != nil { Panic(err, tr.Tr.Get("Could not scan for Git LFS files")) } return fetchAndReportToChan(pointers, filter, nil) } func pointersToFetchForRefs(refs []string) ([]*lfs.WrappedPointer, error) { // This could be a long process so use the chan version & report progress logger := tasklog.NewLogger(OutputWriter, tasklog.ForceProgress(cfg.ForceProgress()), ) task := logger.Simple() defer task.Complete() // use temp gitscanner to collect pointers var pointers []*lfs.WrappedPointer var multiErr error var numObjs int64 tempgitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) { if err != nil { if multiErr != nil { multiErr = fmt.Errorf("%v\n%v", multiErr, err) } else { multiErr = err } return } numObjs++ task.Logf("fetch: %s", tr.Tr.GetN("%d object found", "%d objects found", int(numObjs), numObjs)) pointers = append(pointers, p) }) if err := tempgitscanner.ScanRefs(refs, nil, nil); err != nil { return nil, err } return pointers, multiErr } func fetchRefs(refs []string) bool { pointers, err := pointersToFetchForRefs(refs) if err != nil { Panic(err, tr.Tr.Get("Could not scan for Git LFS files")) } return fetchAndReportToChan(pointers, nil, nil) } // Fetch all previous versions of objects from since to ref (not including final state at ref) // So this will fetch all the '-' sides of the diff from since to ref func fetchPreviousVersions(ref string, since time.Time, filter *filepathfilter.Filter) bool { var pointers []*lfs.WrappedPointer tempgitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) { if err != nil { Panic(err, tr.Tr.Get("Could not scan for Git LFS previous versions")) return } pointers = append(pointers, p) }) tempgitscanner.Filter = filter if err := tempgitscanner.ScanPreviousVersions(ref, since, nil); err != nil { ExitWithError(err) } return fetchAndReportToChan(pointers, filter, nil) } // Fetch recent objects based on config func fetchRecent(fetchconf lfs.FetchPruneConfig, alreadyFetchedRefs []*git.Ref, filter *filepathfilter.Filter) bool { if fetchconf.FetchRecentRefsDays == 0 && fetchconf.FetchRecentCommitsDays == 0 { return true } ok := true // Make a list of what unique commits we've already fetched for to avoid duplicating work uniqueRefShas := make(map[string]string, len(alreadyFetchedRefs)) for _, ref := range alreadyFetchedRefs { uniqueRefShas[ref.Sha] = ref.Name } // First find any other recent refs if fetchconf.FetchRecentRefsDays > 0 { Print("fetch: %s", tr.Tr.GetN( "Fetching recent branches within %v day", "Fetching recent branches within %v days", fetchconf.FetchRecentRefsDays, fetchconf.FetchRecentRefsDays, )) refsSince := time.Now().AddDate(0, 0, -fetchconf.FetchRecentRefsDays) refs, err := git.RecentBranches(refsSince, fetchconf.FetchRecentRefsIncludeRemotes, cfg.Remote()) if err != nil { Panic(err, tr.Tr.Get("Could not scan for recent refs")) } for _, ref 
:= range refs { // Don't fetch for the same SHA twice if prevRefName, ok := uniqueRefShas[ref.Sha]; ok { if ref.Name != prevRefName { tracerx.Printf("Skipping fetch for %v, already fetched via %v", ref.Name, prevRefName) } } else { uniqueRefShas[ref.Sha] = ref.Name Print("fetch: %s", tr.Tr.Get("Fetching reference %s", ref.Name)) k := fetchRef(ref.Sha, filter) ok = ok && k } } } // For every unique commit we've fetched, check recent commits too if fetchconf.FetchRecentCommitsDays > 0 { for commit, refName := range uniqueRefShas { // We measure from the last commit at the ref summ, err := git.GetCommitSummary(commit) if err != nil { Error(tr.Tr.Get("Couldn't scan commits at %v: %v", refName, err)) continue } Print("fetch: %s", tr.Tr.GetN( "Fetching changes within %v day of %v", "Fetching changes within %v days of %v", fetchconf.FetchRecentCommitsDays, fetchconf.FetchRecentCommitsDays, refName, )) commitsSince := summ.CommitDate.AddDate(0, 0, -fetchconf.FetchRecentCommitsDays) k := fetchPreviousVersions(commit, commitsSince, filter) ok = ok && k } } return ok } func fetchAll() bool { pointers := scanAll() Print("fetch: %s", tr.Tr.Get("Fetching all references...")) return fetchAndReportToChan(pointers, nil, nil) } func scanAll() []*lfs.WrappedPointer { // This could be a long process so use the chan version & report progress logger := tasklog.NewLogger(OutputWriter, tasklog.ForceProgress(cfg.ForceProgress()), ) task := logger.Simple() defer task.Complete() // use temp gitscanner to collect pointers var pointers []*lfs.WrappedPointer var multiErr error var numObjs int64 tempgitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) { if err != nil { if multiErr != nil { multiErr = fmt.Errorf("%v\n%v", multiErr, err) } else { multiErr = err } return } numObjs++ task.Logf("fetch: %s", tr.Tr.GetN("%d object found", "%d objects found", int(numObjs), numObjs)) pointers = append(pointers, p) }) if err := tempgitscanner.ScanAll(nil); err != nil { Panic(err, tr.Tr.Get("Could not scan for Git LFS files")) } if multiErr != nil { Panic(multiErr, tr.Tr.Get("Could not scan for Git LFS files")) } return pointers } // Fetch and report completion of each OID to a channel (optional, pass nil to skip) // Returns true if all completed with no errors, false if errors were written to stderr/log func fetchAndReportToChan(allpointers []*lfs.WrappedPointer, filter *filepathfilter.Filter, out chan<- *lfs.WrappedPointer) bool { ready, pointers, meter := readyAndMissingPointers(allpointers, filter) q := newDownloadQueue( getTransferManifestOperationRemote("download", cfg.Remote()), cfg.Remote(), tq.WithProgress(meter), ) if out != nil { // If we already have it, or it won't be fetched // report it to chan immediately to support pull/checkout for _, p := range ready { out <- p } dlwatch := q.Watch() go func() { // fetch only reports single OID, but OID *might* be referenced by multiple // WrappedPointers if same content is at multiple paths, so map oid->slice oidToPointers := make(map[string][]*lfs.WrappedPointer, len(pointers)) for _, pointer := range pointers { plist := oidToPointers[pointer.Oid] oidToPointers[pointer.Oid] = append(plist, pointer) } for t := range dlwatch { plist, ok := oidToPointers[t.Oid] if !ok { continue } for _, p := range plist { out <- p } } close(out) }() } for _, p := range pointers { tracerx.Printf("fetch %v [%v]", p.Name, p.Oid) q.Add(downloadTransfer(p)) } processQueue := time.Now() q.Wait() tracerx.PerformanceSince("process queue", processQueue) ok := true for _, err := 
range q.Errors() { ok = false FullError(err) } return ok } func readyAndMissingPointers(allpointers []*lfs.WrappedPointer, filter *filepathfilter.Filter) ([]*lfs.WrappedPointer, []*lfs.WrappedPointer, *tq.Meter) { logger := tasklog.NewLogger(os.Stdout, tasklog.ForceProgress(cfg.ForceProgress()), ) meter := buildProgressMeter(false, tq.Download) logger.Enqueue(meter) seen := make(map[string]bool, len(allpointers)) missing := make([]*lfs.WrappedPointer, 0, len(allpointers)) ready := make([]*lfs.WrappedPointer, 0, len(allpointers)) for _, p := range allpointers { // no need to download the same object multiple times if seen[p.Oid] { continue } seen[p.Oid] = true // no need to download objects that exist locally already lfs.LinkOrCopyFromReference(cfg, p.Oid, p.Size) if cfg.LFSObjectExists(p.Oid, p.Size) { ready = append(ready, p) continue } missing = append(missing, p) meter.Add(p.Size) } return ready, missing, meter } func init() { RegisterCommand("fetch", fetchCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&includeArg, "include", "I", "", "Include a list of paths") cmd.Flags().StringVarP(&excludeArg, "exclude", "X", "", "Exclude a list of paths") cmd.Flags().BoolVarP(&fetchRecentArg, "recent", "r", false, "Fetch recent refs & commits") cmd.Flags().BoolVarP(&fetchAllArg, "all", "a", false, "Fetch all LFS files ever referenced") cmd.Flags().BoolVarP(&fetchPruneArg, "prune", "p", false, "After fetching, prune old data") }) } git-lfs-3.6.1/commands/command_filter_process.go000066400000000000000000000271541472372047300217610ustar00rootroot00000000000000package commands import ( "bytes" "fmt" "io" "os" "strings" "sync" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/tq" "github.com/git-lfs/git-lfs/v3/tr" "github.com/git-lfs/pktline" "github.com/spf13/cobra" ) const ( // cleanFilterBufferCapacity is the desired capacity of the // `*git.PacketWriter`'s internal buffer when the filter protocol // dictates the "clean" command. 512 bytes is (in most cases) enough to // hold an entire LFS pointer in memory. cleanFilterBufferCapacity = 512 // smudgeFilterBufferCapacity is the desired capacity of the // `*git.PacketWriter`'s internal buffer when the filter protocol // dictates the "smudge" command. smudgeFilterBufferCapacity = pktline.MaxPacketLength ) // filterSmudgeSkip is a command-line flag owned by the `filter-process` command // dictating whether or not to skip the smudging process, leaving pointers as-is // in the working tree. 
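// Note that smudging is also skipped when the GIT_LFS_SKIP_SMUDGE environment
// variable is set to a true value (see the "skip" assignment in filterCommand
// below, where this flag and the environment setting are combined).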
var filterSmudgeSkip bool func filterCommand(cmd *cobra.Command, args []string) { requireStdin(tr.Tr.Get("This command should be run by the Git filter process")) setupRepository() installHooks(false) s := git.NewFilterProcessScanner(os.Stdin, os.Stdout) if err := s.Init(); err != nil { ExitWithError(err) } caps, err := s.NegotiateCapabilities() if err != nil { ExitWithError(err) } var supportsDelay bool for _, cap := range caps { if cap == "capability=delay" { supportsDelay = true break } } skip := filterSmudgeSkip || cfg.Os.Bool("GIT_LFS_SKIP_SMUDGE", false) filter := filepathfilter.New(cfg.FetchIncludePaths(), cfg.FetchExcludePaths(), filepathfilter.GitIgnore) ptrs := make(map[string]*lfs.Pointer) var q *tq.TransferQueue var malformed []string var malformedOnWindows []string var closeOnce *sync.Once var available chan *tq.Transfer gitfilter := lfs.NewGitFilter(cfg) for s.Scan() { var n int64 var err error var delayed bool var w *pktline.PktlineWriter req := s.Request() switch req.Header["command"] { case "clean": s.WriteStatus(statusFromErr(nil)) w = pktline.NewPktlineWriter(os.Stdout, cleanFilterBufferCapacity) var ptr *lfs.Pointer ptr, err = clean(gitfilter, w, req.Payload, req.Header["pathname"], -1) if ptr != nil { n = ptr.Size } case "smudge": if q == nil && supportsDelay { closeOnce = new(sync.Once) available = make(chan *tq.Transfer) if cfg.AutoDetectRemoteEnabled() { // update current remote with information gained by treeish newRemote := git.FirstRemoteForTreeish(req.Header["treeish"]) if newRemote != "" { cfg.SetRemote(newRemote) } } q = tq.NewTransferQueue( tq.Download, getTransferManifestOperationRemote("download", cfg.Remote()), cfg.Remote(), tq.RemoteRef(currentRemoteRef()), tq.WithBatchSize(cfg.TransferBatchSize()), ) go infiniteTransferBuffer(q, available) } w = pktline.NewPktlineWriter(os.Stdout, smudgeFilterBufferCapacity) if req.Header["can-delay"] == "1" { var ptr *lfs.Pointer n, delayed, ptr, err = delayedSmudge(gitfilter, s, w, req.Payload, q, req.Header["pathname"], skip, filter) if delayed { ptrs[req.Header["pathname"]] = ptr } } else { s.WriteStatus(statusFromErr(nil)) from, ferr := incomingOrCached(req.Payload, ptrs[req.Header["pathname"]]) if ferr != nil { break } n, err = smudge(gitfilter, w, from, req.Header["pathname"], skip, filter) if err == nil { delete(ptrs, req.Header["pathname"]) } } case "list_available_blobs": closeOnce.Do(func() { // The first time that Git sends us the // 'list_available_blobs' command, it is a given // that it is now waiting until all delayed blobs // are available within this smudge filter call. // // This means that, by the time that we're here, // we have seen all entries in the checkout, and // should therefore instruct the transfer queue // to make a batch out of whatever remaining // items it has, and then close itself. // // This function call is wrapped in a // `sync.(*Once).Do()` call so we only call // `q.Wait()` once, and is called via a // goroutine since `q.Wait()` is blocking. go q.Wait() }) // On the first, and all subsequent, calls to // list_available_blobs, we read items from `tq.Watch()` // until a read from that channel becomes blocking (in // other words, we read until there are no more items // immediately ready to be sent back to Git). paths := pathnames(readAvailable(available, q.BatchSize())) if len(paths) == 0 { // If `len(paths) == 0`, `tq.Watch()` has // closed, indicating that all items have been // completely processed, and therefore, sent // back to Git for checkout.
for path := range ptrs { // If we sent a path to Git but it // didn't ask for the smudge contents, // that path is available and Git should // accept it later. paths = append(paths, fmt.Sprintf("pathname=%s", path)) } // At this point all items have been completely processed, // so we explicitly close the transfer queue. If Git issues // another `smudge` command, the transfer queue will be // created from scratch. The transfer queue needs to be recreated // because it has already been partially closed by `q.Wait()`. q = nil } err = s.WriteList(paths) default: ExitWithError(errors.New(tr.Tr.Get("unknown command %q", req.Header["command"]))) } if errors.IsNotAPointerError(err) { malformed = append(malformed, req.Header["pathname"]) err = nil } else if possiblyMalformedObjectSize(n) { malformedOnWindows = append(malformedOnWindows, req.Header["pathname"]) } var status git.FilterProcessStatus if delayed { // If delayed, there is no need to call w.Flush() since // no data was written. Calculate the status from the // given error using 'delayedStatusFromErr'. status = delayedStatusFromErr(err) } else if ferr := w.Flush(); ferr != nil { // Otherwise, we do need to call w.Flush(), since we // have to assume that data was written. If the flush // operation was unsuccessful, calculate the status // using 'statusFromErr'. status = statusFromErr(ferr) } else { // If the above flush was successful, we calculate the // status from the above clean, smudge, or // list_available_blobs command using statusFromErr, // since we did not delay. status = statusFromErr(err) } s.WriteStatus(status) } if len(malformed) > 0 { fmt.Fprintln(os.Stderr, tr.Tr.GetN( "Encountered %d file that should have been a pointer, but wasn't:", "Encountered %d files that should have been pointers, but weren't:", len(malformed), len(malformed), )) for _, m := range malformed { fmt.Fprintf(os.Stderr, "\t%s\n", m) } } if len(malformedOnWindows) > 0 && cfg.Git.Bool("lfs.largefilewarning", !git.IsGitVersionAtLeast("2.34.0")) { fmt.Fprintln(os.Stderr, tr.Tr.GetN( "Encountered %d file that may not have been copied correctly on Windows:", "Encountered %d files that may not have been copied correctly on Windows:", len(malformedOnWindows), len(malformedOnWindows), )) for _, m := range malformedOnWindows { fmt.Fprintf(os.Stderr, "\t%s\n", m) } fmt.Fprint(os.Stderr, "\n", tr.Tr.Get("See: `git lfs help smudge` for more details."), "\n") } if err := s.Err(); err != nil && err != io.EOF { ExitWithError(err) } } // infiniteTransferBuffer streams the results of q.Watch() into "available" as // if available had an infinite channel buffer. func infiniteTransferBuffer(q *tq.TransferQueue, available chan<- *tq.Transfer) { // Stream results from q.Watch() into chan "available" via an infinite // buffer. watch := q.Watch() // pending is used to keep track of an ordered list of available // `*tq.Transfer`'s that cannot be written to "available" without // blocking. var pending []*tq.Transfer for { if len(pending) > 0 { select { case t, ok := <-watch: if !ok { // If the list of pending elements is // non-empty, stream them out (even if // they block), and then close(). for _, t = range pending { available <- t } close(available) return } pending = append(pending, t) case available <- pending[0]: // Otherwise, dequeue and shift the first // element from pending onto available. pending = pending[1:] } } else { t, ok := <-watch if !ok { // If watch is closed, the "tq" is done, and // there are no items on the buffer. Return // immediately.
close(available) return } select { case available <- t: // Copy an item directly from <-watch onto available<-. default: // Otherwise, if that would have blocked, make // the new read pending. pending = append(pending, t) } } } } // incomingOrCached returns an io.Reader that is either the contents of the // given io.Reader "r", or the encoded contents of "ptr". It returns an error if // there was an error reading from "r". // // This is done because when a `command=smudge` with `can-delay=0` is issued, // the entry's contents are not sent, and must be re-encoded from the stored // pointer corresponding to the request's filepath. func incomingOrCached(r io.Reader, ptr *lfs.Pointer) (io.Reader, error) { buf := make([]byte, 1024) n, err := r.Read(buf) buf = buf[:n] if n == 0 { if ptr == nil { // If we read no data from the given io.Reader "r" _and_ // there was no data to fall back on, return an empty // io.Reader yielding no data. return bytes.NewReader(buf), nil } // If we read no data from the given io.Reader "r", _and_ there // is a pointer that we can fall back on, return an io.Reader // that yields the encoded version of the given pointer. return strings.NewReader(ptr.Encoded()), nil } if err == io.EOF { return bytes.NewReader(buf), nil } return io.MultiReader(bytes.NewReader(buf), r), err } // readAvailable satisfies the accumulation semantics for the // 'list_available_blobs' command. It accumulates items until: // // 1. Reading from the channel of available items blocks, or ... // 2. There is one item available, or ... // 3. The 'tq.TransferQueue' is completed. func readAvailable(ch <-chan *tq.Transfer, cap int) []*tq.Transfer { ts := make([]*tq.Transfer, 0, cap) for { select { case t, ok := <-ch: if !ok { return ts } ts = append(ts, t) default: if len(ts) > 0 { return ts } t, ok := <-ch if !ok { return ts } return append(ts, t) } } } // pathnames formats a list of *tq.Transfers as a valid response to the // 'list_available_blobs' command. func pathnames(ts []*tq.Transfer) []string { pathnames := make([]string, 0, len(ts)) for _, t := range ts { pathnames = append(pathnames, fmt.Sprintf("pathname=%s", t.Name)) } return pathnames } // statusFromErr returns the status code that should be sent over the filter // protocol based on a given error, "err". func statusFromErr(err error) git.FilterProcessStatus { if err != nil && err != io.EOF { return git.StatusError } return git.StatusSuccess } // delayedStatusFromErr returns the status code that should be sent over the // filter protocol based on a given error, "err" when the blob smudge operation // was delayed. 
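// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the original git-lfs
// source. It shows the "infinite channel buffer" bridge implemented by
// infiniteTransferBuffer above, generalized over any element type; the name
// exampleBridge is hypothetical. Reads from "in" are never blocked by a slow
// reader of "out", because elements that cannot be forwarded immediately are
// parked in an ordered pending slice.]

func exampleBridge[T any](in <-chan T, out chan<- T) {
	var pending []T
	for {
		if len(pending) == 0 {
			// Nothing parked: block until the producer sends or closes.
			t, ok := <-in
			if !ok {
				close(out)
				return
			}
			pending = append(pending, t)
			continue
		}
		select {
		case t, ok := <-in:
			if !ok {
				// Producer is done: flush whatever is parked, then close.
				for _, v := range pending {
					out <- v
				}
				close(out)
				return
			}
			pending = append(pending, t)
		case out <- pending[0]:
			// The consumer accepted the oldest parked element.
			pending = pending[1:]
		}
	}
}
// ---------------------------------------------------------------------------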
func delayedStatusFromErr(err error) git.FilterProcessStatus { status := statusFromErr(err) switch status { case git.StatusSuccess: return git.StatusDelay default: return status } } func init() { RegisterCommand("filter-process", filterCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&filterSmudgeSkip, "skip", "s", false, "") }) } git-lfs-3.6.1/commands/command_fsck.go000066400000000000000000000143011472372047300176520ustar00rootroot00000000000000package commands import ( "crypto/sha256" "encoding/hex" "fmt" "io" "os" "path/filepath" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) var ( fsckDryRun bool fsckObjects bool fsckPointers bool ) type corruptPointer struct { blobOid string treeOid string lfsOid string path string message string kind string } func (p corruptPointer) String() string { return fmt.Sprintf("%s: %s", p.kind, p.message) } // TODO(zeroshirts): 'git fsck' reports status (percentage, current#/total) as // it checks... we should do the same, as we are rehashing potentially gigs and // gigs of content. // // NOTE(zeroshirts): Ideally git would have hooks for fsck such that we could // chain a lfs-fsck, but I don't think it does. func fsckCommand(cmd *cobra.Command, args []string) { installHooks(false) setupRepository() useIndex := false exclude := "" include := "HEAD" switch len(args) { case 0: useIndex = true ref, err := git.CurrentRef() if err != nil { ExitWithError(err) } include = ref.Sha case 1: pieces := strings.SplitN(args[0], "..", 2) refs, err := git.ResolveRefs(pieces) if err != nil { ExitWithError(err) } if len(refs) == 2 { exclude = refs[0].Sha include = refs[1].Sha } else { include = refs[0].Sha } } if !fsckPointers && !fsckObjects { fsckPointers = true fsckObjects = true } ok := true var corruptOids []string var corruptPointers []corruptPointer if fsckObjects { corruptOids = doFsckObjects(include, exclude, useIndex) ok = ok && len(corruptOids) == 0 } if fsckPointers { corruptPointers = doFsckPointers(include, exclude) ok = ok && len(corruptPointers) == 0 } if ok { Print(tr.Tr.Get("Git LFS fsck OK")) return } if fsckDryRun || len(corruptOids) == 0 { os.Exit(1) } badDir := filepath.Join(cfg.LFSStorageDir(), "bad") Print("objects: repair: %s", tr.Tr.Get("moving corrupt objects to %s", badDir)) if err := tools.MkdirAll(badDir, cfg); err != nil { ExitWithError(err) } for _, oid := range corruptOids { badFile := filepath.Join(badDir, oid) srcFile := cfg.Filesystem().ObjectPathname(oid) if srcFile == os.DevNull { continue } if err := os.Rename(srcFile, badFile); err != nil { if os.IsNotExist(err) { continue } ExitWithError(err) } } os.Exit(1) } // doFsckObjects checks that the objects in the given ref are correct and exist. func doFsckObjects(include, exclude string, useIndex bool) []string { var corruptOids []string gitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) { if err == nil { var pointerOk bool pointerOk, err = fsckPointer(p.Name, p.Oid, p.Size) if !pointerOk { corruptOids = append(corruptOids, p.Oid) } } if err != nil { Panic(err, tr.Tr.Get("Error checking Git LFS files")) } }) // If 'lfs.fetchexclude' is set and 'git lfs fsck' is run after the // initial fetch (i.e., has elected to fetch a subset of Git LFS // objects), the "missing" ones will fail the fsck. 
// // Attach a filepathfilter to avoid _only_ the excluded paths. gitscanner.Filter = filepathfilter.New(nil, cfg.FetchExcludePaths(), filepathfilter.GitIgnore) if exclude == "" { if err := gitscanner.ScanRef(include, nil); err != nil { ExitWithError(err) } } else { if err := gitscanner.ScanRefRange(include, exclude, nil); err != nil { ExitWithError(err) } } if useIndex { if err := gitscanner.ScanIndex("HEAD", "", nil); err != nil { ExitWithError(err) } } return corruptOids } // doFsckPointers checks that the pointers in the given ref are correct and canonical. func doFsckPointers(include, exclude string) []corruptPointer { var corruptPointers []corruptPointer gitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) { if p != nil { Debug(tr.Tr.Get("Examining %v (%v)", p.Oid, p.Name)) if !p.Canonical { cp := corruptPointer{ blobOid: p.Sha1, lfsOid: p.Oid, message: tr.Tr.Get("Pointer for %s (blob %s) was not canonical", p.Oid, p.Sha1), kind: "nonCanonicalPointer", } Print("pointer: %s", cp.String()) corruptPointers = append(corruptPointers, cp) } } else if errors.IsPointerScanError(err) { psErr, ok := err.(errors.PointerScanError) if ok { cp := corruptPointer{ treeOid: psErr.OID(), path: psErr.Path(), message: tr.Tr.Get("%q (treeish %s) should have been a pointer but was not", psErr.Path(), psErr.OID()), kind: "unexpectedGitObject", } Print("pointer: %s", cp.String()) corruptPointers = append(corruptPointers, cp) } } else { Panic(err, tr.Tr.Get("Error checking Git LFS files")) } }) if exclude == "" { if err := gitscanner.ScanRefByTree(include, nil); err != nil { ExitWithError(err) } } else { if err := gitscanner.ScanRefRangeByTree(include, exclude, nil); err != nil { ExitWithError(err) } } return corruptPointers } func fsckPointer(name, oid string, size int64) (bool, error) { path := cfg.Filesystem().ObjectPathname(oid) Debug(tr.Tr.Get("Examining %v (%v)", name, path)) f, err := os.Open(path) if pErr, pOk := err.(*os.PathError); pOk { // This is an empty file. No problem here. 
if size == 0 { return true, nil } Print("objects: openError: %s", tr.Tr.Get("%s (%s) could not be checked: %s", name, oid, pErr.Err)) return false, nil } if err != nil { return false, err } oidHash := sha256.New() _, err = io.Copy(oidHash, f) f.Close() if err != nil { return false, err } recalculatedOid := hex.EncodeToString(oidHash.Sum(nil)) if recalculatedOid == oid { return true, nil } Print(fmt.Sprintf("objects: corruptObject: %s", tr.Tr.Get("%s (%s) is corrupt", name, oid))) return false, nil } func init() { RegisterCommand("fsck", fsckCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&fsckDryRun, "dry-run", "d", false, "List corrupt objects without deleting them.") cmd.Flags().BoolVarP(&fsckObjects, "objects", "", false, "Fsck objects.") cmd.Flags().BoolVarP(&fsckPointers, "pointers", "", false, "Fsck pointers.") }) } git-lfs-3.6.1/commands/command_install.go000066400000000000000000000067131472372047300204020ustar00rootroot00000000000000package commands import ( "os" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) var ( fileInstall = "" forceInstall = false localInstall = false worktreeInstall = false manualInstall = false systemInstall = false skipSmudgeInstall = false skipRepoInstall = false ) func installCommand(cmd *cobra.Command, args []string) { if err := cmdInstallOptions().Install(); err != nil { Print(tr.Tr.Get("warning: %s", err.Error())) Print(tr.Tr.Get("Run `git lfs install --force` to reset Git configuration.")) os.Exit(2) } if !skipRepoInstall && (localInstall || worktreeInstall || cfg.InRepo()) { installHooksCommand(cmd, args) } Print(tr.Tr.Get("Git LFS initialized.")) } func cmdInstallOptions() *lfs.FilterOptions { requireGitVersion() if localInstall || worktreeInstall { setupRepository() } destArgs := 0 if localInstall { destArgs++ } if worktreeInstall { destArgs++ } if systemInstall { destArgs++ } if fileInstall != "" { destArgs++ } if destArgs > 1 { Exit(tr.Tr.Get("Only one of the --local, --system, --worktree, and --file options can be specified.")) } // This call will return -1 on Windows; don't warn about this there, // since we can't detect it correctly. uid := os.Geteuid() if systemInstall && uid != 0 && uid != -1 { Print(tr.Tr.Get("warning: current user is not root/admin, system install is likely to fail.")) } return &lfs.FilterOptions{ GitConfig: cfg.GitConfig(), Force: forceInstall, File: fileInstall, Local: localInstall, Worktree: worktreeInstall, System: systemInstall, SkipSmudge: skipSmudgeInstall, } } func installHooksCommand(cmd *cobra.Command, args []string) { updateForce = forceInstall // TODO(@ttaylorr): this is a hack since the `git-lfs-install(1)` calls // into the function that implements `git-lfs-update(1)`. Given that, // there is no way to pass flags into that function, other than // hijacking the flags that `git-lfs-update(1)` already owns. // // At a later date, extract `git-lfs-update(1)`-related logic into its // own function, and translate this flag as a boolean argument to it. 
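// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the original git-lfs
// source. It isolates the integrity check performed by fsckPointer in
// command_fsck.go above: a Git LFS OID is the SHA-256 of the object's
// contents, so verification is a streaming hash plus a hex comparison. The
// name exampleVerifyObject is hypothetical; it assumes the standard library
// imports "crypto/sha256", "encoding/hex", "io", and "os".]

func exampleVerifyObject(path, expectedOid string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()

	// Hash the file in a streaming fashion; objects may be many gigabytes.
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return false, err
	}
	return hex.EncodeToString(h.Sum(nil)) == expectedOid, nil
}
// ---------------------------------------------------------------------------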
updateManual = manualInstall updateCommand(cmd, args) } func init() { RegisterCommand("install", installCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&forceInstall, "force", "f", false, "Set the Git LFS global config, overwriting previous values.") cmd.Flags().BoolVarP(&localInstall, "local", "l", false, "Set the Git LFS config for the local Git repository only.") cmd.Flags().StringVarP(&fileInstall, "file", "", "", "Set the Git LFS config for the given configuration file only.") if git.IsGitVersionAtLeast("2.20.0") { cmd.Flags().BoolVarP(&worktreeInstall, "worktree", "w", false, "Set the Git LFS config for the current Git working tree, if multiple working trees are configured; otherwise, the same as --local.") } cmd.Flags().BoolVarP(&systemInstall, "system", "", false, "Set the Git LFS config in system-wide scope.") cmd.Flags().BoolVarP(&skipSmudgeInstall, "skip-smudge", "s", false, "Skip automatic downloading of objects on clone or pull.") cmd.Flags().BoolVarP(&skipRepoInstall, "skip-repo", "", false, "Skip repo setup, just install global filters.") cmd.Flags().BoolVarP(&manualInstall, "manual", "m", false, "Print instructions for manual install.") cmd.AddCommand(NewCommand("hooks", installHooksCommand)) }) } git-lfs-3.6.1/commands/command_lock.go000066400000000000000000000067321472372047300176650ustar00rootroot00000000000000package commands import ( "encoding/json" "os" "path/filepath" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/locking" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) var ( lockRemote string ) func lockCommand(cmd *cobra.Command, args []string) { if len(lockRemote) > 0 { cfg.SetRemote(lockRemote) } lockData, err := computeLockData() if err != nil { ExitWithError(err) } refUpdate := git.NewRefUpdate(cfg.Git, cfg.PushRemote(), cfg.CurrentRef(), nil) lockClient := newLockClient() lockClient.RemoteRef = refUpdate.RemoteRef() defer lockClient.Close() success := true locks := make([]locking.Lock, 0, len(args)) for _, path := range args { path, err := lockPath(lockData, path) if err != nil { Error(err.Error()) success = false continue } lock, err := lockClient.LockFile(path) if err != nil { Error(tr.Tr.Get("Locking %s failed: %v", path, errors.Cause(err))) success = false continue } locks = append(locks, lock) if locksCmdFlags.JSON { continue } Print(tr.Tr.Get("Locked %s", path)) } if locksCmdFlags.JSON { if err := json.NewEncoder(os.Stdout).Encode(locks); err != nil { Error(err.Error()) success = false } } if !success { lockClient.Close() os.Exit(2) } } type lockData struct { rootDir string workingDir string } // computeLockData computes data about the given repository and working // directory to use in lockPath. func computeLockData() (*lockData, error) { wd, err := tools.Getwd() if err != nil { return nil, err } wd, err = tools.CanonicalizeSystemPath(wd) if err != nil { return nil, err } return &lockData{ rootDir: cfg.LocalWorkingDir(), workingDir: wd, }, nil } // lockPath relativizes the given filepath such that it is relative to the root // path of the repository it is contained within, taking into account the // working directory of the caller. // // lockPath also respects different filesystem directory separators, so that a // Windows path of "\foo\bar" will be normalized to "foo/bar". // // If the file path cannot be determined, an error will be returned. If the file // in question is actually a directory, an error will be returned.
Otherwise, // the cleaned path will be returned. // // For example: // - Working directory: /code/foo/bar/ // - Repository root: /code/foo/ // - File to lock: ./baz // - Resolved path bar/baz func lockPath(data *lockData, file string) (string, error) { var abs string var err error if filepath.IsAbs(file) { abs, err = tools.CanonicalizeSystemPath(file) if err != nil { return "", errors.New(tr.Tr.Get("unable to canonicalize path %q: %v", file, err)) } } else { abs = filepath.Join(data.workingDir, file) } path, err := filepath.Rel(data.rootDir, abs) if err != nil { return "", err } path = filepath.ToSlash(path) if strings.HasPrefix(path, "../") { return "", errors.New(tr.Tr.Get("unable to canonicalize path %q", path)) } if stat, err := os.Stat(abs); err == nil && stat.IsDir() { return path, errors.New(tr.Tr.Get("cannot lock directory: %s", file)) } return filepath.ToSlash(path), nil } func init() { RegisterCommand("lock", lockCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&lockRemote, "remote", "r", "", "specify which remote to use when interacting with locks") cmd.Flags().BoolVarP(&locksCmdFlags.JSON, "json", "", false, "print output in json") }) } git-lfs-3.6.1/commands/command_locks.go000066400000000000000000000127761472372047300200550ustar00rootroot00000000000000package commands import ( "io" "os" "sort" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/locking" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) var ( locksCmdFlags = new(locksFlags) ) func locksCommand(cmd *cobra.Command, args []string) { lockData, err := computeLockData() if err != nil { ExitWithError(err) } filters, err := locksCmdFlags.Filters(lockData) if err != nil { Exit(tr.Tr.Get("Error building filters: %v", err)) } if len(lockRemote) > 0 { cfg.SetRemote(lockRemote) } refUpdate := git.NewRefUpdate(cfg.Git, cfg.PushRemote(), cfg.CurrentRef(), nil) lockClient := newLockClient() lockClient.RemoteRef = refUpdate.RemoteRef() defer lockClient.Close() if locksCmdFlags.Cached { if locksCmdFlags.Limit > 0 { Exit(tr.Tr.Get("--cached option can't be combined with --limit")) } if len(filters) > 0 { Exit(tr.Tr.Get("--cached option can't be combined with filters")) } if locksCmdFlags.Local { Exit(tr.Tr.Get("--cached option can't be combined with --local")) } } if locksCmdFlags.Verify { if len(filters) > 0 { Exit(tr.Tr.Get("--verify option can't be combined with filters")) } if locksCmdFlags.Local { Exit(tr.Tr.Get("--verify option can't be combined with --local")) } } var locks []locking.Lock var locksOwned map[locking.Lock]bool var jsonWriteFunc func(io.Writer) error if locksCmdFlags.Verify { var ourLocks, theirLocks []locking.Lock ourLocks, theirLocks, err = lockClient.SearchLocksVerifiable(locksCmdFlags.Limit, locksCmdFlags.Cached) jsonWriteFunc = func(writer io.Writer) error { return lockClient.EncodeLocksVerifiable(ourLocks, theirLocks, writer) } locks = append(ourLocks, theirLocks...) 
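// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the original git-lfs
// source. It condenses the relativization performed by lockPath in
// command_lock.go above using only the standard library ("fmt",
// "path/filepath", "strings"); the name exampleRepoRelativePath is
// hypothetical. With the values from that function's comment — root
// "/code/foo", working directory "/code/foo/bar", file "./baz" — it
// returns "bar/baz".]

func exampleRepoRelativePath(rootDir, workingDir, file string) (string, error) {
	abs := file
	if !filepath.IsAbs(file) {
		// Interpret relative arguments against the caller's working directory.
		abs = filepath.Join(workingDir, file)
	}
	rel, err := filepath.Rel(rootDir, abs)
	if err != nil {
		return "", err
	}
	// Normalize Windows-style separators so "\foo\bar" becomes "foo/bar".
	rel = filepath.ToSlash(rel)
	if strings.HasPrefix(rel, "../") {
		return "", fmt.Errorf("path %q escapes the repository root", rel)
	}
	return rel, nil
}
// ---------------------------------------------------------------------------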
locksOwned = make(map[locking.Lock]bool) for _, lock := range ourLocks { locksOwned[lock] = true } } else { locks, err = lockClient.SearchLocks(filters, locksCmdFlags.Limit, locksCmdFlags.Local, locksCmdFlags.Cached) jsonWriteFunc = func(writer io.Writer) error { return lockClient.EncodeLocks(locks, writer) } } // Print any we got before exiting if locksCmdFlags.JSON { if err := jsonWriteFunc(os.Stdout); err != nil { Error(err.Error()) } return } var maxPathLen int var maxNameLen int lockPaths := make([]string, 0, len(locks)) locksByPath := make(map[string]locking.Lock) for _, lock := range locks { lockPaths = append(lockPaths, lock.Path) locksByPath[lock.Path] = lock maxPathLen = tools.MaxInt(maxPathLen, len(lock.Path)) if lock.Owner != nil { maxNameLen = tools.MaxInt(maxNameLen, len(lock.Owner.Name)) } } sort.Strings(lockPaths) for _, lockPath := range lockPaths { var ownerName string lock := locksByPath[lockPath] if lock.Owner != nil { ownerName = lock.Owner.Name } pathPadding := tools.MaxInt(maxPathLen-len(lock.Path), 0) namePadding := tools.MaxInt(maxNameLen-len(ownerName), 0) kind := "" if locksOwned != nil { if locksOwned[lock] { kind = "O " } else { kind = " " } } Print("%s%s%s\t%s%s\tID:%s", kind, lock.Path, strings.Repeat(" ", pathPadding), ownerName, strings.Repeat(" ", namePadding), lock.Id, ) } if err != nil { Exit(tr.Tr.Get("Error while retrieving locks: %v", errors.Cause(err))) } } // locksFlags wraps up and holds all of the flags that can be given to the // `git lfs locks` command. type locksFlags struct { // Path is an optional filter parameter to filter against the lock's // path. Path string // Id is an optional filter parameter used to filter against the lock's // ID. Id string // Limit is an optional request parameter sent to the server, used to // limit the number of lock results returned. Limit int // Local limits the scope of lock reporting to the locally cached record // of locks for the current user and doesn't query the server. Local bool // JSON is an optional parameter to output data in JSON format. JSON bool // for non-local queries, report cached query results from the last query // instead of actually querying the server again Cached bool // for non-local queries, verify lock owner on server and // denote our locks in output Verify bool } // Filters produces a filter map based on the locksFlags instance.
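// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the original git-lfs
// source. It shows the two-pass column alignment used by locksCommand above:
// first measure the widest path and owner name, then pad each row so the
// columns line up. The name exampleAlignRows is hypothetical and assumes
// the "fmt" and "strings" imports.]

func exampleAlignRows(rows [][2]string) []string {
	var maxPath, maxOwner int
	for _, r := range rows {
		if len(r[0]) > maxPath {
			maxPath = len(r[0])
		}
		if len(r[1]) > maxOwner {
			maxOwner = len(r[1])
		}
	}
	out := make([]string, 0, len(rows))
	for _, r := range rows {
		// Pad each cell to the width of the widest entry in its column.
		out = append(out, fmt.Sprintf("%s%s\t%s%s",
			r[0], strings.Repeat(" ", maxPath-len(r[0])),
			r[1], strings.Repeat(" ", maxOwner-len(r[1]))))
	}
	return out
}
// ---------------------------------------------------------------------------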
func (l *locksFlags) Filters(data *lockData) (map[string]string, error) { filters := make(map[string]string) if l.Path != "" { path, err := lockPath(data, l.Path) if err != nil { return nil, err } filters["path"] = path } if l.Id != "" { filters["id"] = l.Id } return filters, nil } func init() { RegisterCommand("locks", locksCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&lockRemote, "remote", "r", "", "specify which remote to use when interacting with locks") cmd.Flags().StringVarP(&locksCmdFlags.Path, "path", "p", "", "filter locks results matching a particular path") cmd.Flags().StringVarP(&locksCmdFlags.Id, "id", "i", "", "filter locks results matching a particular ID") cmd.Flags().IntVarP(&locksCmdFlags.Limit, "limit", "l", 0, "optional limit for number of results to return") cmd.Flags().BoolVarP(&locksCmdFlags.Local, "local", "", false, "only list cached local record of own locks") cmd.Flags().BoolVarP(&locksCmdFlags.Cached, "cached", "", false, "list cached lock information from the last remote query, instead of actually querying the server") cmd.Flags().BoolVarP(&locksCmdFlags.Verify, "verify", "", false, "verify lock owner on server and mark own locks by 'O'") cmd.Flags().BoolVarP(&locksCmdFlags.JSON, "json", "", false, "print output in json") }) } git-lfs-3.6.1/commands/command_logs.go000066400000000000000000000035351472372047300176770ustar00rootroot00000000000000package commands import ( "os" "path/filepath" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) func logsCommand(cmd *cobra.Command, args []string) { for _, path := range sortedLogs() { Print(path) } } func logsLastCommand(cmd *cobra.Command, args []string) { logs := sortedLogs() if len(logs) < 1 { Print(tr.Tr.Get("No logs to show")) return } logsShowCommand(cmd, logs[len(logs)-1:]) } func logsShowCommand(cmd *cobra.Command, args []string) { if len(args) == 0 { Print(tr.Tr.Get("Supply a log name.")) return } name := args[0] by, err := os.ReadFile(filepath.Join(cfg.LocalLogDir(), name)) if err != nil { Exit(tr.Tr.Get("Error reading log: %s", name)) } Debug(tr.Tr.Get("Reading log: %s", name)) os.Stdout.Write(by) } func logsClearCommand(cmd *cobra.Command, args []string) { err := os.RemoveAll(cfg.LocalLogDir()) if err != nil { Panic(err, tr.Tr.Get("Error clearing %s", cfg.LocalLogDir())) } Print(tr.Tr.Get("Cleared %s", cfg.LocalLogDir())) } func logsBoomtownCommand(cmd *cobra.Command, args []string) { Debug(tr.Tr.Get("Sample debug message")) err := errors.Wrapf(errors.New(tr.Tr.Get("Sample wrapped error message")), tr.Tr.Get("Sample error message")) Panic(err, tr.Tr.Get("Sample panic message")) } func sortedLogs() []string { fileinfos, err := os.ReadDir(cfg.LocalLogDir()) if err != nil { return []string{} } names := make([]string, 0, len(fileinfos)) for _, info := range fileinfos { if info.IsDir() { continue } names = append(names, info.Name()) } return names } func init() { RegisterCommand("logs", logsCommand, func(cmd *cobra.Command) { cmd.AddCommand( NewCommand("last", logsLastCommand), NewCommand("show", logsShowCommand), NewCommand("clear", logsClearCommand), NewCommand("boomtown", logsBoomtownCommand), ) }) } git-lfs-3.6.1/commands/command_ls_files.go000066400000000000000000000126041472372047300205300ustar00rootroot00000000000000package commands import ( "encoding/json" "os" "path/filepath" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/tools/humanize" 
"github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) var ( longOIDs = false lsFilesScanAll = false lsFilesScanDeleted = false lsFilesShowSize = false lsFilesShowNameOnly = false lsFilesJSON = false debug = false ) type lsFilesObject struct { Name string `json:"name"` Size int64 `json:"size"` Checkout bool `json:"checkout"` Downloaded bool `json:"downloaded"` OidType string `json:"oid_type"` Oid string `json:"oid"` Version string `json:"version"` } func lsFilesCommand(cmd *cobra.Command, args []string) { setupRepository() var ref string var includeRef string var scanRange = false if len(args) > 0 { if lsFilesScanAll { Exit(tr.Tr.Get("Cannot use --all with explicit reference")) } else if args[0] == "--all" { // Since --all is a valid argument to "git rev-parse", // if we try to give it to git.ResolveRef below, we'll // get an unexpected result. // // So, let's check early that the caller invoked the // command correctly. Exit(tr.Tr.Get("Did you mean `git lfs ls-files --all --` ?")) } ref = args[0] if len(args) > 1 { if lsFilesScanDeleted { Exit(tr.Tr.Get("Cannot use --deleted with reference range")) } includeRef = args[1] scanRange = true } } else { fullref, err := git.CurrentRef() if err != nil { ref, err = git.EmptyTree() if err != nil { ExitWithError(errors.Wrap( err, tr.Tr.Get("Could not read empty Git tree object"))) } } else { ref = fullref.Sha } } showOidLen := 10 if longOIDs { showOidLen = 64 } seen := make(map[string]struct{}) var items []lsFilesObject gitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) { if err != nil { Exit(tr.Tr.Get("Could not scan for Git LFS tree: %s", err)) return } if p.Size == 0 { return } if !lsFilesScanAll && !scanRange { if _, ok := seen[p.Name]; ok { return } } if debug { // TRANSLATORS: these strings should have the colons // aligned in a column. Print( tr.Tr.Get("filepath: %s\n size: %d\ncheckout: %v\ndownload: %v\n oid: %s %s\n version: %s\n", p.Name, p.Size, fileExistsOfSize(p), cfg.LFSObjectExists(p.Oid, p.Size), p.OidType, p.Oid, p.Version)) } else if lsFilesJSON { items = append(items, lsFilesObject{ Name: p.Name, Size: p.Size, Checkout: fileExistsOfSize(p), Downloaded: cfg.LFSObjectExists(p.Oid, p.Size), OidType: p.OidType, Oid: p.Oid, Version: p.Version, }) } else { msg := []string{p.Oid[:showOidLen], lsFilesMarker(p), p.Name} if lsFilesShowNameOnly { msg = []string{p.Name} } if lsFilesShowSize { size := humanize.FormatBytes(uint64(p.Size)) msg = append(msg, "("+size+")") } Print(strings.Join(msg, " ")) } seen[p.Name] = struct{}{} }) includeArg, excludeArg := getIncludeExcludeArgs(cmd) gitscanner.Filter = buildFilepathFilter(cfg, includeArg, excludeArg, false) if len(args) == 0 { // Only scan the index when "git lfs ls-files" was invoked with // no arguments. // // Do so to avoid showing "mixed" results, e.g., ls-files output // from a specific historical revision, and the index. 
if err := gitscanner.ScanIndex(ref, "", nil); err != nil { Exit(tr.Tr.Get("Could not scan for Git LFS index: %s", err)) } } if lsFilesScanAll { if err := gitscanner.ScanAll(nil); err != nil { Exit(tr.Tr.Get("Could not scan for Git LFS history: %s", err)) } } else { var err error if lsFilesScanDeleted { err = gitscanner.ScanRefWithDeleted(ref, nil) } else if scanRange { err = gitscanner.ScanRefRange(includeRef, ref, nil) } else { err = gitscanner.ScanTree(ref, nil) } if err != nil { Exit(tr.Tr.Get("Could not scan for Git LFS tree: %s", err)) } } if lsFilesJSON { data := struct { Files []lsFilesObject `json:"files"` }{Files: items} encoder := json.NewEncoder(os.Stdout) encoder.SetIndent("", " ") if err := encoder.Encode(data); err != nil { ExitWithError(err) } } } // Returns true if a pointer appears to have been properly smudged on checkout func fileExistsOfSize(p *lfs.WrappedPointer) bool { path := cfg.Filesystem().DecodePathname(p.Name) info, err := os.Stat(filepath.Join(cfg.LocalWorkingDir(), path)) return err == nil && info.Size() == p.Size } func lsFilesMarker(p *lfs.WrappedPointer) string { if fileExistsOfSize(p) { return "*" } return "-" } func init() { RegisterCommand("ls-files", lsFilesCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&longOIDs, "long", "l", false, "") cmd.Flags().BoolVarP(&lsFilesShowSize, "size", "s", false, "") cmd.Flags().BoolVarP(&lsFilesShowNameOnly, "name-only", "n", false, "") cmd.Flags().BoolVarP(&debug, "debug", "d", false, "") cmd.Flags().BoolVarP(&lsFilesScanAll, "all", "a", false, "") cmd.Flags().BoolVar(&lsFilesScanDeleted, "deleted", false, "") cmd.Flags().StringVarP(&includeArg, "include", "I", "", "Include a list of paths") cmd.Flags().StringVarP(&excludeArg, "exclude", "X", "", "Exclude a list of paths") cmd.Flags().BoolVarP(&lsFilesJSON, "json", "", false, "print output in JSON") }) } git-lfs-3.6.1/commands/command_merge_driver.go000066400000000000000000000107521472372047300214040ustar00rootroot00000000000000package commands import ( "fmt" "os" "os/exec" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) var ( mergeDriverAncestor string mergeDriverCurrent string mergeDriverOther string mergeDriverOutput string mergeDriverProgram string mergeDriverMarkerSize int ) func mergeDriverCommand(cmd *cobra.Command, args []string) { if len(mergeDriverAncestor) == 0 || len(mergeDriverCurrent) == 0 || len(mergeDriverOther) == 0 || len(mergeDriverOutput) == 0 { Exit(tr.Tr.Get("the --ancestor, --current, --other, and --output options are mandatory")) } fileSpecifiers := make(map[string]string) gf := lfs.NewGitFilter(cfg) mergeProcessInput(gf, mergeDriverAncestor, fileSpecifiers, "O") mergeProcessInput(gf, mergeDriverCurrent, fileSpecifiers, "A") mergeProcessInput(gf, mergeDriverOther, fileSpecifiers, "B") mergeProcessInput(gf, "", fileSpecifiers, "D") fileSpecifiers["L"] = fmt.Sprintf("%d", mergeDriverMarkerSize) if len(mergeDriverProgram) == 0 { mergeDriverProgram = "git merge-file --stdout --marker-size=%L %A %O %B >%D" } status, err := processFiles(fileSpecifiers, mergeDriverProgram, mergeDriverOutput) if err != nil { ExitWithError(err) } os.Exit(status) } func processFiles(fileSpecifiers map[string]string, program string, outputFile string) (int, error) { defer mergeCleanup(fileSpecifiers) var exitStatus int formattedMergeProgram := subprocess.FormatPercentSequences(mergeDriverProgram, fileSpecifiers) cmd, err :=
subprocess.ExecCommand("sh", "-c", formattedMergeProgram) if err != nil { return -1, errors.New(tr.Tr.Get("failed to run merge program %q: %s", formattedMergeProgram, err)) } err = cmd.Run() // If it runs but exits nonzero, then that means there's conflicts if err != nil { if exitError, ok := err.(*exec.ExitError); ok { exitStatus = exitError.ProcessState.ExitCode() } else { return -1, errors.New(tr.Tr.Get("failed to run merge program %q: %s", formattedMergeProgram, err)) } } outputFp, err := os.OpenFile(outputFile, os.O_WRONLY|os.O_CREATE, 0600) if err != nil { return -1, err } defer outputFp.Close() filename := fileSpecifiers["D"] stat, err := os.Stat(filename) if err != nil { return -1, err } inputFp, err := os.OpenFile(filename, os.O_RDONLY|os.O_CREATE, 0600) if err != nil { return -1, err } defer inputFp.Close() gf := lfs.NewGitFilter(cfg) _, err = clean(gf, outputFp, inputFp, filename, stat.Size()) if err != nil { return -1, err } return exitStatus, nil } func mergeCleanup(fileSpecifiers map[string]string) { ids := []string{"A", "O", "B", "D"} for _, id := range ids { os.Remove(fileSpecifiers[id]) } } func mergeProcessInput(gf *lfs.GitFilter, filename string, fileSpecifiers map[string]string, tag string) { file, err := lfs.TempFile(cfg, fmt.Sprintf("merge-driver-%s", tag)) if err != nil { Exit(tr.Tr.Get("could not create temporary file when merging: %s", err)) } defer file.Close() fileSpecifiers[tag] = file.Name() if len(filename) == 0 { return } pointer, err := lfs.DecodePointerFromFile(filename) if err != nil { if errors.IsNotAPointerError(err) { file.Close() if err := lfs.CopyFileContents(cfg, filename, file.Name()); err != nil { os.Remove(file.Name()) Exit(tr.Tr.Get("could not copy non-LFS content when merging: %s", err)) } return } else { os.Remove(file.Name()) Exit(tr.Tr.Get("could not decode pointer when merging: %s", err)) } } cb, fp, err := gf.CopyCallbackFile("download", file.Name(), 1, 1) if err != nil { os.Remove(file.Name()) Exit(tr.Tr.Get("could not create callback: %s", err)) } defer fp.Close() _, err = gf.Smudge(file, pointer, file.Name(), true, getTransferManifestOperationRemote("download", cfg.Remote()), cb) } func init() { RegisterCommand("merge-driver", mergeDriverCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&mergeDriverAncestor, "ancestor", "", "", "file with the ancestor version") cmd.Flags().StringVarP(&mergeDriverCurrent, "current", "", "", "file with the current version") cmd.Flags().StringVarP(&mergeDriverOther, "other", "", "", "file with the other version") cmd.Flags().StringVarP(&mergeDriverOutput, "output", "", "", "file with the output version") cmd.Flags().StringVarP(&mergeDriverProgram, "program", "", "", "program to run to perform the merge") cmd.Flags().IntVarP(&mergeDriverMarkerSize, "marker-size", "", 12, "merge marker size") }) } git-lfs-3.6.1/commands/command_migrate.go000066400000000000000000000321251472372047300203600ustar00rootroot00000000000000package commands import ( "bufio" "fmt" "io" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/git/githistory" "github.com/git-lfs/git-lfs/v3/tasklog" "github.com/git-lfs/git-lfs/v3/tr" "github.com/git-lfs/gitobj/v2" "github.com/spf13/cobra" ) var ( // migrateIncludeRefs is a set of Git references to explicitly include // in the migration. migrateIncludeRefs []string // migrateExcludeRefs is a set of Git references to explicitly exclude // in the migration. 
migrateExcludeRefs []string // migrateYes indicates that an answer of 'yes' should be presumed // whenever 'git lfs migrate' asks for user input. migrateYes bool // migrateSkipFetch assumes that the client has the latest copy of // remote references, and thus should not contact the remote for a set // of updated references. migrateSkipFetch bool // migrateImportAboveFmt indicates the presence of the --above= // flag and instructs 'git lfs migrate import' to import all files // above the provided size. migrateImportAboveFmt string // migrateEverything indicates the presence of the --everything flag, // and instructs 'git lfs migrate' to migrate all local references. migrateEverything bool // migrateVerbose enables verbose logging. migrateVerbose bool // objectMapFilePath is the path to the map of old sha1 to new sha1 // commits. objectMapFilePath string // migrateNoRewrite is the flag indicating whether or not the // command should rewrite git history. migrateNoRewrite bool // migrateCommitMessage is the message to use with the commit generated // by the migrate command. migrateCommitMessage string // exportRemote is the remote from which to download objects when // performing an export. exportRemote string // migrateFixup is the flag indicating whether or not to infer the // included and excluded filepath patterns. migrateFixup bool ) // migrate takes the given arguments and a *githistory.Rewriter, as well as the // *githistory.RewriteOptions to apply, and performs a migration. func migrate(args []string, r *githistory.Rewriter, l *tasklog.Logger, opts *githistory.RewriteOptions) { setupRepository() opts, err := rewriteOptions(args, opts, l) if err != nil { ExitWithError(err) } _, err = r.Rewrite(opts) if err != nil { ExitWithError(err) } } // getObjectDatabase creates a *gitobj.ObjectDatabase from the filesystem // pointed at the .git directory of the currently checked-out repository. func getObjectDatabase() (*gitobj.ObjectDatabase, error) { dir, err := git.GitCommonDir() if err != nil { return nil, errors.Wrap(err, tr.Tr.Get("cannot open root")) } return git.ObjectDatabase(cfg.OSEnv(), cfg.GitEnv(), dir, cfg.TempDir()) } // rewriteOptions returns *githistory.RewriteOptions able to be passed to a // *githistory.Rewriter that reflect the current arguments and flags passed to // an invocation of git-lfs-migrate(1). // // It is merged with the given "opts". In other words, an identical "opts" is // returned, where the Include and Exclude fields have been filled based on the // following rules: // // The included and excluded references are determined based on the output of // includeExcludeRefs (see below for documentation and detail). // // If any of the above could not be determined without error, that error will be // returned immediately. func rewriteOptions(args []string, opts *githistory.RewriteOptions, l *tasklog.Logger) (*githistory.RewriteOptions, error) { include, exclude, err := includeExcludeRefs(l, args) if err != nil { return nil, err } return &githistory.RewriteOptions{ Include: include, Exclude: exclude, UpdateRefs: opts.UpdateRefs, Verbose: opts.Verbose, ObjectMapFilePath: opts.ObjectMapFilePath, BlobFn: opts.BlobFn, TreePreCallbackFn: opts.TreePreCallbackFn, TreeCallbackFn: opts.TreeCallbackFn, }, nil } // isSpecialGitRef checks if a ref spec is a special git ref to exclude from // --everything func isSpecialGitRef(refspec string) bool { // Special refspecs. switch refspec { case "refs/stash": return true } // Special refspecs from namespaces.
parts := strings.SplitN(refspec, "/", 3) if len(parts) < 3 { return false } prefix := strings.Join(parts[:2], "/") switch prefix { case "refs/notes", "refs/bisect", "refs/replace": return true } return false } // includeExcludeRefs returns fully-qualified sets of references to include, and // exclude, or an error if those could not be determined. // // They are determined based on the following rules: // // - Include all local refs/heads/ references for each branch // specified as an argument. // - Include the currently checked out branch if no branches are given as // arguments and the --include-ref= or --exclude-ref= flag(s) aren't given. // - Include all references given in --include-ref=. // - Exclude all references given in --exclude-ref=. func includeExcludeRefs(l *tasklog.Logger, args []string) (include, exclude []string, err error) { hardcore := len(migrateIncludeRefs) > 0 || len(migrateExcludeRefs) > 0 if len(args) == 0 && !hardcore && !migrateEverything { // If no branches were given explicitly AND neither // --include-ref nor --exclude-ref flags were given, then add the // currently checked out reference. current, err := currentRefToMigrate() if err != nil { return nil, nil, err } args = append(args, current.Name) } if migrateEverything && len(args) > 0 { return nil, nil, errors.New(tr.Tr.Get("Cannot use --everything with explicit reference arguments")) } for _, name := range args { var excluded bool if strings.HasPrefix(name, "^") { name = name[1:] excluded = true } // Then, loop through each branch given, resolve that reference, // and include it. ref, err := git.ResolveRef(name) if err != nil { return nil, nil, err } if excluded { exclude = append(exclude, ref.Refspec()) } else { include = append(include, ref.Refspec()) } } if hardcore { if migrateEverything { return nil, nil, errors.New(tr.Tr.Get("Cannot use --everything with --include-ref or --exclude-ref")) } // If either --include-ref= or --exclude-ref= were // given, append those to the include and exclude reference // sets, respectively. include = append(include, migrateIncludeRefs...) exclude = append(exclude, migrateExcludeRefs...) } else if migrateEverything { refs, err := git.AllRefsIn("") if err != nil { return nil, nil, err } for _, ref := range refs { switch ref.Type { case git.RefTypeLocalBranch, git.RefTypeLocalTag, git.RefTypeRemoteBranch: include = append(include, ref.Refspec()) case git.RefTypeOther: if isSpecialGitRef(ref.Refspec()) { continue } include = append(include, ref.Refspec()) } } } else { bare, err := git.IsBare() if err != nil { return nil, nil, errors.Wrap(err, tr.Tr.Get("Unable to determine bareness")) } if !bare { // Otherwise, if neither --include-ref= or // --exclude-ref= were given, include no additional // references, and exclude all remote references that // are remote branches or remote tags. remoteRefs, err := getRemoteRefs(l) if err != nil { return nil, nil, err } for remote, refs := range remoteRefs { for _, ref := range refs { exclude = append(exclude, formatRefName(ref, remote)) } } } } return include, exclude, nil } // getRemoteRefs returns a fully qualified set of references belonging to all // remotes known by the currently checked-out repository, or an error if those // references could not be determined.
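// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the original git-lfs
// source. It restates the namespace test from isSpecialGitRef above: a
// fully-qualified refspec such as "refs/notes/commits" is split into at most
// three segments, and the first two name the namespace. The name
// exampleRefNamespace is hypothetical; "strings" is an assumed import.]

func exampleRefNamespace(refspec string) (string, bool) {
	parts := strings.SplitN(refspec, "/", 3)
	if len(parts) < 3 {
		// Too short to carry a namespace, e.g. "refs/stash".
		return "", false
	}
	return parts[0] + "/" + parts[1], true
}

// exampleRefNamespace("refs/notes/commits") yields ("refs/notes", true),
// while exampleRefNamespace("refs/stash") yields ("", false).
// ---------------------------------------------------------------------------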
func getRemoteRefs(l *tasklog.Logger) (map[string][]*git.Ref, error) { refs := make(map[string][]*git.Ref) remotes, err := git.RemoteList() if err != nil { return nil, err } if !migrateSkipFetch { if err := fetchRemoteRefs(l, remotes); err != nil { return nil, err } } for _, remote := range remotes { var refsForRemote []*git.Ref if migrateSkipFetch { refsForRemote, err = git.CachedRemoteRefs(remote) } else { refsForRemote, err = git.RemoteRefs(remote, true) } if err != nil { return nil, err } refs[remote] = refsForRemote } return refs, nil } func fetchRemoteRefs(l *tasklog.Logger, remotes []string) error { w := l.Waiter(fmt.Sprintf("migrate: %s", tr.Tr.Get("Fetching remote refs"))) defer w.Complete() return git.Fetch(remotes...) } // formatRefName returns the fully-qualified name for the given Git reference // "ref". func formatRefName(ref *git.Ref, remote string) string { if ref.Type == git.RefTypeRemoteBranch { return strings.Join([]string{ "refs", "remotes", remote, ref.Name}, "/") } return ref.Refspec() } // currentRefToMigrate returns the fully-qualified name of the currently // checked-out reference, or an error if the reference's type was not a local // branch. func currentRefToMigrate() (*git.Ref, error) { current, err := git.CurrentRef() if err != nil { return nil, err } if current.Type == git.RefTypeOther || current.Type == git.RefTypeRemoteBranch { return nil, errors.Errorf(tr.Tr.Get("Cannot migrate non-local ref: %s", current.Name)) } return current, nil } // getHistoryRewriter returns a history rewriter that includes the filepath // filter given by the --include and --exclude arguments. func getHistoryRewriter(cmd *cobra.Command, db *gitobj.ObjectDatabase, l *tasklog.Logger) *githistory.Rewriter { include, exclude := getIncludeExcludeArgs(cmd) filter := buildFilepathFilterWithPatternType(cfg, include, exclude, false, filepathfilter.GitAttributes) return githistory.NewRewriter(db, githistory.WithFilter(filter), githistory.WithLogger(l)) } func ensureWorkingCopyClean(in io.Reader, out io.Writer) { dirty, err := git.IsWorkingCopyDirty() if err != nil { ExitWithError(errors.Wrap(err, tr.Tr.Get("Could not determine if working copy is dirty"))) } if !dirty { return } var proceed bool if migrateYes { proceed = true } else { answer := bufio.NewReader(in) L: for { fmt.Fprintf(out, "migrate: %s", tr.Tr.Get("override changes in your working copy? All uncommitted changes will be lost! [y/N] ")) s, err := answer.ReadString('\n') if err != nil { if err == io.EOF { break L } ExitWithError(errors.Wrap(err, tr.Tr.Get("Could not read answer"))) } switch strings.TrimSpace(s) { // TRANSLATORS: these are negative (no) responses. case tr.Tr.Get("n"), tr.Tr.Get("N"), "": proceed = false break L // TRANSLATORS: these are positive (yes) responses. 
case tr.Tr.Get("y"), tr.Tr.Get("Y"): proceed = true break L } if !strings.HasSuffix(s, "\n") { fmt.Fprintf(out, "\n") } } } if proceed { fmt.Fprintf(out, "migrate: %s\n", tr.Tr.Get("changes in your working copy will be overridden ...")) } else { Exit("migrate: %s", tr.Tr.Get("working copy must not be dirty")) } } func init() { info := NewCommand("info", migrateInfoCommand) info.Flags().IntVar(&migrateInfoTopN, "top", 5, "--top=") info.Flags().StringVar(&migrateInfoAboveFmt, "above", "", "--above=") info.Flags().StringVar(&migrateInfoUnitFmt, "unit", "", "--unit=") info.Flags().StringVar(&migrateInfoPointers, "pointers", "", "Ignore, dereference, or include LFS pointer files") info.Flags().BoolVar(&migrateFixup, "fixup", false, "Infer filepaths based on .gitattributes") importCmd := NewCommand("import", migrateImportCommand) importCmd.Flags().StringVar(&migrateImportAboveFmt, "above", "", "--above=") importCmd.Flags().BoolVar(&migrateVerbose, "verbose", false, "Verbose logging") importCmd.Flags().StringVar(&objectMapFilePath, "object-map", "", "Object map file") importCmd.Flags().BoolVar(&migrateNoRewrite, "no-rewrite", false, "Add new history without rewriting previous") importCmd.Flags().StringVarP(&migrateCommitMessage, "message", "m", "", "With --no-rewrite, an optional commit message") importCmd.Flags().BoolVar(&migrateFixup, "fixup", false, "Infer filepaths based on .gitattributes") exportCmd := NewCommand("export", migrateExportCommand) exportCmd.Flags().BoolVar(&migrateVerbose, "verbose", false, "Verbose logging") exportCmd.Flags().StringVar(&objectMapFilePath, "object-map", "", "Object map file") exportCmd.Flags().StringVar(&exportRemote, "remote", "", "Remote from which to download objects") RegisterCommand("migrate", nil, func(cmd *cobra.Command) { cmd.PersistentFlags().StringVarP(&includeArg, "include", "I", "", "Include a list of paths") cmd.PersistentFlags().StringVarP(&excludeArg, "exclude", "X", "", "Exclude a list of paths") cmd.PersistentFlags().StringSliceVar(&migrateIncludeRefs, "include-ref", nil, "An explicit list of refs to include") cmd.PersistentFlags().StringSliceVar(&migrateExcludeRefs, "exclude-ref", nil, "An explicit list of refs to exclude") cmd.PersistentFlags().BoolVar(&migrateEverything, "everything", false, "Migrate all local references") cmd.PersistentFlags().BoolVar(&migrateSkipFetch, "skip-fetch", false, "Assume up-to-date remote references.") cmd.PersistentFlags().BoolVarP(&migrateYes, "yes", "y", false, "Don't prompt for answers.") cmd.AddCommand(exportCmd, importCmd, info) }) } git-lfs-3.6.1/commands/command_migrate_export.go000066400000000000000000000125251472372047300217630ustar00rootroot00000000000000package commands import ( "fmt" "os" "path/filepath" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/git/githistory" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/tasklog" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/git-lfs/gitobj/v2" "github.com/spf13/cobra" ) func migrateExportCommand(cmd *cobra.Command, args []string) { ensureWorkingCopyClean(os.Stdin, os.Stderr) l := tasklog.NewLogger(os.Stderr, tasklog.ForceProgress(cfg.ForceProgress()), ) defer l.Close() db, err := getObjectDatabase() if err != nil { ExitWithError(err) } defer db.Close() rewriter := getHistoryRewriter(cmd, db, l) filter := rewriter.Filter() if len(filter.Include()) <= 0 { ExitWithError(errors.Errorf(tr.Tr.Get("One 
or more files must be specified with --include"))) } tracked := trackedFromExportFilter(filter) gitfilter := lfs.NewGitFilter(cfg) opts := &githistory.RewriteOptions{ Verbose: migrateVerbose, ObjectMapFilePath: objectMapFilePath, BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) { if filepath.Base(path) == ".gitattributes" { return b, nil } ptr, err := lfs.DecodePointer(b.Contents) if err != nil { if errors.IsNotAPointerError(err) { return b, nil } return nil, err } downloadPath, err := gitfilter.ObjectPath(ptr.Oid) if err != nil { return nil, err } return gitobj.NewBlobFromFile(downloadPath) }, TreeCallbackFn: func(path string, t *gitobj.Tree) (*gitobj.Tree, error) { if path != "/" { // Ignore non-root trees. return t, nil } ours := tracked theirs, err := trackedFromAttrs(db, t) if err != nil { return nil, err } // Create a blob of the attributes that are optionally // present in the "t" tree's .gitattributes blob, and // union in the patterns that we've tracked. // // Perform this Union() operation each time we visit a // root tree such that if the underlying .gitattributes // is present and has a diff between commits in the // range of commits to migrate, those changes are // preserved. blob, err := trackedToBlob(db, theirs.Clone().Union(ours)) if err != nil { return nil, err } // Finally, return a copy of the tree "t" that has the // new .gitattributes file included/replaced. return t.Merge(&gitobj.TreeEntry{ Name: ".gitattributes", Filemode: 0100644, Oid: blob, }), nil }, UpdateRefs: true, } setupRepository() opts, err = rewriteOptions(args, opts, l) if err != nil { ExitWithError(err) } remote := cfg.Remote() if cmd.Flag("remote").Changed { remote = exportRemote } remoteURL := getAPIClient().Endpoints.RemoteEndpoint("download", remote).Url if remoteURL == "" && cmd.Flag("remote").Changed { ExitWithError(errors.Errorf(tr.Tr.Get("Invalid remote %s provided", remote))) } // If we have a valid remote, pre-download all objects using the Transfer Queue if remoteURL != "" { q := newDownloadQueue(getTransferManifestOperationRemote("Download", remote), remote) gs := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) { if err != nil { return } if !filter.Allows(p.Name) { return } downloadPath, err := gitfilter.ObjectPath(p.Oid) if err != nil { return } if _, err := os.Stat(downloadPath); os.IsNotExist(err) { q.Add(p.Name, downloadPath, p.Oid, p.Size, false, nil) } }) gs.ScanRefs(opts.Include, opts.Exclude, nil) q.Wait() for _, err := range q.Errors() { if err != nil { ExitWithError(err) } } } // Perform the rewrite if _, err := rewriter.Rewrite(opts); err != nil { ExitWithError(err) } // Only perform `git-checkout(1) -f` if the repository is non-bare. if bare, _ := git.IsBare(); !bare { if err := performForceCheckout(l); err != nil { ExitWithError(err) } } fetchPruneCfg := lfs.NewFetchPruneConfig(cfg.Git) // Set our preservation time-window for objects existing on the remote to // 0. Because the newly rewritten commits have not yet been pushed, some // exported objects can still exist on the remote within the time window // and thus will not be pruned from the cache. 
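// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the original git-lfs
// source. The TreeCallbackFn above merges .gitattributes lines with
// tools.OrderedSet's Union; this hypothetical stand-in shows the semantics
// relied upon — insertion order is preserved and duplicates are dropped, so
// attribute lines already present in the tree keep their position.]

type exampleOrderedSet struct {
	seen  map[string]bool
	items []string
}

func newExampleOrderedSet() *exampleOrderedSet {
	return &exampleOrderedSet{seen: make(map[string]bool)}
}

// Add appends v unless it has been added before.
func (s *exampleOrderedSet) Add(v string) {
	if s.seen[v] {
		return
	}
	s.seen[v] = true
	s.items = append(s.items, v)
}

// Union folds the entries of "other" into s, keeping existing entries (and
// their order) intact, and returns s for chaining.
func (s *exampleOrderedSet) Union(other *exampleOrderedSet) *exampleOrderedSet {
	for _, v := range other.items {
		s.Add(v)
	}
	return s
}
// ---------------------------------------------------------------------------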
fetchPruneCfg.FetchRecentRefsDays = 0 // Prune our cache prune(fetchPruneCfg, false, false, false, false, true) } func performForceCheckout(l *tasklog.Logger) error { t := l.Waiter(fmt.Sprintf("migrate: %s", tr.Tr.Get("checkout"))) defer t.Complete() return git.Checkout("", nil, true) } // trackedFromExportFilter returns an ordered set of strings where each entry // is a line we intend to place in the .gitattributes file. It adds/removes the // filter/diff/merge=lfs attributes based on patterns included/excluded in the // given filter. Since `migrate export` removes files from Git LFS, it will // remove attributes for included files, and add attributes for excluded files func trackedFromExportFilter(filter *filepathfilter.Filter) *tools.OrderedSet { tracked := tools.NewOrderedSet() for _, include := range filter.Include() { tracked.Add(fmt.Sprintf("%s !text !filter !merge !diff", escapeAttrPattern(include))) } for _, exclude := range filter.Exclude() { tracked.Add(fmt.Sprintf("%s filter=lfs diff=lfs merge=lfs -text", escapeAttrPattern(exclude))) } return tracked } git-lfs-3.6.1/commands/command_migrate_import.go000066400000000000000000000305751472372047300217610ustar00rootroot00000000000000package commands import ( "bufio" "bytes" "encoding/hex" "fmt" "os" "path/filepath" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/git/gitattr" "github.com/git-lfs/git-lfs/v3/git/githistory" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/tasklog" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tools/humanize" "github.com/git-lfs/git-lfs/v3/tr" "github.com/git-lfs/gitobj/v2" "github.com/spf13/cobra" ) func migrateImportCommand(cmd *cobra.Command, args []string) { ensureWorkingCopyClean(os.Stdin, os.Stderr) l := tasklog.NewLogger(os.Stderr, tasklog.ForceProgress(cfg.ForceProgress()), ) defer l.Close() db, err := getObjectDatabase() if err != nil { ExitWithError(err) } defer db.Close() // To avoid confusion later, let's make sure that we've installed the // necessary hooks so that a newly migrated repository is `git // push`-able immediately following a `git lfs migrate import`. 
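	// (Passing false requests a non-forced hook install, which should leave
	// any existing hooks the user has customized in place.)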
	installHooks(false)

	if migrateNoRewrite {
		if migrateFixup {
			ExitWithError(errors.Errorf(tr.Tr.Get("--no-rewrite and --fixup cannot be combined")))
		}

		if len(args) == 0 {
			ExitWithError(errors.Errorf(tr.Tr.Get("Expected one or more files with --no-rewrite")))
		}

		ref, err := git.CurrentRef()
		if err != nil {
			ExitWithError(errors.Wrap(err, tr.Tr.Get("Unable to find current reference")))
		}

		sha, _ := hex.DecodeString(ref.Sha)
		commit, err := db.Commit(sha)
		if err != nil {
			ExitWithError(errors.Wrap(err, tr.Tr.Get("Unable to load commit")))
		}

		root := commit.TreeID

		filter := git.GetAttributeFilter(cfg.LocalWorkingDir(), cfg.LocalGitDir())
		if len(filter.Include()) == 0 {
			ExitWithError(errors.Errorf(tr.Tr.Get("No Git LFS filters found in '.gitattributes'")))
		}

		gf := lfs.NewGitFilter(cfg)

		for _, file := range args {
			if !filter.Allows(file) {
				ExitWithError(errors.Errorf(tr.Tr.Get("File %s did not match any Git LFS filters in '.gitattributes'", file)))
			}
		}

		for _, file := range args {
			root, err = rewriteTree(gf, db, root, file)
			if err != nil {
				ExitWithError(errors.Wrapf(err, tr.Tr.Get("Could not rewrite %q", file)))
			}
		}

		name, email := cfg.CurrentAuthor()
		author := &gitobj.Signature{
			Name:  name,
			Email: email,
			When:  cfg.CurrentAuthorTimestamp(),
		}

		name, email = cfg.CurrentCommitter()
		committer := &gitobj.Signature{
			Name:  name,
			Email: email,
			When:  cfg.CurrentCommitterTimestamp(),
		}

		oid, err := db.WriteCommit(&gitobj.Commit{
			Author:    author.String(),
			Committer: committer.String(),
			ParentIDs: [][]byte{sha},
			Message:   generateMigrateCommitMessage(cmd, strings.Join(args, ",")),
			TreeID:    root,
		})
		if err != nil {
			ExitWithError(errors.Wrap(err, tr.Tr.Get("Unable to write commit")))
		}

		if err := git.UpdateRef(ref, oid, "git lfs migrate import --no-rewrite"); err != nil {
			ExitWithError(errors.Wrap(err, tr.Tr.Get("Unable to update ref")))
		}

		if err := checkoutNonBare(l); err != nil {
			ExitWithError(errors.Wrap(err, tr.Tr.Get("Could not checkout")))
		}

		return
	}

	if migrateFixup {
		include, exclude := getIncludeExcludeArgs(cmd)
		if include != nil || exclude != nil {
			ExitWithError(errors.Errorf(tr.Tr.Get("Cannot use --fixup with --include, --exclude")))
		}
	}

	rewriter := getHistoryRewriter(cmd, db, l)

	tracked := trackedFromFilter(rewriter.Filter())
	exts := tools.NewOrderedSet()
	gitfilter := lfs.NewGitFilter(cfg)
	var fixups *gitattr.Tree

	above, err := humanize.ParseBytes(migrateImportAboveFmt)
	if err != nil {
		ExitWithError(errors.Wrap(err, tr.Tr.Get("Cannot parse --above=<n>")))
	}

	if above > 0 {
		include, exclude := getIncludeExcludeArgs(cmd)
		if include != nil || exclude != nil || migrateFixup {
			ExitWithError(errors.Errorf(tr.Tr.Get("Cannot use --above with --include, --exclude, --fixup")))
		}
	}

	migrate(args, rewriter, l, &githistory.RewriteOptions{
		Verbose:           migrateVerbose,
		ObjectMapFilePath: objectMapFilePath,
		BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) {
			if filepath.Base(path) == ".gitattributes" {
				return b, nil
			}

			if (above > 0) && (uint64(b.Size) < above) {
				return b, nil
			}

			if migrateFixup {
				var ok bool
				attrs := fixups.Applied(path)
				for _, attr := range attrs {
					if attr.K == "filter" {
						ok = attr.V == "lfs"
					}
				}

				if !ok {
					return b, nil
				}
			}

			var buf bytes.Buffer

			if _, err := clean(gitfilter, &buf, b.Contents, path, b.Size); err != nil {
				return nil, err
			}

			if ext := filepath.Ext(path); len(ext) > 0 && above == 0 {
				exts.Add(fmt.Sprintf("*%s filter=lfs diff=lfs merge=lfs -text", ext))
			} else {
				exts.Add(fmt.Sprintf("/%s filter=lfs diff=lfs merge=lfs -text", escapeGlobCharacters(path)))
			}

			return &gitobj.Blob{
				Contents: &buf, Size: 
int64(buf.Len()), }, nil }, TreePreCallbackFn: func(path string, t *gitobj.Tree) error { if migrateFixup && path == "/" { var err error fixups, err = gitattr.New(db, t) if err != nil { return err } } return nil }, TreeCallbackFn: func(path string, t *gitobj.Tree) (*gitobj.Tree, error) { if path != "/" || migrateFixup { // Avoid updating .gitattributes in non-root // trees, or if --fixup is given. return t, nil } ours := tracked if ours.Cardinality() == 0 { // If there were no explicitly tracked // --include, --exclude filters, assume that the // include set is the wildcard filepath // extensions of files tracked. ours = exts if ours.Cardinality() == 0 { // If it is still the case that we have // no patterns to track, that means that // we are in a tree that does not // require .gitattributes changes. // // We can return early to avoid // comparing and saving an identical // tree. return t, nil } } theirs, err := trackedFromAttrs(db, t) if err != nil { return nil, err } // Create a blob of the attributes that are optionally // present in the "t" tree's .gitattributes blob, and // union in the patterns that we've tracked. // // Perform this Union() operation each time we visit a // root tree such that if the underlying .gitattributes // is present and has a diff between commits in the // range of commits to migrate, those changes are // preserved. blob, err := trackedToBlob(db, theirs.Clone().Union(ours)) if err != nil { return nil, err } // Finally, return a copy of the tree "t" that has the // new .gitattributes file included/replaced. return t.Merge(&gitobj.TreeEntry{ Name: ".gitattributes", Filemode: 0100644, Oid: blob, }), nil }, UpdateRefs: true, }) if err := checkoutNonBare(l); err != nil { ExitWithError(errors.Wrap(err, tr.Tr.Get("Could not checkout"))) } } // generateMigrateCommitMessage generates a commit message used with // --no-rewrite, using --message (if given) or generating one if it isn't. func generateMigrateCommitMessage(cmd *cobra.Command, patterns string) string { if cmd.Flag("message").Changed { return migrateCommitMessage } return tr.Tr.Get("%s: convert to Git LFS", patterns) } // checkoutNonBare forces a checkout of the current reference, so long as the // repository is non-bare. // // It returns nil on success, and a non-nil error on failure. func checkoutNonBare(l *tasklog.Logger) error { if bare, _ := git.IsBare(); bare { return nil } t := l.Waiter(fmt.Sprintf("migrate: %s", tr.Tr.Get("checkout"))) defer t.Complete() return git.Checkout("", nil, true) } // trackedFromFilter returns an ordered set of strings where each entry is a // line in the .gitattributes file. It adds/removes the filter/diff/merge=lfs // attributes based on patterns included/excluded in the given filter. func trackedFromFilter(filter *filepathfilter.Filter) *tools.OrderedSet { tracked := tools.NewOrderedSet() for _, include := range filter.Include() { tracked.Add(fmt.Sprintf("%s filter=lfs diff=lfs merge=lfs -text", escapeAttrPattern(include))) } for _, exclude := range filter.Exclude() { tracked.Add(fmt.Sprintf("%s !text -filter -merge -diff", escapeAttrPattern(exclude))) } return tracked } var ( // attrsCache maintains a cache from the hex-encoded SHA1 of a // .gitattributes blob to the set of patterns parsed from that blob. attrsCache = make(map[string]*tools.OrderedSet) ) // trackedFromAttrs returns an ordered line-delimited set of the contents of a // .gitattributes blob in a given tree "t". 
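// Parsed attribute sets are memoized in attrsCache by blob OID, so a
// .gitattributes blob shared by many commits is only scanned once.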
// // It returns an empty set if no attributes file could be found, or an error if // it could not otherwise be opened. func trackedFromAttrs(db *gitobj.ObjectDatabase, t *gitobj.Tree) (*tools.OrderedSet, error) { var oid []byte for _, e := range t.Entries { if strings.ToLower(e.Name) == ".gitattributes" && e.Type() == gitobj.BlobObjectType { if e.IsLink() { return nil, errors.Errorf("migrate: %s", tr.Tr.Get("expected '.gitattributes' to be a file, got a symbolic link")) } else { oid = e.Oid break } } } if oid == nil { // TODO(@ttaylorr): make (*tools.OrderedSet)(nil) a valid // receiver for non-mutative methods. return tools.NewOrderedSet(), nil } sha1 := hex.EncodeToString(oid) if s, ok := attrsCache[sha1]; ok { return s, nil } blob, err := db.Blob(oid) if err != nil { return nil, err } attrs := tools.NewOrderedSet() scanner := bufio.NewScanner(blob.Contents) for scanner.Scan() { attrs.Add(scanner.Text()) } if err := scanner.Err(); err != nil { return nil, err } attrsCache[sha1] = attrs return attrsCache[sha1], nil } // trackedToBlob writes and returns the OID of a .gitattributes blob based on // the patterns given in the ordered set of patterns, "patterns". func trackedToBlob(db *gitobj.ObjectDatabase, patterns *tools.OrderedSet) ([]byte, error) { var attrs bytes.Buffer for pattern := range patterns.Iter() { fmt.Fprintf(&attrs, "%s\n", pattern) } return db.WriteBlob(&gitobj.Blob{ Contents: &attrs, Size: int64(attrs.Len()), }) } // rewriteTree replaces the blob at the provided path within the given tree with // a git lfs pointer. It will recursively rewrite any subtrees along the path to the // blob. func rewriteTree(gf *lfs.GitFilter, db *gitobj.ObjectDatabase, root []byte, path string) ([]byte, error) { tree, err := db.Tree(root) if err != nil { return nil, err } splits := strings.SplitN(path, "/", 2) switch len(splits) { case 1: // The path points to an entry at the root of this tree, so it must be a blob. // Try to replace this blob with a Git LFS pointer. index := findEntry(tree, splits[0]) if index < 0 { return nil, errors.Errorf(tr.Tr.Get("unable to find entry %s in tree", splits[0])) } blobEntry := tree.Entries[index] blob, err := db.Blob(blobEntry.Oid) if err != nil { return nil, err } var buf bytes.Buffer if _, err := clean(gf, &buf, blob.Contents, blobEntry.Name, blob.Size); err != nil { return nil, err } newOid, err := db.WriteBlob(&gitobj.Blob{ Contents: &buf, Size: int64(buf.Len()), }) if err != nil { return nil, err } tree = tree.Merge(&gitobj.TreeEntry{ Name: splits[0], Filemode: blobEntry.Filemode, Oid: newOid, }) return db.WriteTree(tree) case 2: // The path points to an entry in a subtree contained at the root of the tree. // Recursively rewrite the subtree. 
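		// For example, a path of "dir/sub/large.bin" splits into a head of
		// "dir" and a tail of "sub/large.bin" for the recursive call below.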
head, tail := splits[0], splits[1] index := findEntry(tree, head) if index < 0 { return nil, errors.Errorf(tr.Tr.Get("unable to find entry %s in tree", head)) } subtreeEntry := tree.Entries[index] if subtreeEntry.Type() != gitobj.TreeObjectType { return nil, errors.Errorf("migrate: %s", tr.Tr.Get("expected %s to be a tree, got %s", head, subtreeEntry.Type())) } rewrittenSubtree, err := rewriteTree(gf, db, subtreeEntry.Oid, tail) if err != nil { return nil, err } tree = tree.Merge(&gitobj.TreeEntry{ Filemode: subtreeEntry.Filemode, Name: subtreeEntry.Name, Oid: rewrittenSubtree, }) return db.WriteTree(tree) default: return nil, errors.Errorf(tr.Tr.Get("error parsing path %s", path)) } } // findEntry searches a tree for the desired entry, and returns the index of that // entry within the tree's Entries array func findEntry(t *gitobj.Tree, name string) int { for i, entry := range t.Entries { if entry.Name == name { return i } } return -1 } git-lfs-3.6.1/commands/command_migrate_info.go000066400000000000000000000232601472372047300213730ustar00rootroot00000000000000package commands import ( "fmt" "io" "os" "path/filepath" "sort" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git/gitattr" "github.com/git-lfs/git-lfs/v3/git/githistory" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/tasklog" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tools/humanize" "github.com/git-lfs/git-lfs/v3/tr" "github.com/git-lfs/gitobj/v2" "github.com/spf13/cobra" ) type migrateInfoPointersType int const ( migrateInfoPointersFollow = migrateInfoPointersType(iota) migrateInfoPointersNoFollow = migrateInfoPointersType(iota) migrateInfoPointersIgnore = migrateInfoPointersType(iota) ) var ( // migrateInfoTopN is a flag given to the git-lfs-migrate(1) subcommand // 'info' which specifies how many info entries to show by default. migrateInfoTopN int // migrateInfoAboveFmt is a flag given to the git-lfs-migrate(1) // subcommand 'info' specifying a human-readable string threshold of // filesize before entries are counted. migrateInfoAboveFmt string // migrateInfoAbove is the number of bytes parsed from the above // migrateInfoAboveFmt flag. migrateInfoAbove uint64 // migrateInfoUnitFmt is a flag given to the git-lfs-migrate(1) // subcommand 'info' specifying a human-readable string of units with // which to display the number of bytes. migrateInfoUnitFmt string // migrateInfoUnit is the number of bytes in the unit given as // migrateInfoUnitFmt. migrateInfoUnit uint64 // migrateInfoPointers is an option given to the git-lfs-migrate(1) // subcommand 'info' specifying how to treat Git LFS pointers. migrateInfoPointers string // migrateInfoPointersMode is the Git LFS pointer treatment mode // parsed from migrateInfoPointers. 
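	// The zero value is migrateInfoPointersFollow, so pointers are
	// dereferenced by default when no --pointers option is given.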
	migrateInfoPointersMode migrateInfoPointersType
)

func migrateInfoCommand(cmd *cobra.Command, args []string) {
	l := tasklog.NewLogger(os.Stderr,
		tasklog.ForceProgress(cfg.ForceProgress()),
	)

	db, err := getObjectDatabase()
	if err != nil {
		ExitWithError(err)
	}
	defer db.Close()

	rewriter := getHistoryRewriter(cmd, db, l)

	exts := make(map[string]*MigrateInfoEntry)

	above, err := humanize.ParseBytes(migrateInfoAboveFmt)
	if err != nil {
		ExitWithError(errors.Wrap(err, tr.Tr.Get("cannot parse --above=<n>")))
	}

	if u := cmd.Flag("unit"); u.Changed {
		unit, err := humanize.ParseByteUnit(u.Value.String())
		if err != nil {
			ExitWithError(errors.Wrap(err, tr.Tr.Get("cannot parse --unit=<unit>")))
		}

		migrateInfoUnit = unit
	}

	pointers := cmd.Flag("pointers")
	if pointers.Changed {
		switch pointers.Value.String() {
		case "follow":
			migrateInfoPointersMode = migrateInfoPointersFollow
		case "no-follow":
			migrateInfoPointersMode = migrateInfoPointersNoFollow
		case "ignore":
			migrateInfoPointersMode = migrateInfoPointersIgnore
		default:
			ExitWithError(errors.Errorf(tr.Tr.Get("Unsupported --pointers option value")))
		}
	}

	if migrateFixup {
		include, exclude := getIncludeExcludeArgs(cmd)
		if include != nil || exclude != nil {
			ExitWithError(errors.Errorf(tr.Tr.Get("Cannot use --fixup with --include, --exclude")))
		}

		if pointers.Changed && migrateInfoPointersMode != migrateInfoPointersIgnore {
			ExitWithError(errors.Errorf(tr.Tr.Get("Cannot use --fixup with --pointers=%s", pointers.Value.String())))
		}

		migrateInfoPointersMode = migrateInfoPointersIgnore
	}

	migrateInfoAbove = above

	pointersInfoEntry := &MigrateInfoEntry{Qualifier: "LFS Objects", Separate: true}
	var fixups *gitattr.Tree

	migrate(args, rewriter, l, &githistory.RewriteOptions{
		BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) {
			var entry *MigrateInfoEntry
			var size int64
			var p *lfs.Pointer
			var err error

			if migrateFixup {
				if filepath.Base(path) == ".gitattributes" {
					return b, nil
				}

				var ok bool
				attrs := fixups.Applied(path)
				for _, attr := range attrs {
					if attr.K == "filter" {
						ok = attr.V == "lfs"
					}
				}

				if !ok {
					return b, nil
				}
			}

			if migrateInfoPointersMode != migrateInfoPointersNoFollow {
				p, err = lfs.DecodePointerFromBlob(b)
			}

			if p != nil && err == nil {
				if migrateInfoPointersMode == migrateInfoPointersIgnore {
					return b, nil
				}
				entry = pointersInfoEntry
				size = p.Size
			} else {
				entry = findEntryByExtension(exts, path)
				size = b.Size
			}

			entry.Total++
			if size > int64(migrateInfoAbove) {
				entry.TotalAbove++
				entry.BytesAbove += size
			}

			return b, nil
		},
		TreePreCallbackFn: func(path string, t *gitobj.Tree) error {
			if migrateFixup {
				if path == "/" {
					var err error
					fixups, err = gitattr.New(db, t)
					if err != nil {
						return err
					}
				}

				return nil
			}

			for _, e := range t.Entries {
				if strings.ToLower(e.Name) == ".gitattributes" && e.Type() == gitobj.BlobObjectType {
					if e.IsLink() {
						return errors.Errorf("migrate: %s", tr.Tr.Get("expected '.gitattributes' to be a file, got a symbolic link"))
					} else {
						break
					}
				}
			}

			return nil
		},
	})

	l.Close()

	entries := EntriesBySize(MapToEntries(exts))
	entries = removeEmptyEntries(entries)
	sort.Sort(sort.Reverse(entries))

	migrateInfoTopN = tools.ClampInt(migrateInfoTopN, 0, len(entries))

	entries = entries[:migrateInfoTopN]

	if pointersInfoEntry.Total > 0 {
		entries = append(entries, pointersInfoEntry)
	}

	entries.Print(os.Stdout)
}

// MigrateInfoEntry represents a tuple of filetype to bytes and entry count
// above and below a threshold.
type MigrateInfoEntry struct {
	// Qualifier is the filepath's extension.
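	// For paths without an extension, findEntryByExtension falls back to
	// using the base file name instead.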
	Qualifier string
	// Separate indicates if the entry should be printed separately.
	Separate bool
	// BytesAbove is total size of all files above a given threshold.
	BytesAbove int64
	// TotalAbove is the count of all files above a given size threshold.
	TotalAbove int64
	// Total is the count of all files.
	Total int64
}

// findEntryByExtension finds or creates an entry from the given map that
// corresponds with the given path's file extension (or the path's file name
// if there is no file extension).
func findEntryByExtension(exts map[string]*MigrateInfoEntry, path string) *MigrateInfoEntry {
	ext := fmt.Sprintf("*%s", filepath.Ext(path))
	// If extension exists, group all items under extension,
	// else just use the file name.
	var groupName string
	if len(ext) > 1 {
		groupName = ext
	} else {
		groupName = filepath.Base(path)
	}

	entry := exts[groupName]
	if entry == nil {
		entry = &MigrateInfoEntry{Qualifier: groupName}
		exts[groupName] = entry
	}
	return entry
}

// MapToEntries creates a set of `*MigrateInfoEntry`'s for a given map of
// filepath extensions to their corresponding entries.
func MapToEntries(exts map[string]*MigrateInfoEntry) []*MigrateInfoEntry {
	entries := make([]*MigrateInfoEntry, 0, len(exts))
	for _, entry := range exts {
		entries = append(entries, entry)
	}

	return entries
}

// removeEmptyEntries removes `*MigrateInfoEntry`'s for which no matching file
// is above the given threshold "--above".
func removeEmptyEntries(entries []*MigrateInfoEntry) []*MigrateInfoEntry {
	nz := make([]*MigrateInfoEntry, 0, len(entries))
	for _, e := range entries {
		if e.TotalAbove > 0 {
			nz = append(nz, e)
		}
	}

	return nz
}

// EntriesBySize is an implementation of sort.Interface that sorts a set of
// `*MigrateInfoEntry`'s.
type EntriesBySize []*MigrateInfoEntry

// Len returns the total length of the set of `*MigrateInfoEntry`'s.
func (e EntriesBySize) Len() int { return len(e) }

// Less reports whether the MigrateInfoEntry given at `i` takes up
// less total size than the MigrateInfoEntry given at `j`.
func (e EntriesBySize) Less(i, j int) bool {
	if e[i].BytesAbove == e[j].BytesAbove {
		return e[i].Qualifier > e[j].Qualifier
	} else {
		return e[i].BytesAbove < e[j].BytesAbove
	}
}

// Swap swaps the entries given at i, j.
func (e EntriesBySize) Swap(i, j int) { e[i], e[j] = e[j], e[i] }

// Print formats the `*MigrateInfoEntry`'s in the set and prints them to the
// given io.Writer, "to", returning "n" the number of bytes written, and any
// error, if one occurred.
func (e EntriesBySize) Print(to io.Writer) (int, error) {
	if len(e) == 0 {
		return 0, nil
	}

	extensions := make([]string, 0, len(e))
	separateFlags := make([]bool, 0, len(e))
	sizes := make([]string, 0, len(e))
	stats := make([]string, 0, len(e))
	percentages := make([]string, 0, len(e))

	for _, entry := range e {
		bytesAbove := uint64(entry.BytesAbove)
		above := entry.TotalAbove
		total := entry.Total
		percentAbove := 100 * (float64(above) / float64(total))

		var size string
		if migrateInfoUnit > 0 {
			size = humanize.FormatBytesUnit(bytesAbove, migrateInfoUnit)
		} else {
			size = humanize.FormatBytes(bytesAbove)
		}

		// TRANSLATORS: The strings here are intended to have the same
		// display width including spaces, so please insert trailing
		// spaces as necessary for your language.
stat := tr.Tr.GetN( "%d/%d file ", "%d/%d files", int(total), above, total, ) percentage := fmt.Sprintf("%.0f%%", percentAbove) extensions = append(extensions, entry.Qualifier) separateFlags = append(separateFlags, entry.Separate) sizes = append(sizes, size) stats = append(stats, stat) percentages = append(percentages, percentage) } extensions = tools.Ljust(extensions) sizes = tools.Ljust(sizes) stats = tools.Rjust(stats) percentages = tools.Rjust(percentages) output := make([]string, 0, len(e)) for i := 0; i < len(e); i++ { extension := extensions[i] size := sizes[i] stat := stats[i] percentage := percentages[i] line := strings.Join([]string{extension, size, stat, percentage}, "\t") if i > 0 && separateFlags[i] { output = append(output, "") } output = append(output, line) } return fmt.Fprintln(to, strings.Join(output, "\n")) } git-lfs-3.6.1/commands/command_pointer.go000066400000000000000000000104051472372047300204050ustar00rootroot00000000000000package commands import ( "bytes" "crypto/sha256" "encoding/hex" "errors" "fmt" "io" "os" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) var ( pointerFile string pointerCompare string pointerStdin bool pointerCheck bool pointerStrict bool pointerNoStrict bool ) func pointerCommand(cmd *cobra.Command, args []string) { comparing := false something := false buildOid := "" compareOid := "" if pointerCheck { var r io.ReadCloser var err error if pointerStrict && pointerNoStrict { ExitWithError(errors.New(tr.Tr.Get("Cannot combine --strict with --no-strict"))) } if len(pointerCompare) > 0 { ExitWithError(errors.New(tr.Tr.Get("Cannot combine --check with --compare"))) } if len(pointerFile) > 0 { if pointerStdin { ExitWithError(errors.New(tr.Tr.Get("With --check, --file cannot be combined with --stdin"))) } r, err = os.Open(pointerFile) if err != nil { ExitWithError(err) } } else if pointerStdin { r = io.NopCloser(os.Stdin) } else { ExitWithError(errors.New(tr.Tr.Get("Must specify either --file or --stdin with --compare"))) } p, err := lfs.DecodePointer(r) if err != nil { os.Exit(1) } if pointerStrict && !p.Canonical { os.Exit(2) } r.Close() return } if len(pointerCompare) > 0 || pointerStdin { comparing = true } if len(pointerFile) > 0 { something = true buildFile, err := os.Open(pointerFile) if err != nil { Error(err.Error()) os.Exit(1) } oidHash := sha256.New() size, err := io.Copy(oidHash, buildFile) buildFile.Close() if err != nil { Error(err.Error()) os.Exit(1) } ptr := lfs.NewPointer(hex.EncodeToString(oidHash.Sum(nil)), size, nil) fmt.Fprint(os.Stderr, tr.Tr.Get("Git LFS pointer for %s", pointerFile), "\n\n") buf := &bytes.Buffer{} lfs.EncodePointer(io.MultiWriter(os.Stdout, buf), ptr) if comparing { buildOid, err = git.HashObject(bytes.NewReader(buf.Bytes())) if err != nil { Error(err.Error()) os.Exit(1) } fmt.Fprint(os.Stderr, "\n", tr.Tr.Get("Git blob OID: %s", buildOid), "\n\n") } } else { comparing = false } if len(pointerCompare) > 0 || pointerStdin { something = true compFile, err := pointerReader() if err != nil { Error(err.Error()) os.Exit(1) } buf := &bytes.Buffer{} tee := io.TeeReader(compFile, buf) _, err = lfs.DecodePointer(tee) compFile.Close() pointerName := "STDIN" if !pointerStdin { pointerName = pointerCompare } fmt.Fprint(os.Stderr, tr.Tr.Get("Pointer from %s", pointerName), "\n\n") if err != nil { Error(err.Error()) os.Exit(1) } fmt.Fprintf(os.Stderr, buf.String()) if comparing { compareOid, err = git.HashObject(bytes.NewReader(buf.Bytes())) 
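			// git.HashObject yields the OID this pointer text would have as a
			// Git blob, so the OID comparison below is an exact byte-for-byte
			// check of the two encoded pointers.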
if err != nil { Error(err.Error()) os.Exit(1) } fmt.Fprint(os.Stderr, "\n", tr.Tr.Get("Git blob OID: %s", compareOid), "\n") } } if comparing && buildOid != compareOid { fmt.Fprint(os.Stderr, "\n", tr.Tr.Get("Pointers do not match"), "\n") os.Exit(1) } if !something { Error(tr.Tr.Get("Nothing to do!")) os.Exit(1) } } func pointerReader() (io.ReadCloser, error) { if len(pointerCompare) > 0 { if pointerStdin { return nil, errors.New(tr.Tr.Get("cannot read from STDIN and --pointer")) } return os.Open(pointerCompare) } requireStdin(tr.Tr.Get("The --stdin flag expects a pointer file from STDIN.")) return os.Stdin, nil } func init() { RegisterCommand("pointer", pointerCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&pointerFile, "file", "f", "", "Path to a local file to generate the pointer from.") cmd.Flags().StringVarP(&pointerCompare, "pointer", "p", "", "Path to a local file containing a pointer built by another Git LFS implementation.") cmd.Flags().BoolVarP(&pointerStdin, "stdin", "", false, "Read a pointer built by another Git LFS implementation through STDIN.") cmd.Flags().BoolVarP(&pointerCheck, "check", "", false, "Check whether the given file is a Git LFS pointer.") cmd.Flags().BoolVarP(&pointerStrict, "strict", "", false, "Check whether the given Git LFS pointer is canonical.") cmd.Flags().BoolVarP(&pointerNoStrict, "no-strict", "", false, "Don't check whether the given Git LFS pointer is canonical.") }) } git-lfs-3.6.1/commands/command_post_checkout.go000066400000000000000000000052031472372047300215770ustar00rootroot00000000000000package commands import ( "os" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/locking" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" "github.com/spf13/cobra" ) // postCheckoutCommand is run through Git's post-checkout hook. The hook passes // up to 3 arguments on the command line: // // 1. SHA of previous commit before the checkout // 2. SHA of commit just checked out // 3. Flag ("0" or "1") - 1 if a branch/tag/SHA was checked out, 0 if a file was // In the case of a file being checked out, the pre/post SHA are the same // // This hook checks that files which are lockable and not locked are made read-only, // optimising that as best it can based on the available information. func postCheckoutCommand(cmd *cobra.Command, args []string) { if len(args) != 3 { Print(tr.Tr.Get("This should be run through Git's post-checkout hook. 
Run `git lfs update` to install it.")) os.Exit(1) } // Skip entire hook if lockable read only feature is disabled if !cfg.SetLockableFilesReadOnly() { os.Exit(0) } requireGitVersion() lockClient := newLockClient() // Skip this hook if no lockable patterns have been configured if len(lockClient.GetLockablePatterns()) == 0 { os.Exit(0) } if args[2] == "1" && args[0] != "0000000000000000000000000000000000000000" { postCheckoutRevChange(lockClient, args[0], args[1]) } else { postCheckoutFileChange(lockClient) } } func postCheckoutRevChange(client *locking.Client, pre, post string) { tracerx.Printf("post-checkout: changes between %v and %v", pre, post) // We can speed things up by looking at the difference between previous HEAD // and current HEAD, and only checking lockable files that are different files, err := git.GetFilesChanged(pre, post) if err != nil { LoggedError(err, "%s\n%s", tr.Tr.Get("Warning: post-checkout rev diff %v:%v failed: %v", pre, post, err), tr.Tr.Get("Falling back on full scan.")) postCheckoutFileChange(client) } tracerx.Printf("post-checkout: checking write flags on %v", files) err = client.FixLockableFileWriteFlags(files) if err != nil { LoggedError(err, tr.Tr.Get("Warning: post-checkout locked file check failed: %v", err)) } } func postCheckoutFileChange(client *locking.Client) { tracerx.Printf("post-checkout: checking write flags for all lockable files") // Sadly we don't get any information about what files were checked out, // so we have to check the entire repo err := client.FixAllLockableFileWriteFlags() if err != nil { LoggedError(err, tr.Tr.Get("Warning: post-checkout locked file check failed: %v", err)) } } func init() { RegisterCommand("post-checkout", postCheckoutCommand, nil) } git-lfs-3.6.1/commands/command_post_commit.go000066400000000000000000000031161472372047300212630ustar00rootroot00000000000000package commands import ( "os" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" "github.com/spf13/cobra" ) // postCommitCommand is run through Git's post-commit hook. The hook passes // no arguments. // This hook checks that files which are lockable and not locked are made read-only, // optimising that based on what was added / modified in the commit. // This is mainly to catch added files, since modified files should already be // locked. If we didn't do this, any added files would remain read/write on disk // even without a lock unless something else checked. 
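// Unlike post-checkout, this hook receives no arguments, so the set of files
// to check is derived from the commit at HEAD itself.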
func postCommitCommand(cmd *cobra.Command, args []string) {
	// Skip entire hook if lockable read only feature is disabled
	if !cfg.SetLockableFilesReadOnly() {
		os.Exit(0)
	}

	requireGitVersion()

	lockClient := newLockClient()

	// Skip this hook if no lockable patterns have been configured
	if len(lockClient.GetLockablePatterns()) == 0 {
		os.Exit(0)
	}

	tracerx.Printf("post-commit: checking file write flags at HEAD")
	// We can speed things up by looking at what changed in
	// HEAD, and only checking those lockable files
	files, err := git.GetFilesChanged("HEAD", "")
	if err != nil {
		LoggedError(err, tr.Tr.Get("Warning: post-commit failed: %v", err))
		os.Exit(1)
	}
	tracerx.Printf("post-commit: checking write flags on %v", files)
	err = lockClient.FixLockableFileWriteFlags(files)
	if err != nil {
		LoggedError(err, tr.Tr.Get("Warning: post-commit locked file check failed: %v", err))
	}
}

func init() {
	RegisterCommand("post-commit", postCommitCommand, nil)
}
git-lfs-3.6.1/commands/command_post_merge.go000066400000000000000000000030011472372047300210650ustar00rootroot00000000000000package commands

import (
	"os"

	"github.com/git-lfs/git-lfs/v3/tr"
	"github.com/rubyist/tracerx"
	"github.com/spf13/cobra"
)

// postMergeCommand is run through Git's post-merge hook.
//
// This hook checks that files which are lockable and not locked are made read-only,
// optimising that as best it can based on the available information.
func postMergeCommand(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		Print(tr.Tr.Get("This should be run through Git's post-merge hook. Run `git lfs update` to install it."))
		os.Exit(1)
	}

	// Skip entire hook if lockable read only feature is disabled
	if !cfg.SetLockableFilesReadOnly() {
		os.Exit(0)
	}

	requireGitVersion()

	lockClient := newLockClient()

	// Skip this hook if no lockable patterns have been configured
	if len(lockClient.GetLockablePatterns()) == 0 {
		os.Exit(0)
	}

	// The only argument this hook receives is a flag indicating whether the
	// merge was a squash merge; we don't know what files changed.
	// Whether it's squash or not is irrelevant, either way it could have
	// reset the read-only flag on files that got merged.

	tracerx.Printf("post-merge: checking write flags for all lockable files")
	// Sadly we don't get any information about what files were checked out,
	// so we have to check the entire repo
	err := lockClient.FixAllLockableFileWriteFlags()
	if err != nil {
		LoggedError(err, tr.Tr.Get("Warning: post-merge locked file check failed: %v", err))
	}
}

func init() {
	RegisterCommand("post-merge", postMergeCommand, nil)
}
git-lfs-3.6.1/commands/command_pre_push.go000066400000000000000000000061421472372047300205550ustar00rootroot00000000000000package commands

import (
	"bufio"
	"io"
	"os"
	"strings"

	"github.com/git-lfs/git-lfs/v3/git"
	"github.com/git-lfs/git-lfs/v3/tr"
	"github.com/rubyist/tracerx"
	"github.com/spf13/cobra"
)

var (
	prePushDryRun = false
)

// prePushCommand is run through Git's pre-push hook. The pre-push hook passes
// two arguments on the command line:
//
//  1. Name of the remote to which the push is being done
//  2. URL to which the push is being done
//
// The hook receives commit information on stdin in the form:
//
//	<local ref> <local sha1> <remote ref> <remote sha1>
//
// In the typical case, prePushCommand will get a list of git objects being
// pushed by using the following:
//
//	git rev-list --objects <local sha1> ^<remote sha1>
//
// If any of those git objects are associated with Git LFS objects, those
// objects will be pushed to the Git LFS API.
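//
// (A branch deletion arrives with an all-zeros local sha1, which is why
// prePushRefs below skips any ref whose local sha is the zero object ID.)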
//
// In the case of pushing a new branch, the list of git objects will be all of
// the git objects in this branch.
//
// In the case of deleting a branch, no attempts to push Git LFS objects will be
// made.
func prePushCommand(cmd *cobra.Command, args []string) {
	if len(args) == 0 {
		Print(tr.Tr.Get("This should be run through Git's pre-push hook. Run `git lfs update` to install it."))
		os.Exit(1)
	}

	if cfg.Os.Bool("GIT_LFS_SKIP_PUSH", false) {
		return
	}

	requireGitVersion()

	// Remote is first arg
	remote, _ := git.MapRemoteURL(args[0], true)
	if err := cfg.SetValidPushRemote(remote); err != nil {
		Exit(tr.Tr.Get("Invalid remote name %q: %s", args[0], err))
	}

	ctx := newUploadContext(prePushDryRun)
	updates := prePushRefs(os.Stdin)
	if err := uploadForRefUpdates(ctx, updates, false); err != nil {
		ExitWithError(err)
	}
}

// prePushRefs parses commit information that the pre-push git hook receives:
//
//	<local ref> <local sha1> <remote ref> <remote sha1>
//
// Each line describes a proposed update of the remote ref at the remote sha to
// the local sha. Multiple updates can be received on multiple lines (such as
// from 'git push --all'). These updates are typically received over STDIN.
func prePushRefs(r io.Reader) []*git.RefUpdate {
	scanner := bufio.NewScanner(r)
	refs := make([]*git.RefUpdate, 0, 1)

	// We can be passed multiple lines of refs
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if len(line) == 0 {
			continue
		}

		tracerx.Printf("pre-push: %s", line)

		localRef, remoteRef := decodeRefs(line)
		if git.IsZeroObjectID(localRef.Sha) {
			continue
		}

		refs = append(refs, git.NewRefUpdate(cfg.Git, cfg.PushRemote(), localRef, remoteRef))
	}

	return refs
}

// decodeRefs pulls the sha1s out of the line read from the pre-push
// hook's stdin.
func decodeRefs(input string) (*git.Ref, *git.Ref) {
	refs := strings.Split(strings.TrimSpace(input), " ")
	for len(refs) < 4 {
		refs = append(refs, "")
	}

	localRef := git.ParseRef(refs[0], refs[1])
	remoteRef := git.ParseRef(refs[2], refs[3])

	return localRef, remoteRef
}

func init() {
	RegisterCommand("pre-push", prePushCommand, func(cmd *cobra.Command) {
		cmd.Flags().BoolVarP(&prePushDryRun, "dry-run", "d", false, "Do everything except actually send the updates")
	})
}
git-lfs-3.6.1/commands/command_prune.go000066400000000000000000000473761472372047300200730ustar00rootroot00000000000000package commands

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"runtime"
	"sync"
	"time"

	"github.com/git-lfs/git-lfs/v3/errors"
	"github.com/git-lfs/git-lfs/v3/filepathfilter"
	"github.com/git-lfs/git-lfs/v3/fs"
	"github.com/git-lfs/git-lfs/v3/git"
	"github.com/git-lfs/git-lfs/v3/lfs"
	"github.com/git-lfs/git-lfs/v3/tasklog"
	"github.com/git-lfs/git-lfs/v3/tools"
	"github.com/git-lfs/git-lfs/v3/tools/humanize"
	"github.com/git-lfs/git-lfs/v3/tq"
	"github.com/git-lfs/git-lfs/v3/tr"
	"github.com/rubyist/tracerx"
	"github.com/spf13/cobra"
	"golang.org/x/sync/semaphore"
)

var (
	pruneDryRunArg                 bool
	pruneVerboseArg                bool
	pruneVerifyArg                 bool
	pruneRecentArg                 bool
	pruneForceArg                  bool
	pruneDoNotVerifyArg            bool
	pruneVerifyUnreachableArg      bool
	pruneDoNotVerifyUnreachableArg bool
	pruneWhenUnverifiedArg         string
)

func pruneCommand(cmd *cobra.Command, args []string) {
	// Guts of this must be re-usable from fetch --prune so just parse & dispatch
	if pruneVerifyArg && pruneDoNotVerifyArg {
		Exit(tr.Tr.Get("Cannot specify both --verify-remote and --no-verify-remote"))
	}

	fetchPruneConfig := lfs.NewFetchPruneConfig(cfg.Git)
	verify := !pruneDoNotVerifyArg &&
		(fetchPruneConfig.PruneVerifyRemoteAlways || pruneVerifyArg)
	verifyUnreachable := !pruneDoNotVerifyUnreachableArg && 
(pruneVerifyUnreachableArg || fetchPruneConfig.PruneVerifyUnreachableAlways) continueWhenUnverified := false switch pruneWhenUnverifiedArg { case "halt": continueWhenUnverified = false case "continue": continueWhenUnverified = true default: Exit(tr.Tr.Get("Invalid value for --when-unverified: %s", pruneWhenUnverifiedArg)) } fetchPruneConfig.PruneRecent = pruneRecentArg || pruneForceArg fetchPruneConfig.PruneForce = pruneForceArg prune(fetchPruneConfig, verify, verifyUnreachable, continueWhenUnverified, pruneDryRunArg, pruneVerboseArg) } type PruneProgressType int const ( PruneProgressTypeLocal = PruneProgressType(iota) PruneProgressTypeRetain = PruneProgressType(iota) PruneProgressTypeVerify = PruneProgressType(iota) PruneProgressTypeUnverified = PruneProgressType(iota) ) // Progress from a sub-task of prune type PruneProgress struct { ProgressType PruneProgressType Count int // Number of items done } type PruneProgressChan chan PruneProgress func prune(fetchPruneConfig lfs.FetchPruneConfig, verifyRemote, verifyUnreachable, continueWhenUnverified, dryRun, verbose bool) { localObjects := make([]fs.Object, 0, 100) retainedObjects := tools.NewStringSetWithCapacity(100) logger := tasklog.NewLogger(OutputWriter, tasklog.ForceProgress(cfg.ForceProgress()), ) defer logger.Close() var reachableObjects tools.StringSet var taskwait sync.WaitGroup // Add all the base funcs to the waitgroup before starting them, in case // one completes really fast & hits 0 unexpectedly // each main process can Add() to the wg itself if it subdivides the task taskwait.Add(5) // 1..5: localObjects, current & recent refs, unpushed, worktree, stashes if verifyRemote && !verifyUnreachable { taskwait.Add(1) // 6 } progressChan := make(PruneProgressChan, 100) // Collect errors errorChan := make(chan error, 10) var errorwait sync.WaitGroup errorwait.Add(1) var taskErrors []error go pruneTaskCollectErrors(&taskErrors, errorChan, &errorwait) // Populate the single list of local objects go pruneTaskGetLocalObjects(&localObjects, progressChan, &taskwait) // Now find files to be retained from many sources retainChan := make(chan string, 100) gitscanner := lfs.NewGitScanner(cfg, nil) gitscanner.Filter = filepathfilter.New(nil, cfg.FetchExcludePaths(), filepathfilter.GitIgnore) sem := semaphore.NewWeighted(int64(runtime.NumCPU() * 2)) go pruneTaskGetRetainedCurrentAndRecentRefs(gitscanner, fetchPruneConfig, retainChan, errorChan, &taskwait, sem) go pruneTaskGetRetainedUnpushed(gitscanner, fetchPruneConfig, retainChan, errorChan, &taskwait, sem) go pruneTaskGetRetainedWorktree(gitscanner, fetchPruneConfig, retainChan, errorChan, &taskwait, sem) go pruneTaskGetRetainedStashed(gitscanner, retainChan, errorChan, &taskwait, sem) if verifyRemote && !verifyUnreachable { reachableObjects = tools.NewStringSetWithCapacity(100) go pruneTaskGetReachableObjects(gitscanner, &reachableObjects, errorChan, &taskwait, sem) } // Now collect all the retained objects, on separate wait var retainwait sync.WaitGroup retainwait.Add(1) go pruneTaskCollectRetained(&retainedObjects, retainChan, progressChan, &retainwait) // Report progress var progresswait sync.WaitGroup progresswait.Add(1) go pruneTaskDisplayProgress(progressChan, &progresswait, logger) taskwait.Wait() // wait for subtasks close(retainChan) // triggers retain collector to end now all tasks have retainwait.Wait() // make sure all retained objects added close(errorChan) // triggers error collector to end now all tasks have errorwait.Wait() // make sure all errors have been processed 
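	// Every sub-task has now finished, so it is safe to inspect the
	// accumulated errors and abort before any objects are deleted.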
pruneCheckErrors(taskErrors) prunableObjects := make([]string, 0, len(localObjects)/2) // Build list of prunables (also queue for verify at same time if applicable) var verifyQueue *tq.TransferQueue var verifiedObjects tools.StringSet var totalSize int64 var verboseOutput []string var verifyc chan *tq.Transfer var verifywait sync.WaitGroup if verifyRemote { verifyQueue = newDownloadCheckQueue( getTransferManifestOperationRemote("download", fetchPruneConfig.PruneRemoteName), fetchPruneConfig.PruneRemoteName, ) verifiedObjects = tools.NewStringSetWithCapacity(len(localObjects) / 2) // this channel is filled with oids for which Check() succeeded & Transfer() was called verifyc = verifyQueue.Watch() verifywait.Add(1) go func() { for t := range verifyc { verifiedObjects.Add(t.Oid) tracerx.Printf("VERIFIED: %v", t.Oid) progressChan <- PruneProgress{PruneProgressTypeVerify, 1} } verifywait.Done() }() } for _, file := range localObjects { if !retainedObjects.Contains(file.Oid) { prunableObjects = append(prunableObjects, file.Oid) totalSize += file.Size if verbose { // Save up verbose output for the end. verboseOutput = append(verboseOutput, fmt.Sprintf("%s (%s)", file.Oid, humanize.FormatBytes(uint64(file.Size)))) } if verifyRemote { verifyQueue.Add(downloadTransfer(&lfs.WrappedPointer{ Pointer: lfs.NewPointer(file.Oid, file.Size, nil), })) } } } if verifyRemote { verifyQueue.Wait() verifywait.Wait() var problems bytes.Buffer prunableObjectsLen := len(prunableObjects) prunableObjects, problems = pruneGetVerifiedPrunableObjects(prunableObjects, reachableObjects, verifiedObjects, verifyUnreachable) if prunableObjectsLen != len(prunableObjects) { progressChan <- PruneProgress{PruneProgressTypeUnverified, prunableObjectsLen - len(prunableObjects)} } close(progressChan) // after verify but before check progresswait.Wait() if !continueWhenUnverified && problems.Len() > 0 { Exit("%s\n%v", tr.Tr.Get("These objects to be pruned are missing on remote:"), problems.String()) } } else { close(progressChan) progresswait.Wait() } if len(prunableObjects) == 0 { return } logVerboseOutput(logger, verboseOutput, len(prunableObjects), totalSize, dryRun) if !dryRun { pruneDeleteFiles(prunableObjects, logger) } } func logVerboseOutput(logger *tasklog.Logger, verboseOutput []string, numPrunableObjects int, totalSize int64, dryRun bool) { info := logger.Simple() defer info.Complete() if dryRun { info.Logf("prune: %s", tr.Tr.GetN( "%d file would be pruned (%s)", "%d files would be pruned (%s)", numPrunableObjects, numPrunableObjects, humanize.FormatBytes(uint64(totalSize)))) for _, item := range verboseOutput { info.Logf("\n * %s", item) } } else { for _, item := range verboseOutput { info.Logf("\n%s", item) } } } func pruneGetVerifiedPrunableObjects(prunableObjects []string, reachableObjects, verifiedObjects tools.StringSet, verifyUnreachable bool) ([]string, bytes.Buffer) { verifiedPrunableObjects := make([]string, 0, len(verifiedObjects)) var unverified bytes.Buffer for _, oid := range prunableObjects { if verifiedObjects.Contains(oid) { verifiedPrunableObjects = append(verifiedPrunableObjects, oid) } else { if verifyUnreachable { tracerx.Printf("UNVERIFIED: %v", oid) unverified.WriteString(fmt.Sprintf(" * %v\n", oid)) } else { // There's no issue if an object is not reachable and missing, only if reachable & missing if reachableObjects.Contains(oid) { unverified.WriteString(fmt.Sprintf(" * %v\n", oid)) } else { // Just to indicate why it doesn't matter that we didn't verify tracerx.Printf("UNREACHABLE: %v", oid) 
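				// An object no ref can reach cannot be needed again, so it
				// is safe to prune even though it was never verified.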
verifiedPrunableObjects = append(verifiedPrunableObjects, oid) } } } } return verifiedPrunableObjects, unverified } func pruneCheckErrors(taskErrors []error) { if len(taskErrors) > 0 { for _, err := range taskErrors { LoggedError(err, tr.Tr.Get("Prune error: %v", err)) } Exit(tr.Tr.Get("Prune sub-tasks failed, cannot continue")) } } func pruneTaskDisplayProgress(progressChan PruneProgressChan, waitg *sync.WaitGroup, logger *tasklog.Logger) { defer waitg.Done() task := logger.Simple() defer task.Complete() localCount := 0 retainCount := 0 verifyCount := 0 notRemoteCount := 0 var msg string for p := range progressChan { switch p.ProgressType { case PruneProgressTypeLocal: localCount++ case PruneProgressTypeRetain: retainCount++ case PruneProgressTypeVerify: verifyCount++ case PruneProgressTypeUnverified: notRemoteCount += p.Count } msg = fmt.Sprintf("prune: %s, %s", tr.Tr.GetN("%d local object", "%d local objects", localCount, localCount), tr.Tr.GetN("%d retained", "%d retained", retainCount, retainCount)) if verifyCount > 0 { msg += tr.Tr.GetN(", %d verified with remote", ", %d verified with remote", verifyCount, verifyCount) } if notRemoteCount > 0 { msg += tr.Tr.GetN(", %d not on remote", ", %d not on remote", notRemoteCount, notRemoteCount) } task.Log(msg) } } func pruneTaskCollectRetained(outRetainedObjects *tools.StringSet, retainChan chan string, progressChan PruneProgressChan, retainwait *sync.WaitGroup) { defer retainwait.Done() for oid := range retainChan { if outRetainedObjects.Add(oid) { progressChan <- PruneProgress{PruneProgressTypeRetain, 1} } } } func pruneTaskCollectErrors(outtaskErrors *[]error, errorChan chan error, errorwait *sync.WaitGroup) { defer errorwait.Done() for err := range errorChan { *outtaskErrors = append(*outtaskErrors, err) } } func pruneDeleteFiles(prunableObjects []string, logger *tasklog.Logger) { task := logger.Percentage(fmt.Sprintf("prune: %s", tr.Tr.Get("Deleting objects")), uint64(len(prunableObjects))) defer task.Complete() var problems bytes.Buffer // In case we fail to delete some var deletedFiles int for _, oid := range prunableObjects { mediaFile, err := cfg.Filesystem().ObjectPath(oid) if err != nil { problems.WriteString(tr.Tr.Get("Unable to find media path for %v: %v", oid, err)) problems.WriteRune('\n') continue } if mediaFile == os.DevNull { continue } err = os.Remove(mediaFile) if err != nil { problems.WriteString(tr.Tr.Get("Failed to remove file %v: %v", mediaFile, err)) problems.WriteRune('\n') continue } deletedFiles++ task.Count(1) } if problems.Len() > 0 { LoggedError(errors.New(tr.Tr.Get("failed to delete some files")), problems.String()) Exit(tr.Tr.Get("Prune failed, see errors above")) } } // Background task, must call waitg.Done() once at end func pruneTaskGetLocalObjects(outLocalObjects *[]fs.Object, progChan PruneProgressChan, waitg *sync.WaitGroup) { defer waitg.Done() cfg.EachLFSObject(func(obj fs.Object) error { *outLocalObjects = append(*outLocalObjects, obj) progChan <- PruneProgress{PruneProgressTypeLocal, 1} return nil }) } // Background task, must call waitg.Done() once at end func pruneTaskGetRetainedAtRef(gitscanner *lfs.GitScanner, ref string, retainChan chan string, errorChan chan error, waitg *sync.WaitGroup, sem *semaphore.Weighted) { sem.Acquire(context.Background(), 1) defer sem.Release(1) defer waitg.Done() err := gitscanner.ScanTree(ref, func(p *lfs.WrappedPointer, err error) { if err != nil { errorChan <- err return } retainChan <- p.Oid tracerx.Printf("RETAIN: %v via ref %v", p.Oid, ref) }) if err != nil { 
errorChan <- err } } // Background task, must call waitg.Done() once at end func pruneTaskGetPreviousVersionsOfRef(gitscanner *lfs.GitScanner, ref string, since time.Time, retainChan chan string, errorChan chan error, waitg *sync.WaitGroup, sem *semaphore.Weighted) { sem.Acquire(context.Background(), 1) defer sem.Release(1) defer waitg.Done() err := gitscanner.ScanPreviousVersions(ref, since, func(p *lfs.WrappedPointer, err error) { if err != nil { errorChan <- err return } retainChan <- p.Oid tracerx.Printf("RETAIN: %v via ref %v >= %v", p.Oid, ref, since) }) if err != nil { errorChan <- err return } } // Background task, must call waitg.Done() once at end func pruneTaskGetRetainedCurrentAndRecentRefs(gitscanner *lfs.GitScanner, fetchconf lfs.FetchPruneConfig, retainChan chan string, errorChan chan error, waitg *sync.WaitGroup, sem *semaphore.Weighted) { defer waitg.Done() // We actually increment the waitg in this func since we kick off sub-goroutines // Make a list of what unique commits to keep, & search backward from commits := tools.NewStringSet() // Do current first ref, err := git.CurrentRef() if err != nil { errorChan <- err return } commits.Add(ref.Sha) if !fetchconf.PruneForce { waitg.Add(1) go pruneTaskGetRetainedAtRef(gitscanner, ref.Sha, retainChan, errorChan, waitg, sem) } // Now recent if !fetchconf.PruneRecent && fetchconf.FetchRecentRefsDays > 0 { pruneRefDays := fetchconf.FetchRecentRefsDays + fetchconf.PruneOffsetDays tracerx.Printf("PRUNE: Retaining non-HEAD refs within %d (%d+%d) days", pruneRefDays, fetchconf.FetchRecentRefsDays, fetchconf.PruneOffsetDays) refsSince := time.Now().AddDate(0, 0, -pruneRefDays) // Keep all recent refs including any recent remote branches refs, err := git.RecentBranches(refsSince, fetchconf.FetchRecentRefsIncludeRemotes, "") if err != nil { Panic(err, tr.Tr.Get("Could not scan for recent refs")) } for _, ref := range refs { if commits.Add(ref.Sha) { // A new commit waitg.Add(1) go pruneTaskGetRetainedAtRef(gitscanner, ref.Sha, retainChan, errorChan, waitg, sem) } } } // For every unique commit we've fetched, check recent commits too // Only if we're fetching recent commits, otherwise only keep at refs if !fetchconf.PruneRecent && fetchconf.FetchRecentCommitsDays > 0 { pruneCommitDays := fetchconf.FetchRecentCommitsDays + fetchconf.PruneOffsetDays for commit := range commits.Iter() { // We measure from the last commit at the ref summ, err := git.GetCommitSummary(commit) if err != nil { errorChan <- errors.New(tr.Tr.Get("couldn't scan commits at %v: %v", commit, err)) continue } commitsSince := summ.CommitDate.AddDate(0, 0, -pruneCommitDays) waitg.Add(1) go pruneTaskGetPreviousVersionsOfRef(gitscanner, commit, commitsSince, retainChan, errorChan, waitg, sem) } } } // Background task, must call waitg.Done() once at end func pruneTaskGetRetainedUnpushed(gitscanner *lfs.GitScanner, fetchconf lfs.FetchPruneConfig, retainChan chan string, errorChan chan error, waitg *sync.WaitGroup, sem *semaphore.Weighted) { defer waitg.Done() err := gitscanner.ScanUnpushed(fetchconf.PruneRemoteName, func(p *lfs.WrappedPointer, err error) { if err != nil { errorChan <- err } else { retainChan <- p.Pointer.Oid tracerx.Printf("RETAIN: %v unpushed", p.Pointer.Oid) } }) if err != nil { errorChan <- err return } } // Background task, must call waitg.Done() once at end func pruneTaskGetRetainedWorktree(gitscanner *lfs.GitScanner, fetchconf lfs.FetchPruneConfig, retainChan chan string, errorChan chan error, waitg *sync.WaitGroup, sem *semaphore.Weighted) { defer 
waitg.Done() // Retain other worktree HEADs too // Working copy, branch & maybe commit is different but repo is shared allWorktrees, err := git.GetAllWorktrees(cfg.LocalGitStorageDir()) if err != nil { errorChan <- err return } // Don't repeat any commits, worktrees are always on their own branches but // may point to the same commit commits := tools.NewStringSet() if !fetchconf.PruneForce { // current HEAD is done elsewhere headref, err := git.CurrentRef() if err != nil { errorChan <- err return } commits.Add(headref.Sha) } for _, worktree := range allWorktrees { if !fetchconf.PruneForce && commits.Add(worktree.Ref.Sha) { // Worktree is on a different commit waitg.Add(1) // Don't need to 'cd' to worktree since we share same repo go pruneTaskGetRetainedAtRef(gitscanner, worktree.Ref.Sha, retainChan, errorChan, waitg, sem) } if !worktree.Prunable { // Always scan the index of the worktree if it exists waitg.Add(1) go pruneTaskGetRetainedIndex(gitscanner, worktree.Ref.Sha, worktree.Dir, retainChan, errorChan, waitg, sem) } } } // Background task, must call waitg.Done() once at end func pruneTaskGetRetainedStashed(gitscanner *lfs.GitScanner, retainChan chan string, errorChan chan error, waitg *sync.WaitGroup, sem *semaphore.Weighted) { defer waitg.Done() err := gitscanner.ScanStashed(func(p *lfs.WrappedPointer, err error) { if err != nil { errorChan <- err } else { retainChan <- p.Pointer.Oid tracerx.Printf("RETAIN: %v stashed", p.Pointer.Oid) } }) if err != nil { errorChan <- err return } } // Background task, must call waitg.Done() once at end func pruneTaskGetRetainedIndex(gitscanner *lfs.GitScanner, ref string, workingDir string, retainChan chan string, errorChan chan error, waitg *sync.WaitGroup, sem *semaphore.Weighted) { defer waitg.Done() err := gitscanner.ScanIndex(ref, workingDir, func(p *lfs.WrappedPointer, err error) { if err != nil { errorChan <- err } else { retainChan <- p.Pointer.Oid tracerx.Printf("RETAIN: %v index", p.Pointer.Oid) } }) if err != nil { errorChan <- err return } } // Background task, must call waitg.Done() once at end func pruneTaskGetReachableObjects(gitscanner *lfs.GitScanner, outObjectSet *tools.StringSet, errorChan chan error, waitg *sync.WaitGroup, sem *semaphore.Weighted) { defer waitg.Done() err := gitscanner.ScanAll(func(p *lfs.WrappedPointer, err error) { sem.Acquire(context.Background(), 1) defer sem.Release(1) if err != nil { errorChan <- err return } outObjectSet.Add(p.Oid) }) if err != nil { errorChan <- err } } func init() { RegisterCommand("prune", pruneCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&pruneDryRunArg, "dry-run", "d", false, "Don't delete anything, just report") cmd.Flags().BoolVarP(&pruneVerboseArg, "verbose", "v", false, "Print full details of what is/would be deleted") cmd.Flags().BoolVarP(&pruneRecentArg, "recent", "", false, "Prune even recent objects") cmd.Flags().BoolVarP(&pruneForceArg, "force", "f", false, "Prune everything that has been pushed") cmd.Flags().BoolVarP(&pruneVerifyArg, "verify-remote", "c", false, "Verify that remote has reachable LFS files before deleting") cmd.Flags().BoolVar(&pruneDoNotVerifyArg, "no-verify-remote", false, "Override lfs.pruneverifyremotealways and don't verify") cmd.Flags().BoolVar(&pruneVerifyUnreachableArg, "verify-unreachable", false, "When using --verify-remote, additionally verify unreachable LFS files before deleting.") cmd.Flags().BoolVar(&pruneDoNotVerifyUnreachableArg, "no-verify-unreachable", false, "Override lfs.pruneverifyunreachablealways and don't verify 
unreachable objects") cmd.Flags().StringVar(&pruneWhenUnverifiedArg, "when-unverified", "halt", "halt|continue the execution when objects are not found on the remote") }) } git-lfs-3.6.1/commands/command_pull.go000066400000000000000000000072271472372047300177110ustar00rootroot00000000000000package commands import ( "fmt" "os" "sync" "time" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/tasklog" "github.com/git-lfs/git-lfs/v3/tq" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" "github.com/spf13/cobra" ) func pullCommand(cmd *cobra.Command, args []string) { requireGitVersion() setupRepository() if len(args) > 0 { // Remote is first arg if err := cfg.SetValidRemote(args[0]); err != nil { Exit(tr.Tr.Get("Invalid remote name %q: %s", args[0], err)) } } includeArg, excludeArg := getIncludeExcludeArgs(cmd) filter := buildFilepathFilter(cfg, includeArg, excludeArg, true) pull(filter) } func pull(filter *filepathfilter.Filter) { ref, err := git.CurrentRef() if err != nil { Panic(err, tr.Tr.Get("Could not pull")) } pointers := newPointerMap() logger := tasklog.NewLogger(os.Stdout, tasklog.ForceProgress(cfg.ForceProgress()), ) meter := tq.NewMeter(cfg) meter.Logger = meter.LoggerFromEnv(cfg.Os) logger.Enqueue(meter) remote := cfg.Remote() singleCheckout := newSingleCheckout(cfg.Git, remote) q := newDownloadQueue(singleCheckout.Manifest(), remote, tq.WithProgress(meter)) gitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) { if err != nil { LoggedError(err, tr.Tr.Get("Scanner error: %s", err)) return } if pointers.Seen(p) { return } // no need to download objects that exist locally already lfs.LinkOrCopyFromReference(cfg, p.Oid, p.Size) if cfg.LFSObjectExists(p.Oid, p.Size) { singleCheckout.Run(p) return } meter.Add(p.Size) tracerx.Printf("fetch %v [%v]", p.Name, p.Oid) pointers.Add(p) q.Add(downloadTransfer(p)) }) gitscanner.Filter = filter dlwatch := q.Watch() var wg sync.WaitGroup wg.Add(1) go func() { for t := range dlwatch { for _, p := range pointers.All(t.Oid) { singleCheckout.Run(p) } } wg.Done() }() processQueue := time.Now() if err := gitscanner.ScanLFSFiles(ref.Sha, nil); err != nil { singleCheckout.Close() ExitWithError(err) } meter.Start() q.Wait() wg.Wait() tracerx.PerformanceSince("process queue", processQueue) singleCheckout.Close() success := true for _, err := range q.Errors() { success = false FullError(err) } if !success { c := getAPIClient() e := c.Endpoints.Endpoint("download", remote) Exit(tr.Tr.Get("Failed to fetch some objects from '%s'", e.Url)) } if singleCheckout.Skip() { fmt.Println(tr.Tr.Get("Skipping object checkout, Git LFS is not installed for this repository.\nConsider installing it with 'git lfs install'.")) } } // tracks LFS objects being downloaded, according to their unique OIDs. 
type pointerMap struct { pointers map[string][]*lfs.WrappedPointer mu sync.Mutex } func newPointerMap() *pointerMap { return &pointerMap{pointers: make(map[string][]*lfs.WrappedPointer)} } func (m *pointerMap) Seen(p *lfs.WrappedPointer) bool { m.mu.Lock() defer m.mu.Unlock() if existing, ok := m.pointers[p.Oid]; ok { m.pointers[p.Oid] = append(existing, p) return true } return false } func (m *pointerMap) Add(p *lfs.WrappedPointer) { m.mu.Lock() defer m.mu.Unlock() m.pointers[p.Oid] = append(m.pointers[p.Oid], p) } func (m *pointerMap) All(oid string) []*lfs.WrappedPointer { m.mu.Lock() defer m.mu.Unlock() pointers := m.pointers[oid] delete(m.pointers, oid) return pointers } func init() { RegisterCommand("pull", pullCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&includeArg, "include", "I", "", "Include a list of paths") cmd.Flags().StringVarP(&excludeArg, "exclude", "X", "", "Exclude a list of paths") }) } git-lfs-3.6.1/commands/command_push.go000066400000000000000000000116351472372047300177120ustar00rootroot00000000000000package commands import ( "bufio" "os" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/tq" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" "github.com/spf13/cobra" ) var ( pushDryRun = false pushObjectIDs = false pushAll = false useStdin = false // shares some global vars and functions with command_pre_push.go ) // pushCommand pushes local objects to a Git LFS server. It has four forms: // // ` ...` // ` --stdin` (reads refs from stdin) // ` --object-id ...` // ` --object-id --stdin` (reads oids from stdin) // // Remote must be a remote name, not a URL. With --stdin, values are newline // separated. // // pushCommand calculates the git objects to send by comparing the range // of commits between the local and remote git servers. func pushCommand(cmd *cobra.Command, args []string) { if len(args) == 0 { Print(tr.Tr.Get("Specify a remote and a remote branch name (`git lfs push origin main`)")) os.Exit(1) } requireGitVersion() // Remote is first arg if err := cfg.SetValidPushRemote(args[0]); err != nil { Exit(tr.Tr.Get("Invalid remote name %q: %s", args[0], err)) } ctx := newUploadContext(pushDryRun) var argList []string if useStdin { if len(args) > 1 { Print(tr.Tr.Get("Further command line arguments are ignored with --stdin")) os.Exit(1) } scanner := bufio.NewScanner(os.Stdin) // line-delimited for scanner.Scan() { line := scanner.Text() if line != "" { argList = append(argList, line) } } if err := scanner.Err(); err != nil { ExitWithError(errors.Wrap(err, tr.Tr.Get("Error reading from stdin:"))) } } else { argList = args[1:] } if pushObjectIDs { // We allow no object IDs with `--stdin` to make scripting // easier and avoid having to special-case this in scripts. 
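	//
	// For instance, a script can pipe a possibly-empty OID list without
	// special-casing emptiness (where `produce-oids` is a hypothetical
	// command emitting newline-separated OIDs):
	//
	//	produce-oids | git lfs push --object-id --stdin origin
	//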
if !useStdin && len(argList) < 1 { Print(tr.Tr.Get("At least one object ID must be supplied with --object-id")) os.Exit(1) } uploadsWithObjectIDs(ctx, argList) } else { if !useStdin && !pushAll && len(argList) < 1 { Print(tr.Tr.Get("At least one ref must be supplied without --all")) os.Exit(1) } uploadsBetweenRefAndRemote(ctx, argList) } } func uploadsBetweenRefAndRemote(ctx *uploadContext, refnames []string) { tracerx.Printf("Upload refs %v to remote %v", refnames, ctx.Remote) updates, err := lfsPushRefs(refnames, pushAll) if err != nil { Error(err.Error()) Exit(tr.Tr.Get("Error getting local refs.")) } if err := uploadForRefUpdates(ctx, updates, pushAll); err != nil { ExitWithError(err) } } func uploadsWithObjectIDs(ctx *uploadContext, oids []string) { pointers := make([]*lfs.WrappedPointer, len(oids)) for i, oid := range oids { mp, err := ctx.gitfilter.ObjectPath(oid) if err != nil { ExitWithError(errors.Wrap(err, tr.Tr.Get("Unable to find local media path:"))) } stat, err := os.Stat(mp) if err != nil { ExitWithError(errors.Wrap(err, tr.Tr.Get("Unable to stat local media path"))) } pointers[i] = &lfs.WrappedPointer{ Name: mp, Pointer: &lfs.Pointer{ Oid: oid, Size: stat.Size(), }, } } q := ctx.NewQueue(tq.RemoteRef(currentRemoteRef())) ctx.UploadPointers(q, pointers...) ctx.CollectErrors(q) ctx.ReportErrors() } // lfsPushRefs returns valid ref updates from the given ref and --all arguments. // Either one or more refs can be explicitly specified, or --all indicates all // local refs are pushed. func lfsPushRefs(refnames []string, pushAll bool) ([]*git.RefUpdate, error) { localrefs, err := git.LocalRefs() if err != nil { return nil, err } if pushAll && len(refnames) == 0 { refs := make([]*git.RefUpdate, len(localrefs)) for i, lr := range localrefs { refs[i] = git.NewRefUpdate(cfg.Git, cfg.PushRemote(), lr, nil) } return refs, nil } reflookup := make(map[string]*git.Ref, len(localrefs)) for _, ref := range localrefs { reflookup[ref.Name] = ref } refs := make([]*git.RefUpdate, len(refnames)) for i, name := range refnames { if ref, ok := reflookup[name]; ok { refs[i] = git.NewRefUpdate(cfg.Git, cfg.PushRemote(), ref, nil) } else { ref := &git.Ref{Name: name, Type: git.RefTypeOther, Sha: name} if _, err := git.ResolveRef(name); err != nil { return nil, errors.New(tr.Tr.Get("Invalid ref argument: %v", name)) } refs[i] = git.NewRefUpdate(cfg.Git, cfg.PushRemote(), ref, nil) } } return refs, nil } func init() { RegisterCommand("push", pushCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&pushDryRun, "dry-run", "d", false, "Do everything except actually send the updates") cmd.Flags().BoolVarP(&pushObjectIDs, "object-id", "o", false, "Push LFS object ID(s)") cmd.Flags().BoolVarP(&useStdin, "stdin", "", false, "Read object IDs or refs from stdin") cmd.Flags().BoolVarP(&pushAll, "all", "a", false, "Push all objects for the current ref to the remote.") }) } git-lfs-3.6.1/commands/command_smudge.go000066400000000000000000000132311472372047300202110ustar00rootroot00000000000000package commands import ( "fmt" "io" "os" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tools/humanize" "github.com/git-lfs/git-lfs/v3/tq" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) var ( // smudgeSkip is a command-line flag belonging to the "git-lfs smudge" // command specifying whether to skip the smudge process. 
smudgeSkip = false ) // delayedSmudge performs a 'delayed' smudge, adding the LFS pointer to the // `*tq.TransferQueue` "q" if the file is not present locally, passes the given // filepathfilter, and is not skipped. If the pointer is malformed, or already // exists, it streams the contents to be written into the working copy to "to". // // delayedSmudge returns the number of bytes written, whether the checkout was // delayed, the *lfs.Pointer that was smudged, and an error, if one occurred. func delayedSmudge(gf *lfs.GitFilter, s *git.FilterProcessScanner, to io.Writer, from io.Reader, q *tq.TransferQueue, filename string, skip bool, filter *filepathfilter.Filter) (int64, bool, *lfs.Pointer, error) { ptr, pbuf, perr := lfs.DecodeFrom(from) if perr != nil { // Write 'statusFromErr(nil)', even though 'perr != nil', since // we are about to write non-delayed smudged contents to "to". if err := s.WriteStatus(statusFromErr(nil)); err != nil { return 0, false, nil, err } n, err := tools.Spool(to, pbuf, cfg.TempDir()) if err != nil { return n, false, nil, errors.Wrap(err, perr.Error()) } if n != 0 { return 0, false, nil, errors.NewNotAPointerError(errors.Errorf( tr.Tr.Get("Unable to parse pointer at: %q", filename), )) } return 0, false, nil, nil } lfs.LinkOrCopyFromReference(cfg, ptr.Oid, ptr.Size) path, err := cfg.Filesystem().ObjectPath(ptr.Oid) if err != nil { return 0, false, nil, err } if !skip && filter.Allows(filename) { if _, statErr := os.Stat(path); statErr != nil && ptr.Size != 0 { q.Add(filename, path, ptr.Oid, ptr.Size, false, err) return 0, true, ptr, nil } // Write 'statusFromErr(nil)', since the object is already // present in the local cache, we will write the object's // contents without delaying. if err := s.WriteStatus(statusFromErr(nil)); err != nil { return 0, false, nil, err } n, err := gf.Smudge(to, ptr, filename, false, nil, nil) return n, false, ptr, err } if err := s.WriteStatus(statusFromErr(nil)); err != nil { return 0, false, nil, err } n, err := ptr.Encode(to) return int64(n), false, ptr, err } // smudge smudges the given `*lfs.Pointer`, "ptr", and writes its objects // contents to the `io.Writer`, "to". // // If the encoded LFS pointer is not parse-able as a pointer, the contents of // that file will instead be spooled to a temporary location on disk and then // copied out back to Git. If the pointer file is empty, an empty file will be // written with no error. // // If the smudged object did not "pass" the include and exclude filterset, it // will not be downloaded, and the object will remain a pointer on disk, as if // the smudge filter had not been applied at all. // // Any errors encountered along the way will be returned immediately if they // were non-fatal, otherwise execution will halt and the process will be // terminated by using the `commands.Panic()` func. 
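//
// For reference, a pointer file that lfs.DecodeFrom accepts looks like this
// (example OID and size):
//
//	version https://git-lfs.github.com/spec/v1
//	oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393
//	size 12345
//
// Anything else is spooled through unchanged, as described above.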
func smudge(gf *lfs.GitFilter, to io.Writer, from io.Reader, filename string, skip bool, filter *filepathfilter.Filter) (int64, error) { ptr, pbuf, perr := lfs.DecodeFrom(from) if perr != nil { n, err := tools.Spool(to, pbuf, cfg.TempDir()) if err != nil { return 0, errors.Wrap(err, perr.Error()) } if n != 0 { return 0, errors.NewNotAPointerError(errors.Errorf( tr.Tr.Get("Unable to parse pointer at: %q", filename), )) } return 0, nil } lfs.LinkOrCopyFromReference(cfg, ptr.Oid, ptr.Size) cb, file, err := gf.CopyCallbackFile("download", filename, 1, 1) if err != nil { return 0, err } if skip || !filter.Allows(filename) { n, err := ptr.Encode(to) return int64(n), err } n, err := gf.Smudge(to, ptr, filename, true, getTransferManifestOperationRemote("download", cfg.Remote()), cb) if file != nil { file.Close() } if err != nil { ptr.Encode(to) var oid string = ptr.Oid if len(oid) >= 7 { oid = oid[:7] } LoggedError(err, tr.Tr.Get("Error downloading object: %s (%s): %s", filename, oid, err)) if !cfg.SkipDownloadErrors() { os.Exit(2) } } return n, nil } func smudgeCommand(cmd *cobra.Command, args []string) { requireStdin(tr.Tr.Get("This command should be run by the Git 'smudge' filter")) setupRepository() installHooks(false) if !smudgeSkip && cfg.Os.Bool("GIT_LFS_SKIP_SMUDGE", false) { smudgeSkip = true } filter := filepathfilter.New(cfg.FetchIncludePaths(), cfg.FetchExcludePaths(), filepathfilter.GitIgnore) gitfilter := lfs.NewGitFilter(cfg) if n, err := smudge(gitfilter, os.Stdout, os.Stdin, smudgeFilename(args), smudgeSkip, filter); err != nil { if errors.IsNotAPointerError(err) { fmt.Fprintln(os.Stderr, err.Error()) } else { Error(err.Error()) } } else if possiblyMalformedObjectSize(n) { fmt.Fprintln(os.Stderr, tr.Tr.Get("Possibly malformed smudge on Windows: see `git lfs help smudge` for more info.")) } } func smudgeFilename(args []string) string { if len(args) > 0 { return args[0] } return fmt.Sprintf("<%s>", tr.Tr.Get("unknown file")) } func possiblyMalformedObjectSize(n int64) bool { return n >= 4*humanize.Gibibyte } func init() { RegisterCommand("smudge", smudgeCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&smudgeSkip, "skip", "s", false, "") }) } git-lfs-3.6.1/commands/command_standalone_file.go000066400000000000000000000006301472372047300220530ustar00rootroot00000000000000package commands import ( "fmt" "os" "github.com/git-lfs/git-lfs/v3/lfshttp/standalone" "github.com/spf13/cobra" ) func standaloneFileCommand(cmd *cobra.Command, args []string) { err := standalone.ProcessStandaloneData(cfg, os.Stdin, os.Stdout) if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(2) } } func init() { RegisterCommand("standalone-file", standaloneFileCommand, nil) } git-lfs-3.6.1/commands/command_status.go000066400000000000000000000202231472372047300202470ustar00rootroot00000000000000package commands import ( "crypto/sha256" "encoding/json" "fmt" "io" "os" "path/filepath" "strings" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) var ( porcelain = false statusJson = false ) func statusCommand(cmd *cobra.Command, args []string) { setupWorkingCopy() // tolerate errors getting ref so this works before first commit ref, _ := git.CurrentRef() scanIndexAt := "HEAD" var err error if ref == nil { scanIndexAt, err = git.EmptyTree() if err != nil { ExitWithError(err) } } scanner, err := lfs.NewPointerScanner(cfg.GitEnv(), cfg.OSEnv()) if err != nil { ExitWithError(err) } if 
porcelain { porcelainStagedPointers(scanIndexAt) return } else if statusJson { jsonStagedPointers(scanner, scanIndexAt) return } statusScanRefRange(ref) staged, unstaged, err := scanIndex(scanIndexAt) if err != nil { ExitWithError(err) } wd, _ := os.Getwd() repo := cfg.LocalWorkingDir() wd = tools.ResolveSymlinks(wd) Print("\n%s\n", tr.Tr.Get("Objects to be committed:")) for _, entry := range staged { // Find a path from the current working directory to the // absolute path of each side of the entry. src := relativize(wd, filepath.Join(repo, entry.SrcName)) dst := relativize(wd, filepath.Join(repo, entry.DstName)) switch entry.Status { case lfs.StatusRename, lfs.StatusCopy: Print("\t%s -> %s (%s)", src, dst, formatBlobInfo(scanner, entry)) default: Print("\t%s (%s)", src, formatBlobInfo(scanner, entry)) } } Print("\n%s\n", tr.Tr.Get("Objects not staged for commit:")) for _, entry := range unstaged { src := relativize(wd, filepath.Join(repo, entry.SrcName)) Print("\t%s (%s)", src, formatBlobInfo(scanner, entry)) } Print("") if err = scanner.Close(); err != nil { ExitWithError(err) } } func formatBlobInfo(s *lfs.PointerScanner, entry *lfs.DiffIndexEntry) string { fromSha, fromSrc, err := blobInfoFrom(s, entry) if err != nil { ExitWithError(err) } from := fmt.Sprintf("%s: %s", fromSrc, fromSha) if entry.Status == lfs.StatusAddition { return from } toSha, toSrc, err := blobInfoTo(s, entry) if err != nil { ExitWithError(err) } to := fmt.Sprintf("%s: %s", toSrc, toSha) return fmt.Sprintf("%s -> %s", from, to) } func blobInfoFrom(s *lfs.PointerScanner, entry *lfs.DiffIndexEntry) (sha, from string, err error) { var blobSha string = entry.SrcSha if git.IsZeroObjectID(blobSha) { blobSha = entry.DstSha } return blobInfo(s, blobSha, entry.SrcName) } func blobInfoTo(s *lfs.PointerScanner, entry *lfs.DiffIndexEntry) (sha, from string, err error) { var name string = entry.DstName if len(name) == 0 { name = entry.SrcName } return blobInfo(s, entry.DstSha, name) } func blobInfo(s *lfs.PointerScanner, blobSha, name string) (sha, from string, err error) { if !git.IsZeroObjectID(blobSha) { s.Scan(blobSha) if err := s.Err(); err != nil { if git.IsMissingObject(err) { return fmt.Sprintf("<%s>", tr.Tr.Get("missing")), "?", nil } return "", "", err } var from string if s.Pointer() != nil { from = "LFS" } else { from = "Git" } return s.ContentsSha()[:7], from, nil } f, err := os.Open(filepath.Join(cfg.LocalWorkingDir(), name)) if os.IsNotExist(err) { return tr.Tr.Get("deleted"), tr.Tr.Get("File"), nil } if err != nil { return "", "", err } defer f.Close() // We've replaced a file with a directory. 
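	// (For example, a path once tracked as an LFS file may now be a
	// directory in the working tree after a branch switch; Stat then
	// reports a directory, and the entry is listed as deleted rather
	// than hashed.)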
if fi, err := f.Stat(); err == nil && fi.Mode().IsDir() { return tr.Tr.Get("deleted"), tr.Tr.Get("File"), nil } shasum := sha256.New() if _, err = io.Copy(shasum, f); err != nil { return "", "", err } return fmt.Sprintf("%x", shasum.Sum(nil))[:7], tr.Tr.Get("File"), nil } func scanIndex(ref string) (staged, unstaged []*lfs.DiffIndexEntry, err error) { uncached, err := lfs.NewDiffIndexScanner(ref, false, true, "") if err != nil { return nil, nil, err } cached, err := lfs.NewDiffIndexScanner(ref, true, false, "") if err != nil { return nil, nil, err } seenNames := make(map[string]struct{}, 0) staged, err = drainScanner(seenNames, cached) if err != nil { return nil, nil, err } unstaged, err = drainScanner(seenNames, uncached) if err != nil { return nil, nil, err } return } func drainScanner(cache map[string]struct{}, scanner *lfs.DiffIndexScanner) ([]*lfs.DiffIndexEntry, error) { var to []*lfs.DiffIndexEntry for scanner.Scan() { entry := scanner.Entry() key := keyFromEntry(entry) if _, seen := cache[key]; !seen { to = append(to, entry) cache[key] = struct{}{} } } if err := scanner.Err(); err != nil { return nil, err } return to, nil } func keyFromEntry(e *lfs.DiffIndexEntry) string { var name string = e.DstName if len(name) == 0 { name = e.SrcName } return strings.Join([]string{e.SrcSha, e.DstSha, name}, ":") } func statusScanRefRange(ref *git.Ref) { if ref == nil { return } Print(tr.Tr.Get("On branch %s", ref.Name)) remoteRef, err := cfg.GitConfig().CurrentRemoteRef() if err != nil { return } gitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) { if err != nil { Panic(err, tr.Tr.Get("Could not scan for Git LFS objects")) return } Print("\t%s (%s)", p.Name, p.Oid) }) Print("%s\n", tr.Tr.Get("Objects to be pushed to %s:", remoteRef.Name)) if err := gitscanner.ScanRefRange(ref.Sha, remoteRef.Sha, nil); err != nil { Panic(err, tr.Tr.Get("Could not scan for Git LFS objects")) } } type JSONStatusEntry struct { Status string `json:"status"` From string `json:"from,omitempty"` } type JSONStatus struct { Files map[string]JSONStatusEntry `json:"files"` } func jsonStagedPointers(scanner *lfs.PointerScanner, ref string) { staged, unstaged, err := scanIndex(ref) if err != nil { ExitWithError(err) } status := JSONStatus{Files: make(map[string]JSONStatusEntry)} for _, entry := range append(unstaged, staged...) { _, fromSrc, err := blobInfoFrom(scanner, entry) if err != nil { ExitWithError(err) } if fromSrc != "LFS" { continue } switch entry.Status { case lfs.StatusRename, lfs.StatusCopy: status.Files[entry.DstName] = JSONStatusEntry{ Status: string(entry.Status), From: entry.SrcName, } default: status.Files[entry.SrcName] = JSONStatusEntry{ Status: string(entry.Status), } } } ret, err := json.Marshal(status) if err != nil { ExitWithError(err) } Print(string(ret)) } func porcelainStagedPointers(ref string) { staged, unstaged, err := scanIndex(ref) if err != nil { ExitWithError(err) } seenNames := make(map[string]struct{}) for _, entry := range append(unstaged, staged...) 
{ name := entry.DstName if len(name) == 0 { name = entry.SrcName } if _, seen := seenNames[name]; !seen { Print(porcelainStatusLine(entry)) seenNames[name] = struct{}{} } } } func porcelainStatusLine(entry *lfs.DiffIndexEntry) string { switch entry.Status { case lfs.StatusRename, lfs.StatusCopy: return fmt.Sprintf("%s %s -> %s", entry.Status, entry.SrcName, entry.DstName) case lfs.StatusModification: return fmt.Sprintf(" %s %s", entry.Status, entry.SrcName) } return fmt.Sprintf("%s %s", entry.Status, entry.SrcName) } // relativize relatives a path from "from" to "to". For instance, note that, for // any paths "from" and "to", that: // // to == filepath.Clean(filepath.Join(from, relativize(from, to))) func relativize(from, to string) string { if len(from) == 0 { return to } flist := strings.Split(filepath.ToSlash(from), "/") tlist := strings.Split(filepath.ToSlash(to), "/") var ( divergence int min int ) if lf, lt := len(flist), len(tlist); lf < lt { min = lf } else { min = lt } for ; divergence < min; divergence++ { if flist[divergence] != tlist[divergence] { break } } return strings.Repeat("../", len(flist)-divergence) + strings.Join(tlist[divergence:], "/") } func init() { RegisterCommand("status", statusCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&porcelain, "porcelain", "p", false, "Give the output in an easy-to-parse format for scripts.") cmd.Flags().BoolVarP(&statusJson, "json", "j", false, "Give the output in a stable json format for scripts.") }) } git-lfs-3.6.1/commands/command_track.go000066400000000000000000000274741472372047300200470ustar00rootroot00000000000000package commands import ( "bufio" "bytes" "encoding/json" "fmt" "os" "path" "path/filepath" "runtime" "strings" "time" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/git/gitattr" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) var ( prefixBlocklist = []string{ ".git", ".lfs", } trackLockableFlag bool trackNotLockableFlag bool trackVerboseLoggingFlag bool trackDryRunFlag bool trackNoModifyAttrsFlag bool trackNoExcludedFlag bool trackFilenameFlag bool trackJSONFlag bool ) func trackCommand(cmd *cobra.Command, args []string) { requireGitVersion() setupWorkingCopy() if trackDryRunFlag { trackNoModifyAttrsFlag = true } if !cfg.Os.Bool("GIT_LFS_TRACK_NO_INSTALL_HOOKS", false) { installHooks(false) } if len(args) == 0 { listPatterns() return } if trackJSONFlag { Exit(tr.Tr.Get("--json option can't be combined with arguments")) } mp := gitattr.NewMacroProcessor() // Intentionally do _not_ consider global- and system-level // .gitattributes here. Parse them still to expand any macros. 
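	// For example, a global .gitattributes may define a macro such as
	// (hypothetical):
	//
	//	[attr]lfs filter=lfs diff=lfs merge=lfs -text
	//
	// which a repository-local line like "*.bin lfs" then relies on; the
	// macro must be parsed here even though the global pattern itself is
	// deliberately not listed.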
git.GetSystemAttributePaths(mp, cfg.Os) git.GetRootAttributePaths(mp, cfg.Git) knownPatterns := git.GetAttributePaths(mp, cfg.LocalWorkingDir(), cfg.LocalGitDir()) lineEnd := getAttributeLineEnding(knownPatterns) if len(lineEnd) == 0 { lineEnd = gitLineEnding(cfg.Git) } wd, _ := tools.Getwd() wd = tools.ResolveSymlinks(wd) relpath, err := filepath.Rel(cfg.LocalWorkingDir(), wd) if err != nil { Exit(tr.Tr.Get("Current directory %q outside of Git working directory %q.", wd, cfg.LocalWorkingDir())) } changedAttribLines := make(map[string]string) var readOnlyPatterns []string var writeablePatterns []string ArgsLoop: for _, unsanitizedPattern := range args { pattern := tools.TrimCurrentPrefix(cleanRootPath(unsanitizedPattern)) // Generate the new / changed attrib line for merging var encodedArg string if trackFilenameFlag { encodedArg = escapeGlobCharacters(pattern) pattern = escapeGlobCharacters(pattern) } else { encodedArg = escapeAttrPattern(pattern) } if !trackNoModifyAttrsFlag { for _, known := range knownPatterns { if unescapeAttrPattern(known.Path) == path.Join(relpath, pattern) && ((trackLockableFlag && known.Lockable) || // enabling lockable & already lockable (no change) (trackNotLockableFlag && !known.Lockable) || // disabling lockable & not lockable (no change) (!trackLockableFlag && !trackNotLockableFlag)) { // leave lockable as-is in all cases Print(tr.Tr.Get("%q already supported", pattern)) continue ArgsLoop } } } lockableArg := "" if trackLockableFlag { // no need to test trackNotLockableFlag, if we got here we're disabling lockableArg = " " + git.LockableAttrib } changedAttribLines[pattern] = fmt.Sprintf("%s filter=lfs diff=lfs merge=lfs -text%v%s", encodedArg, lockableArg, lineEnd) if trackLockableFlag { readOnlyPatterns = append(readOnlyPatterns, pattern) } else { writeablePatterns = append(writeablePatterns, pattern) } Print(tr.Tr.Get("Tracking %q", unescapeAttrPattern(encodedArg))) } // Now read the whole local attributes file and iterate over the contents, // replacing any lines where the values have changed, and appending new lines // change this: var ( attribContents []byte attributesFile *os.File ) if !trackNoModifyAttrsFlag { attribContents, err = os.ReadFile(".gitattributes") // it's fine for file to not exist if err != nil && !os.IsNotExist(err) { Print(tr.Tr.Get("Error reading '.gitattributes' file")) return } // Re-generate the file with merge of old contents and new (to deal with changes) attributesFile, err = os.OpenFile(".gitattributes", os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0660) if err != nil { Print(tr.Tr.Get("Error opening '.gitattributes' file")) return } defer attributesFile.Close() if len(attribContents) > 0 { scanner := bufio.NewScanner(bytes.NewReader(attribContents)) for scanner.Scan() { line := scanner.Text() fields := strings.Fields(line) if len(fields) < 1 { continue } pattern := unescapeAttrPattern(fields[0]) if newline, ok := changedAttribLines[pattern]; ok { // Replace this line (newline already embedded) attributesFile.WriteString(newline) // Remove from map so we know we don't have to add it to the end delete(changedAttribLines, pattern) } else { // Write line unchanged (replace newline) attributesFile.WriteString(line + lineEnd) } } // Our method of writing also made sure there's always a newline at end } } modified := false sawError := false // Any items left in the map, write new lines at the end of the file // Note this is only new patterns, not ones which changed locking flags for pattern, newline := range changedAttribLines { // Also, for 
any new patterns we've added, make sure any existing git // tracked files have their timestamp updated so they will now show as // modified note this is relative to current dir which is how we write // .gitattributes deliberately not done in parallel as a chan because // we'll be marking modified // // NOTE: `git ls-files` does not do well with leading slashes. // Since all `git-lfs track` calls are relative to the root of // the repository, the leading slash is simply removed for its // implicit counterpart. if trackVerboseLoggingFlag { Print(tr.Tr.Get("Searching for files matching pattern: %s", pattern)) } gittracked, err := git.GetTrackedFiles(pattern) if err != nil { Exit(tr.Tr.Get("Error getting tracked files for %q: %s", pattern, err)) } if trackVerboseLoggingFlag { Print(tr.Tr.Get("Found %d files previously added to Git matching pattern: %s", len(gittracked), pattern)) } var matchedBlocklist bool for _, f := range gittracked { if forbidden := blocklistItem(f); forbidden != "" { Print(tr.Tr.Get("Pattern '%s' matches forbidden file '%s'. If you would like to track %s, modify '.gitattributes' manually.", pattern, f, f)) matchedBlocklist = true } } if matchedBlocklist { continue } if !trackNoModifyAttrsFlag { // Newline already embedded attributesFile.WriteString(newline) } modified = true for _, f := range gittracked { if trackVerboseLoggingFlag || trackDryRunFlag { Print(tr.Tr.Get("Touching %q", f)) } if !trackDryRunFlag { now := time.Now() err := os.Chtimes(f, now, now) if err != nil { LoggedError(err, tr.Tr.Get("Error marking %q modified: %s", f, err)) sawError = true continue } } } } // now flip read-only mode based on lockable / not lockable changes lockClient := newLockClient() err = lockClient.FixFileWriteFlagsInDir(relpath, readOnlyPatterns, writeablePatterns) if err != nil { LoggedError(err, tr.Tr.Get("Error changing lockable file permissions: %s", err)) sawError = true } if sawError { os.Exit(2) } // If we didn't modify things, but that's because the patterns // were already supported, don't return an error, since what the // user wanted has already been done. // Otherwise, if we didn't modify things but only because the // patterns were disallowed, return an error. if !modified && len(changedAttribLines) > 0 { os.Exit(1) } } type PatternData struct { Pattern string `json:"pattern"` Source string `json:"source"` Lockable bool `json:"lockable"` Tracked bool `json:"tracked"` } func listPatterns() { knownPatterns, err := getAllKnownPatterns() if err != nil { Exit("unable to list patterns: %s", err) } if trackJSONFlag { patterns := struct { Patterns []PatternData `json:"patterns"` }{Patterns: make([]PatternData, 0, len(knownPatterns))} for _, p := range knownPatterns { patterns.Patterns = append(patterns.Patterns, PatternData{ Pattern: p.Path, Source: p.Source.String(), Tracked: p.Tracked, Lockable: p.Lockable, }) } encoder := json.NewEncoder(os.Stdout) encoder.SetIndent("", " ") err := encoder.Encode(patterns) if err != nil { ExitWithError(err) } return } if len(knownPatterns) < 1 { return } Print(tr.Tr.Get("Listing tracked patterns")) for _, t := range knownPatterns { if t.Lockable { // TRANSLATORS: Leading spaces here should be preserved. 
Print(tr.Tr.Get(" %s [lockable] (%s)", t.Path, t.Source)) } else if t.Tracked { Print(" %s (%s)", t.Path, t.Source) } } if trackNoExcludedFlag { return } Print(tr.Tr.Get("Listing excluded patterns")) for _, t := range knownPatterns { if !t.Tracked && !t.Lockable { Print(" %s (%s)", t.Path, t.Source) } } } func getAllKnownPatterns() ([]git.AttributePath, error) { mp := gitattr.NewMacroProcessor() // Parse these in this order so that macros in one file are properly // expanded when referred to in a later file, then order them in the // order we want. systemPatterns, err := git.GetSystemAttributePaths(mp, cfg.Os) if err != nil { return nil, err } globalPatterns := git.GetRootAttributePaths(mp, cfg.Git) knownPatterns := git.GetAttributePaths(mp, cfg.LocalWorkingDir(), cfg.LocalGitDir()) knownPatterns = append(knownPatterns, globalPatterns...) knownPatterns = append(knownPatterns, systemPatterns...) return knownPatterns, nil } func getAttributeLineEnding(attribs []git.AttributePath) string { for _, a := range attribs { if a.Source.Path == ".gitattributes" { return a.Source.LineEnding } } return "" } // blocklistItem returns the name of the blocklist item preventing the given // file-name from being tracked, or an empty string, if there is none. func blocklistItem(name string) string { base := filepath.Base(name) for _, p := range prefixBlocklist { if strings.HasPrefix(base, p) { return p } } return "" } var ( trackEscapePatterns = map[string]string{ " ": "[[:space:]]", "#": "\\#", } trackEscapeStrings = []string{"*", "[", "]", "?"} ) func escapeGlobCharacters(s string) string { var escaped string if runtime.GOOS == "windows" { escaped = strings.Replace(s, `\`, "/", -1) } else { escaped = strings.Replace(s, `\`, `\\`, -1) } for _, ch := range trackEscapeStrings { escaped = strings.Replace(escaped, ch, fmt.Sprintf("\\%s", ch), -1) } for from, to := range trackEscapePatterns { escaped = strings.Replace(escaped, from, to, -1) } return escaped } func escapeAttrPattern(s string) string { var escaped string if runtime.GOOS == "windows" { escaped = strings.Replace(s, `\`, "/", -1) } else { escaped = strings.Replace(s, `\`, `\\`, -1) } for from, to := range trackEscapePatterns { escaped = strings.Replace(escaped, from, to, -1) } return escaped } func unescapeAttrPattern(escaped string) string { var unescaped string = escaped for to, from := range trackEscapePatterns { unescaped = strings.Replace(unescaped, from, to, -1) } if runtime.GOOS != "windows" { unescaped = strings.Replace(unescaped, `\\`, `\`, -1) } return unescaped } func init() { RegisterCommand("track", trackCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&trackLockableFlag, "lockable", "l", false, "make pattern lockable, i.e. 
read-only unless locked") cmd.Flags().BoolVarP(&trackNotLockableFlag, "not-lockable", "", false, "remove lockable attribute from pattern") cmd.Flags().BoolVarP(&trackVerboseLoggingFlag, "verbose", "v", false, "log which files are being tracked and modified") cmd.Flags().BoolVarP(&trackDryRunFlag, "dry-run", "d", false, "preview results of running `git lfs track`") cmd.Flags().BoolVarP(&trackNoModifyAttrsFlag, "no-modify-attrs", "", false, "skip modifying .gitattributes file") cmd.Flags().BoolVarP(&trackNoExcludedFlag, "no-excluded", "", false, "skip listing excluded paths") cmd.Flags().BoolVarP(&trackFilenameFlag, "filename", "", false, "treat this pattern as a literal filename") cmd.Flags().BoolVarP(&trackJSONFlag, "json", "", false, "print output in JSON") }) } git-lfs-3.6.1/commands/command_uninstall.go000066400000000000000000000034531472372047300207430ustar00rootroot00000000000000package commands import ( "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) // uninstallCommand removes any configuration and hooks set by Git LFS. func uninstallCommand(cmd *cobra.Command, args []string) { if err := cmdInstallOptions().Uninstall(); err != nil { Print(tr.Tr.Get("warning: %s", err.Error())) } if !skipRepoInstall && (localInstall || worktreeInstall || cfg.InRepo()) { uninstallHooksCommand(cmd, args) } if systemInstall { Print(tr.Tr.Get("System Git LFS configuration has been removed.")) } else if !(localInstall || worktreeInstall) { Print(tr.Tr.Get("Global Git LFS configuration has been removed.")) } } // uninstallHooksCommand removes any hooks created by Git LFS. func uninstallHooksCommand(cmd *cobra.Command, args []string) { if err := uninstallHooks(); err != nil { Error(err.Error()) } Print(tr.Tr.Get("Hooks for this repository have been removed.")) } func init() { RegisterCommand("uninstall", uninstallCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&localInstall, "local", "l", false, "Remove the Git LFS config for the local Git repository only.") cmd.Flags().StringVarP(&fileInstall, "file", "", "", "Remove the Git LFS config for the given configuration file only.") if git.IsGitVersionAtLeast("2.20.0") { cmd.Flags().BoolVarP(&worktreeInstall, "worktree", "w", false, "Remove the Git LFS config for the current Git working tree, if multiple working trees are configured; otherwise, the same as --local.") } cmd.Flags().BoolVarP(&systemInstall, "system", "", false, "Remove the Git LFS config in system-wide scope.") cmd.Flags().BoolVarP(&skipRepoInstall, "skip-repo", "", false, "Skip repo setup, just uninstall global filters.") cmd.AddCommand(NewCommand("hooks", uninstallHooksCommand)) }) } git-lfs-3.6.1/commands/command_unlock.go000066400000000000000000000121011472372047300202130ustar00rootroot00000000000000package commands import ( "encoding/json" "os" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/locking" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) var ( unlockCmdFlags unlockFlags ) // unlockFlags holds the flags given to the `git lfs unlock` command type unlockFlags struct { // Id is the Id of the lock that is being unlocked. Id string // Force specifies whether or not the `lfs unlock` command was invoked // with "--force", signifying the user's intent to break another // individual's lock(s). 
Force bool } type unlockResponse struct { Id string `json:"id,omitempty"` Path string `json:"path,omitempty"` Unlocked bool `json:"unlocked"` Reason string `json:"reason,omitempty"` } func handleUnlockError(locks []unlockResponse, id string, path string, err error) []unlockResponse { Error(err.Error()) if locksCmdFlags.JSON { locks = append(locks, unlockResponse{ Id: id, Path: path, Unlocked: false, Reason: err.Error(), }) } return locks } func unlockCommand(cmd *cobra.Command, args []string) { hasPath := len(args) > 0 hasId := len(unlockCmdFlags.Id) > 0 if hasPath == hasId { // If there is both an `--id` AND a ``, or there is // neither, print the usage and quit. Exit(tr.Tr.Get("Exactly one of --id or a set of paths must be provided")) } if len(lockRemote) > 0 { cfg.SetRemote(lockRemote) } lockData, err := computeLockData() if err != nil { ExitWithError(err) } refUpdate := git.NewRefUpdate(cfg.Git, cfg.PushRemote(), cfg.CurrentRef(), nil) lockClient := newLockClient() lockClient.RemoteRef = refUpdate.RemoteRef() defer lockClient.Close() locks := make([]unlockResponse, 0, len(args)) success := true if hasPath { for _, pathspec := range args { path, err := lockPath(lockData, pathspec) if err != nil { if !unlockCmdFlags.Force { locks = handleUnlockError(locks, "", path, errors.New(tr.Tr.Get("Unable to determine path: %v", err.Error()))) success = false continue } path = pathspec } if err := unlockAbortIfFileModified(path); err != nil { locks = handleUnlockError(locks, "", path, err) success = false continue } err = lockClient.UnlockFile(path, unlockCmdFlags.Force) if err != nil { locks = handleUnlockError(locks, "", path, errors.Cause(err)) success = false continue } if !locksCmdFlags.JSON { Print(tr.Tr.Get("Unlocked %s", path)) continue } locks = append(locks, unlockResponse{ Path: path, Unlocked: true, }) } } else if unlockCmdFlags.Id != "" { // This call can early-out unlockAbortIfFileModifiedById(unlockCmdFlags.Id, lockClient) err := lockClient.UnlockFileById(unlockCmdFlags.Id, unlockCmdFlags.Force) if err != nil { locks = handleUnlockError(locks, unlockCmdFlags.Id, "", errors.New(tr.Tr.Get("Unable to unlock %v: %v", unlockCmdFlags.Id, errors.Cause(err)))) success = false } else if !locksCmdFlags.JSON { Print(tr.Tr.Get("Unlocked Lock %s", unlockCmdFlags.Id)) } else { locks = append(locks, unlockResponse{ Id: unlockCmdFlags.Id, Unlocked: true, }) } } else { Exit(tr.Tr.Get("Exactly one of --id or a set of paths must be provided")) } if locksCmdFlags.JSON { if err := json.NewEncoder(os.Stdout).Encode(locks); err != nil { Error(err.Error()) } } if !success { lockClient.Close() os.Exit(2) } } func unlockAbortIfFileModified(path string) error { modified, err := git.IsFileModified(path) if err != nil { if unlockCmdFlags.Force { // Since git/git@b9a7d55, `git-status(1)` causes an // error when asked about files that don't exist, // causing `err != nil`, as above. // // Unlocking a files that does not exist with // --force is OK. 
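			// (E.g. `git lfs unlock --force deleted.bin` where
			// deleted.bin no longer exists on disk: err is
			// non-nil here, yet the unlock proceeds.)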
return nil } return err } if modified { if unlockCmdFlags.Force { // Only a warning Error(tr.Tr.Get("warning: unlocking with uncommitted changes because --force")) } else { return errors.New(tr.Tr.Get("Cannot unlock file with uncommitted changes")) } } return nil } func unlockAbortIfFileModifiedById(id string, lockClient *locking.Client) error { // Get the path so we can check the status filter := map[string]string{"id": id} // try local cache first locks, _ := lockClient.SearchLocks(filter, 0, true, false) if len(locks) == 0 { // Fall back on calling server locks, _ = lockClient.SearchLocks(filter, 0, false, false) } if len(locks) == 0 { // Don't block if we can't determine the path, may be cleaning up old data return nil } return unlockAbortIfFileModified(locks[0].Path) } func init() { RegisterCommand("unlock", unlockCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&lockRemote, "remote", "r", "", "specify which remote to use when interacting with locks") cmd.Flags().StringVarP(&unlockCmdFlags.Id, "id", "i", "", "unlock a lock by its ID") cmd.Flags().BoolVarP(&unlockCmdFlags.Force, "force", "f", false, "forcibly break another user's lock(s)") cmd.Flags().BoolVarP(&locksCmdFlags.JSON, "json", "", false, "print output in json") }) } git-lfs-3.6.1/commands/command_untrack.go000066400000000000000000000031261472372047300203760ustar00rootroot00000000000000package commands import ( "bufio" "os" "strings" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) // untrackCommand takes a list of paths as an argument, and removes each path from the // default attributes file (.gitattributes), if it exists. func untrackCommand(cmd *cobra.Command, args []string) { setupWorkingCopy() installHooks(false) if len(args) < 1 { Print("git lfs untrack [path]*") return } data, err := os.ReadFile(".gitattributes") if err != nil { return } attributes := strings.NewReader(string(data)) attributesFile, err := os.Create(".gitattributes") if err != nil { Print(tr.Tr.Get("Error opening '.gitattributes' for writing")) return } defer attributesFile.Close() scanner := bufio.NewScanner(attributes) // Iterate through each line of the attributes file and rewrite it, // if the path was meant to be untracked, omit it, and print a message instead. for scanner.Scan() { line := scanner.Text() if !strings.Contains(line, "filter=lfs") { attributesFile.WriteString(line + "\n") continue } path := strings.Fields(line)[0] if removePath(path, args) { Print(tr.Tr.Get("Untracking %q", unescapeAttrPattern(path))) } else { attributesFile.WriteString(line + "\n") } } } func removePath(path string, args []string) bool { withoutCurrentDir := tools.TrimCurrentPrefix(path) for _, t := range args { if withoutCurrentDir == escapeAttrPattern(tools.TrimCurrentPrefix(t)) { return true } } return false } func init() { RegisterCommand("untrack", untrackCommand, nil) } git-lfs-3.6.1/commands/command_update.go000066400000000000000000000032121472372047300202050ustar00rootroot00000000000000package commands import ( "regexp" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) var ( updateForce = false updateManual = false ) // updateCommand is used for updating parts of Git LFS that reside under // .git/lfs. 
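//
// It also normalizes lfs.*.access configuration; for example (hypothetical
// remote URL), a key such as
//
//	lfs.https://example.com/repo.git.access=private
//
// is rewritten to "basic", and values other than "basic" or "private" are
// removed as invalid.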
func updateCommand(cmd *cobra.Command, args []string) { requireGitVersion() setupRepository() lfsAccessRE := regexp.MustCompile(`\Alfs\.(.*)\.access\z`) for key, _ := range cfg.Git.All() { matches := lfsAccessRE.FindStringSubmatch(key) if len(matches) < 2 { continue } value, _ := cfg.Git.Get(key) switch value { case "basic": case "private": cfg.SetGitLocalKey(key, "basic") Print(tr.Tr.Get("Updated %s access from %s to %s.", matches[1], value, "basic")) default: cfg.UnsetGitLocalKey(key) Print(tr.Tr.Get("Removed invalid %s access of %s.", matches[1], value)) } } if updateForce && updateManual { Exit(tr.Tr.Get("You cannot use --force and --manual options together")) } if updateManual { Print(getHookInstallSteps()) } else { if err := installHooks(updateForce); err != nil { Error(err.Error()) Exit("%s\n 1: %s\n 2: %s", tr.Tr.Get("To resolve this, either:"), tr.Tr.Get("run `git lfs update --manual` for instructions on how to merge hooks."), tr.Tr.Get("run `git lfs update --force` to overwrite your hook.")) } else { Print(tr.Tr.Get("Updated Git hooks.")) } } } func init() { RegisterCommand("update", updateCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&updateForce, "force", "f", false, "Overwrite existing hooks.") cmd.Flags().BoolVarP(&updateManual, "manual", "m", false, "Print instructions for manual install.") }) } git-lfs-3.6.1/commands/command_version.go000066400000000000000000000007231472372047300204140ustar00rootroot00000000000000package commands import ( "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/spf13/cobra" ) var ( lovesComics bool ) func versionCommand(cmd *cobra.Command, args []string) { Print(lfshttp.UserAgent) if lovesComics { Print("Nothing may see Gah Lak Tus and survive!") } } func init() { RegisterCommand("version", versionCommand, func(cmd *cobra.Command) { cmd.PreRun = nil cmd.Flags().BoolVarP(&lovesComics, "comics", "c", false, "easter egg") }) } git-lfs-3.6.1/commands/commands.go000066400000000000000000000343521472372047300170370ustar00rootroot00000000000000package commands import ( "bytes" "fmt" "io" "log" "net" "os" "path/filepath" "strings" "sync" "time" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/locking" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tq" "github.com/git-lfs/git-lfs/v3/tr" ) // Populate man pages //go:generate go run ../docs/man/mangen.go var ( Debugging = false ErrorBuffer = &bytes.Buffer{} ErrorWriter = newMultiWriter(os.Stderr, ErrorBuffer) OutputWriter = newMultiWriter(os.Stdout, ErrorBuffer) ManPages = make(map[string]string, 20) tqManifest = make(map[string]tq.Manifest) cfg *config.Configuration apiClient *lfsapi.Client global sync.Mutex oldEnv = make(map[string]string) includeArg string excludeArg string ) // getTransferManifest builds a tq.Manifest from the global os and git // environments. func getTransferManifest() tq.Manifest { return getTransferManifestOperationRemote("", "") } // getTransferManifestOperationRemote builds a tq.Manifest from the global os // and git environments and operation-specific and remote-specific settings. // Operation must be "download", "upload", or the empty string. 
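//
// Manifests are cached per (operation, remote) pair under the global lock,
// so repeated calls with the same arguments return the same tq.Manifest:
//
//	m1 := getTransferManifestOperationRemote("download", "origin")
//	m2 := getTransferManifestOperationRemote("download", "origin")
//	// m1 == m2: the second call is a cache hit
//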
func getTransferManifestOperationRemote(operation, remote string) tq.Manifest { c := getAPIClient() global.Lock() defer global.Unlock() k := fmt.Sprintf("%s.%s", operation, remote) if tqManifest[k] == nil { tqManifest[k] = tq.NewManifest(cfg.Filesystem(), c, operation, remote) } return tqManifest[k] } func getAPIClient() *lfsapi.Client { global.Lock() defer global.Unlock() if apiClient == nil { c, err := lfsapi.NewClient(cfg) if err != nil { ExitWithError(err) } apiClient = c } return apiClient } func closeAPIClient() error { global.Lock() defer global.Unlock() if apiClient == nil { return nil } return apiClient.Close() } func newLockClient() *locking.Client { lockClient, err := locking.NewClient(cfg.PushRemote(), getAPIClient(), cfg) if err == nil { tools.MkdirAll(cfg.LFSStorageDir(), cfg) err = lockClient.SetupFileCache(cfg.LFSStorageDir()) } if err != nil { Exit(tr.Tr.Get("Unable to create lock system: %v", err.Error())) } // Configure dirs lockClient.LocalWorkingDir = cfg.LocalWorkingDir() lockClient.LocalGitDir = cfg.LocalGitDir() lockClient.SetLockableFilesReadOnly = cfg.SetLockableFilesReadOnly() return lockClient } // newDownloadCheckQueue builds a checking queue, checks that objects are there but doesn't download func newDownloadCheckQueue(manifest tq.Manifest, remote string, options ...tq.Option) *tq.TransferQueue { return newDownloadQueue(manifest, remote, append(options, tq.DryRun(true), )...) } // newDownloadQueue builds a DownloadQueue, allowing concurrent downloads. func newDownloadQueue(manifest tq.Manifest, remote string, options ...tq.Option) *tq.TransferQueue { return tq.NewTransferQueue(tq.Download, manifest, remote, append(options, tq.RemoteRef(currentRemoteRef()), tq.WithBatchSize(cfg.TransferBatchSize()), )...) } func currentRemoteRef() *git.Ref { return git.NewRefUpdate(cfg.Git, cfg.PushRemote(), cfg.CurrentRef(), nil).RemoteRef() } func buildFilepathFilter(config *config.Configuration, includeArg, excludeArg *string, useFetchOptions bool) *filepathfilter.Filter { return buildFilepathFilterWithPatternType(config, includeArg, excludeArg, useFetchOptions, filepathfilter.GitIgnore) } func buildFilepathFilterWithPatternType(config *config.Configuration, includeArg, excludeArg *string, useFetchOptions bool, patternType filepathfilter.PatternType) *filepathfilter.Filter { inc, exc := determineIncludeExcludePaths(config, includeArg, excludeArg, useFetchOptions) return filepathfilter.New(inc, exc, patternType) } func downloadTransfer(p *lfs.WrappedPointer) (name, path, oid string, size int64, missing bool, err error) { path, err = cfg.Filesystem().ObjectPath(p.Oid) return p.Name, path, p.Oid, p.Size, false, err } // Get user-readable manual install steps for hooks func getHookInstallSteps() string { hookDir, err := cfg.HookDir() if err != nil { ExitWithError(err) } hooks := lfs.LoadHooks(hookDir, cfg) hookDir = filepath.ToSlash(hookDir) workingDir := filepath.ToSlash(fmt.Sprintf("%s%c", cfg.LocalWorkingDir(), os.PathSeparator)) steps := make([]string, 0, len(hooks)) for _, h := range hooks { steps = append(steps, fmt.Sprintf("%s\n\n%s", tr.Tr.Get("Add the following to '%s/%s':", strings.TrimPrefix(hookDir, workingDir), h.Type), tools.Indent(h.Contents))) } return strings.Join(steps, "\n\n") } func installHooks(force bool) error { hookDir, err := cfg.HookDir() if err != nil { return err } hooks := lfs.LoadHooks(hookDir, cfg) for _, h := range hooks { if err := h.Install(force); err != nil { return err } } return nil } // uninstallHooks removes all hooks in range of the 
`hooks` var. func uninstallHooks() error { if !cfg.InRepo() { return errors.New(tr.Tr.Get("Not in a Git repository")) } hookDir, err := cfg.HookDir() if err != nil { return err } hooks := lfs.LoadHooks(hookDir, cfg) for _, h := range hooks { if err := h.Uninstall(); err != nil { return err } } return nil } // Error prints a formatted message to Stderr. It also gets printed to the // panic log if one is created for this command. func Error(format string, args ...interface{}) { if len(args) == 0 { fmt.Fprintln(ErrorWriter, format) return } fmt.Fprintf(ErrorWriter, format+"\n", args...) } // Print prints a formatted message to Stdout. It also gets printed to the // panic log if one is created for this command. func Print(format string, args ...interface{}) { if len(args) == 0 { fmt.Fprintln(OutputWriter, format) return } fmt.Fprintf(OutputWriter, format+"\n", args...) } // Exit prints a formatted message and exits. func Exit(format string, args ...interface{}) { Error(format, args...) os.Exit(2) } // ExitWithError either panics with a full stack trace for fatal errors, or // simply prints the error message and exits immediately. func ExitWithError(err error) { errorWith(err, Panic, Exit) } // FullError prints either a full stack trace for fatal errors, or just the // error message. func FullError(err error) { errorWith(err, LoggedError, Error) } func errorWith(err error, fatalErrFn func(error, string, ...interface{}), errFn func(string, ...interface{})) { if Debugging || errors.IsFatalError(err) { fatalErrFn(err, "%s", err) return } errFn("%s", err) } // Debug prints a formatted message if debugging is enabled. The formatted // message also shows up in the panic log, if created. func Debug(format string, args ...interface{}) { if !Debugging { return } log.Printf(format, args...) } // LoggedError prints the given message formatted with its arguments (if any) to // Stderr. If an empty string is passed as the "format" argument, only the // standard error logging message will be printed, and the error's body will be // omitted. // // It also writes a stack trace for the error to a log file without exiting. func LoggedError(err error, format string, args ...interface{}) { if len(format) > 0 { Error(format, args...) } file := handlePanic(err) if len(file) > 0 { fmt.Fprintf(os.Stderr, "\n%s\n", tr.Tr.Get("Errors logged to '%s'.\nUse `git lfs logs last` to view the log.", file)) } } // Panic prints a formatted message, and writes a stack trace for the error to // a log file before exiting. func Panic(err error, format string, args ...interface{}) { LoggedError(err, format, args...) os.Exit(2) } func Cleanup() { if err := cfg.Cleanup(); err != nil { fmt.Fprintln(os.Stderr, tr.Tr.Get("Error clearing old temporary files: %s", err)) } } func requireStdin(msg string) { var out string stat, err := os.Stdin.Stat() if err != nil { out = tr.Tr.Get("Cannot read from STDIN: %s (%s)", msg, err) } else if (stat.Mode() & os.ModeCharDevice) != 0 { out = tr.Tr.Get("Cannot read from STDIN: %s", msg) } if len(out) > 0 { Error(out) os.Exit(1) } } func requireInRepo() { if !cfg.InRepo() { Print(tr.Tr.Get("Not in a Git repository.")) os.Exit(128) } } // requireWorkingCopy requires that the working directory be a work tree, i.e., // that it not be bare. If it is bare (or the state of the repository could not // be determined), this function will terminate the program. 
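//
// As with requireInRepo, the exit status is 128, matching the status Git
// itself uses for fatal errors.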
func requireWorkingCopy() { if cfg.LocalWorkingDir() == "" { Print(tr.Tr.Get("This operation must be run in a work tree.")) os.Exit(128) } } func setupRepository() { requireInRepo() bare, err := git.IsBare() if err != nil { ExitWithError(errors.Wrap( err, tr.Tr.Get("Could not determine bareness"))) } verifyRepositoryVersion() if !bare { changeToWorkingCopy() } } func verifyRepositoryVersion() { key := "lfs.repositoryformatversion" val := cfg.FindGitLocalKey(key) if val == "" { cfg.SetGitLocalKey(key, "0") } else if val != "0" { Print(tr.Tr.Get("Unknown repository format version: %s", val)) os.Exit(128) } } func setupWorkingCopy() { requireInRepo() requireWorkingCopy() verifyRepositoryVersion() changeToWorkingCopy() } func changeToWorkingCopy() { workingDir := cfg.LocalWorkingDir() cwd, err := tools.Getwd() if err != nil { ExitWithError(errors.Wrap( err, tr.Tr.Get("Could not determine current working directory"))) } cwd, err = tools.CanonicalizeSystemPath(cwd) if err != nil { ExitWithError(errors.Wrap( err, tr.Tr.Get("Could not canonicalize current working directory"))) } // If the current working directory is not within the repository's // working directory, then let's change directories accordingly. This // should only occur if GIT_WORK_TREE is set. if !(strings.HasPrefix(cwd, workingDir) && (cwd == workingDir || (len(cwd) > len(workingDir) && cwd[len(workingDir)] == os.PathSeparator))) { os.Chdir(workingDir) } } func canonicalizeEnvironment() { vars := []string{"GIT_INDEX_FILE", "GIT_OBJECT_DIRECTORY", "GIT_DIR", "GIT_WORK_TREE", "GIT_COMMON_DIR"} for _, v := range vars { val, ok := os.LookupEnv(v) if ok { path, err := tools.CanonicalizePath(val, true) // We have existing code which relies on users being // able to pass invalid paths, so don't fail if the path // cannot be canonicalized. 
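			// (For instance, GIT_DIR=/path/that/does/not/resolve
			// is left as-is rather than failing the whole
			// command.)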
if err == nil { oldEnv[v] = val os.Setenv(v, path) } } } subprocess.ResetEnvironment() } func handlePanic(err error) string { if err == nil { return "" } return logPanic(err) } func logPanic(loggedError error) string { var ( fmtWriter io.Writer = os.Stderr lineEnding string = "\n" ) now := time.Now() name := now.Format("20060102T150405.999999999") full := filepath.Join(cfg.LocalLogDir(), name+".log") if err := tools.MkdirAll(cfg.LocalLogDir(), cfg); err != nil { full = "" fmt.Fprintf(fmtWriter, "%s\n\n", tr.Tr.Get("Unable to log panic to '%s': %s", cfg.LocalLogDir(), err.Error())) } else if file, err := os.Create(full); err != nil { filename := full full = "" defer func() { fmt.Fprintf(fmtWriter, "%s\n\n", tr.Tr.Get("Unable to log panic to '%s'", filename)) logPanicToWriter(fmtWriter, err, lineEnding) }() } else { fmtWriter = file lineEnding = gitLineEnding(cfg.Git) defer file.Close() } logPanicToWriter(fmtWriter, loggedError, lineEnding) return full } func ipAddresses() []string { ips := make([]string, 0, 1) ifaces, err := net.Interfaces() if err != nil { ips = append(ips, tr.Tr.Get("Error getting network interface: %s", err.Error())) return ips } for _, i := range ifaces { if i.Flags&net.FlagUp == 0 { continue // interface down } if i.Flags&net.FlagLoopback != 0 { continue // loopback interface } addrs, _ := i.Addrs() l := make([]string, 0, 1) if err != nil { ips = append(ips, tr.Tr.Get("Error getting IP address: %s", err.Error())) continue } for _, addr := range addrs { var ip net.IP switch v := addr.(type) { case *net.IPNet: ip = v.IP case *net.IPAddr: ip = v.IP } if ip == nil || ip.IsLoopback() { continue } l = append(l, ip.String()) } if len(l) > 0 { ips = append(ips, strings.Join(l, " ")) } } return ips } func logPanicToWriter(w io.Writer, loggedError error, le string) { // log the version gitV, err := git.Version() if err != nil { gitV = tr.Tr.Get("Error getting Git version: %s", err.Error()) } fmt.Fprint(w, config.VersionDesc, le) fmt.Fprint(w, gitV, le) // log the command that was run fmt.Fprint(w, le) fmt.Fprintf(w, "$ %s", filepath.Base(os.Args[0])) if len(os.Args) > 0 { fmt.Fprintf(w, " %s", strings.Join(os.Args[1:], " ")) } fmt.Fprint(w, le) // log the error message and stack trace w.Write(ErrorBuffer.Bytes()) fmt.Fprint(w, le) fmt.Fprintf(w, "%+v%s", loggedError, le) for key, val := range errors.Context(err) { fmt.Fprintf(w, "%s=%v%s", key, val, le) } fmt.Fprint(w, le, tr.Tr.Get("Current time in UTC:"), le) fmt.Fprint(w, time.Now().UTC().Format("2006-01-02 15:04:05"), le) fmt.Fprint(w, le, tr.Tr.Get("Environment:"), le) // log the environment for _, env := range lfs.Environ(cfg, getTransferManifest(), oldEnv) { fmt.Fprint(w, env, le) } fmt.Fprint(w, le, tr.Tr.Get("Client IP addresses:"), le) for _, ip := range ipAddresses() { fmt.Fprint(w, ip, le) } } func determineIncludeExcludePaths(config *config.Configuration, includeArg, excludeArg *string, useFetchOptions bool) (include, exclude []string) { if includeArg == nil { if useFetchOptions { include = config.FetchIncludePaths() } else { include = []string{} } } else { include = tools.CleanPaths(*includeArg, ",") } if excludeArg == nil { if useFetchOptions { exclude = config.FetchExcludePaths() } else { exclude = []string{} } } else { exclude = tools.CleanPaths(*excludeArg, ",") } return } func buildProgressMeter(dryRun bool, d tq.Direction) *tq.Meter { m := tq.NewMeter(cfg) m.Logger = m.LoggerFromEnv(cfg.Os) m.DryRun = dryRun m.Direction = d return m } func requireGitVersion() { minimumGit := "1.8.2" if 
!git.IsGitVersionAtLeast(minimumGit) { gitver, err := git.Version() if err != nil { Exit(tr.Tr.Get("Error getting Git version: %s", err)) } Exit(tr.Tr.Get("Git version %s or higher is required for Git LFS; your version: %s", minimumGit, gitver)) } } git-lfs-3.6.1/commands/commands_test.go000066400000000000000000000030211472372047300200630ustar00rootroot00000000000000package commands import ( "testing" "github.com/git-lfs/git-lfs/v3/config" "github.com/stretchr/testify/assert" ) var ( testcfg = config.NewFrom(config.Values{ Git: map[string][]string{ "lfs.fetchinclude": []string{"/default/include"}, "lfs.fetchexclude": []string{"/default/exclude"}, }, }) ) func TestDetermineIncludeExcludePathsReturnsCleanedPaths(t *testing.T) { inc := "/some/include" exc := "/some/exclude" i, e := determineIncludeExcludePaths(testcfg, &inc, &exc, true) assert.Equal(t, []string{"/some/include"}, i) assert.Equal(t, []string{"/some/exclude"}, e) } func TestDetermineIncludeExcludePathsReturnsEmptyPaths(t *testing.T) { inc := "" exc := "" i, e := determineIncludeExcludePaths(testcfg, &inc, &exc, true) assert.Empty(t, i) assert.Empty(t, e) } func TestDetermineIncludeExcludePathsReturnsDefaultsWhenAbsent(t *testing.T) { i, e := determineIncludeExcludePaths(testcfg, nil, nil, true) assert.Equal(t, []string{"/default/include"}, i) assert.Equal(t, []string{"/default/exclude"}, e) } func TestDetermineIncludeExcludePathsReturnsNothingWhenAbsent(t *testing.T) { i, e := determineIncludeExcludePaths(testcfg, nil, nil, false) assert.Empty(t, i) assert.Empty(t, e) } func TestSpecialGitRefsExclusion(t *testing.T) { assert.True(t, isSpecialGitRef("refs/notes/commits")) assert.True(t, isSpecialGitRef("refs/bisect/bad")) assert.True(t, isSpecialGitRef("refs/replace/abcdef90")) assert.True(t, isSpecialGitRef("refs/stash")) assert.False(t, isSpecialGitRef("refs/commits/abcdef90")) } git-lfs-3.6.1/commands/lockverifier.go000066400000000000000000000144741472372047300177250ustar00rootroot00000000000000package commands import ( "fmt" "sort" "strconv" "strings" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/git-lfs/git-lfs/v3/locking" "github.com/git-lfs/git-lfs/v3/tq" "github.com/git-lfs/git-lfs/v3/tr" ) type verifyState byte const ( verifyStateUnknown verifyState = iota verifyStateEnabled verifyStateDisabled ) func verifyLocksForUpdates(lv *lockVerifier, updates []*git.RefUpdate) { for _, update := range updates { lv.Verify(update.RemoteRef()) } } // lockVerifier verifies locked files before updating one or more refs. 
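//
// Whether verification runs for an endpoint is controlled by the
// lfs.<url>.locksverify setting (see getVerifyStateFor); for example
// (hypothetical URL):
//
//	git config lfs.https://example.com/repo.git.locksverify true
//
// When the setting is absent, the verifier starts in an "unknown" state and
// is enabled or disabled based on what the server reports.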
type lockVerifier struct { endpoint lfshttp.Endpoint verifyState verifyState verifiedRefs map[string]bool // all existing locks ourLocks map[string]*refLock theirLocks map[string]*refLock // locks from ourLocks that have been modified ownedLocks []*refLock // locks from theirLocks that have been modified unownedLocks []*refLock } func (lv *lockVerifier) Verify(ref *git.Ref) { if ref == nil { panic(tr.Tr.Get("no ref specified for verification")) } if lv.verifyState == verifyStateDisabled || lv.verifiedRefs[ref.Refspec()] { return } lockClient := newLockClient() lockClient.RemoteRef = ref ours, theirs, err := lockClient.SearchLocksVerifiable(0, false) if err != nil { if errors.IsNotImplementedError(err) { disableFor(lv.endpoint.Url) } else if lv.verifyState == verifyStateUnknown || lv.verifyState == verifyStateEnabled { if errors.IsAuthError(err) { if lv.verifyState == verifyStateUnknown { Error(tr.Tr.Get("warning: Authentication error: %s", err)) } else if lv.verifyState == verifyStateEnabled { Exit(tr.Tr.Get("error: Authentication error: %s", err)) } } else { Error(tr.Tr.Get("Remote %q does not support the Git LFS locking API. Consider disabling it with:", cfg.PushRemote())) Error(" $ git config lfs.%s.locksverify false", lv.endpoint.Url) if lv.verifyState == verifyStateEnabled { ExitWithError(err) } } } } else if lv.verifyState == verifyStateUnknown { Error(tr.Tr.Get("Locking support detected on remote %q. Consider enabling it with:", cfg.PushRemote())) Error(" $ git config lfs.%s.locksverify true", lv.endpoint.Url) } lv.addLocks(ref, ours, lv.ourLocks) lv.addLocks(ref, theirs, lv.theirLocks) lv.verifiedRefs[ref.Refspec()] = true } func (lv *lockVerifier) addLocks(ref *git.Ref, locks []locking.Lock, set map[string]*refLock) { for _, l := range locks { if rl, ok := set[l.Path]; ok { if err := rl.Add(ref, l); err != nil { Error(tr.Tr.Get("warning: error adding %q lock for ref %q: %+v", l.Path, ref, err)) } } else { set[l.Path] = lv.newRefLocks(ref, l) } } } // Determines if a filename is lockable. Implements lfs.GitScannerSet func (lv *lockVerifier) Contains(name string) bool { if lv == nil { return false } _, ok := lv.theirLocks[name] return ok } func (lv *lockVerifier) LockedByThem(name string) bool { if lock, ok := lv.theirLocks[name]; ok { lv.unownedLocks = append(lv.unownedLocks, lock) return true } return false } func (lv *lockVerifier) LockedByUs(name string) bool { if lock, ok := lv.ourLocks[name]; ok { lv.ownedLocks = append(lv.ownedLocks, lock) return true } return false } func (lv *lockVerifier) UnownedLocks() []*refLock { return lv.unownedLocks } func (lv *lockVerifier) HasUnownedLocks() bool { return len(lv.unownedLocks) > 0 } func (lv *lockVerifier) OwnedLocks() []*refLock { return lv.ownedLocks } func (lv *lockVerifier) HasOwnedLocks() bool { return len(lv.ownedLocks) > 0 } func (lv *lockVerifier) Enabled() bool { return lv.verifyState == verifyStateEnabled } func (lv *lockVerifier) newRefLocks(ref *git.Ref, l locking.Lock) *refLock { return &refLock{ allRefs: lv.verifiedRefs, path: l.Path, refs: map[*git.Ref]locking.Lock{ref: l}, } } func newLockVerifier(m tq.Manifest) *lockVerifier { lv := &lockVerifier{ endpoint: getAPIClient().Endpoints.Endpoint("upload", cfg.PushRemote()), verifiedRefs: make(map[string]bool), ourLocks: make(map[string]*refLock), theirLocks: make(map[string]*refLock), } // Do not check locks for standalone transfer, because there is no LFS // server to ask. 
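// A standalone transfer agent is selected via configuration, for example:
//
//	git config lfs.standalonetransferagent agent-name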
if m.IsStandaloneTransfer() { lv.verifyState = verifyStateDisabled } else { lv.verifyState = getVerifyStateFor(lv.endpoint.Url) } return lv } // refLock represents a unique locked file path, potentially across multiple // refs. It tracks each individual lock in case different users locked the // same path across multiple refs. type refLock struct { path string allRefs map[string]bool refs map[*git.Ref]locking.Lock } // Path returns the locked path. func (r *refLock) Path() string { return r.path } // Owners returns the list of owners that locked this file, including what // specific refs the files were locked in. If a user locked a file on all refs, // don't bother listing them. // // Example: technoweenie, bob (refs: foo) func (r *refLock) Owners() string { users := make(map[string][]string, len(r.refs)) for ref, lock := range r.refs { u := lock.Owner.Name if _, ok := users[u]; !ok { users[u] = make([]string, 0, len(r.refs)) } users[u] = append(users[u], ref.Name) } owners := make([]string, 0, len(users)) for name, refs := range users { seenRefCount := 0 for _, ref := range refs { if r.allRefs[ref] { seenRefCount++ } } if seenRefCount == len(r.allRefs) { // lock is included in all refs, so don't list them owners = append(owners, name) continue } sort.Strings(refs) owners = append(owners, fmt.Sprintf("%s (refs: %s)", name, strings.Join(refs, ", "))) } sort.Strings(owners) return strings.Join(owners, ", ") } func (r *refLock) Add(ref *git.Ref, l locking.Lock) error { r.refs[ref] = l return nil } // getVerifyStateFor returns whether or not lock verification is enabled for the // given url. If no state has been explicitly set, an "unknown" state will be // returned instead. func getVerifyStateFor(rawurl string) verifyState { uc := config.NewURLConfig(cfg.Git) v, ok := uc.Get("lfs", rawurl, "locksverify") if !ok { if supportsLockingAPI(rawurl) { return verifyStateEnabled } return verifyStateUnknown } if enabled, _ := strconv.ParseBool(v); enabled { return verifyStateEnabled } return verifyStateDisabled } git-lfs-3.6.1/commands/multiwriter.go000066400000000000000000000006511472372047300176200ustar00rootroot00000000000000package commands import ( "io" "os" ) type multiWriter struct { writer io.Writer fd uintptr } func newMultiWriter(f *os.File, writers ...io.Writer) *multiWriter { return &multiWriter{ writer: io.MultiWriter(append([]io.Writer{f}, writers...)...), fd: f.Fd(), } } func (w *multiWriter) Write(p []byte) (n int, err error) { return w.writer.Write(p) } func (w *multiWriter) Fd() uintptr { return w.fd } git-lfs-3.6.1/commands/path.go000066400000000000000000000004231472372047300161620ustar00rootroot00000000000000package commands import "strings" func gitLineEnding(git env) string { value, _ := git.Get("core.autocrlf") switch strings.ToLower(value) { case "true", "t", "1": return "\r\n" default: return osLineEnding() } } type env interface { Get(string) (string, bool) } git-lfs-3.6.1/commands/path_nix.go000066400000000000000000000003421472372047300170400ustar00rootroot00000000000000//go:build !windows // +build !windows package commands // cleanRootPath is a no-op on every platform except Windows func cleanRootPath(pattern string) string { return pattern } func osLineEnding() string { return "\n" } git-lfs-3.6.1/commands/path_windows.go000066400000000000000000000026521472372047300177420ustar00rootroot00000000000000//go:build windows // +build windows package commands import ( "path/filepath" "regexp" "strings" "sync" "github.com/git-lfs/git-lfs/v3/subprocess" ) var ( winBashPrefix string 
winBashMu sync.Mutex winBashRe *regexp.Regexp ) func osLineEnding() string { return "\r\n" } // cleanRootPath replaces the windows root path prefix with a unix path prefix: // "/". Git Bash (provided with Git For Windows) expands a path like "/foo" to // the actual Windows directory, but with forward slashes. You can see this // for yourself: // // $ git /foo // git: 'C:/Program Files/Git/foo' is not a git command. See 'git --help'. // // You can check the path with `pwd -W`: // // $ cd / // $ pwd // / // $ pwd -W // c:/Program Files/Git func cleanRootPath(pattern string) string { winBashMu.Lock() defer winBashMu.Unlock() // check if path starts with windows drive letter if !winPathHasDrive(pattern) { return pattern } if len(winBashPrefix) < 1 { // cmd.Path is something like C:\Program Files\Git\usr\bin\pwd.exe cmd, err := subprocess.ExecCommand("pwd") if err != nil { return pattern } winBashPrefix = strings.Replace(filepath.Dir(filepath.Dir(filepath.Dir(cmd.Path))), `\`, "/", -1) + "/" } return strings.Replace(pattern, winBashPrefix, "/", 1) } func winPathHasDrive(pattern string) bool { if winBashRe == nil { winBashRe = regexp.MustCompile(`\A\w{1}:[/\\]`) } return winBashRe.MatchString(pattern) } git-lfs-3.6.1/commands/pointers.go000066400000000000000000000004531472372047300170740ustar00rootroot00000000000000package commands import "github.com/git-lfs/git-lfs/v3/lfs" func collectPointers(pointerCh *lfs.PointerChannelWrapper) ([]*lfs.WrappedPointer, error) { var pointers []*lfs.WrappedPointer for p := range pointerCh.Results { pointers = append(pointers, p) } return pointers, pointerCh.Wait() } git-lfs-3.6.1/commands/pull.go000066400000000000000000000124051472372047300162050ustar00rootroot00000000000000package commands import ( "bytes" "io" "os" "strings" "sync" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/git-lfs/git-lfs/v3/tq" "github.com/git-lfs/git-lfs/v3/tr" ) // Handles the process of checking out a single file, and updating the git // index.
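//
// A usage sketch (hypothetical; pull and checkout drive this over a stream
// of pointers):
//
//	checkout := newSingleCheckout(cfg.Git, "origin")
//	for _, p := range pointers {
//		checkout.Run(p)
//	}
//	checkout.Close()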
func newSingleCheckout(gitEnv config.Environment, remote string) abstractCheckout { clean, ok := gitEnv.Get("filter.lfs.clean") if !ok || len(clean) == 0 { return &noOpCheckout{remote: remote} } // Get a converter from repo-relative to cwd-relative // Since writing data & calling git update-index must be relative to cwd pathConverter, err := lfs.NewRepoToCurrentPathConverter(cfg) if err != nil { Panic(err, tr.Tr.Get("Could not convert file paths")) } return &singleCheckout{ gitIndexer: &gitIndexer{}, pathConverter: pathConverter, manifest: nil, remote: remote, } } type abstractCheckout interface { Manifest() tq.Manifest Skip() bool Run(*lfs.WrappedPointer) RunToPath(*lfs.WrappedPointer, string) error Close() } type singleCheckout struct { gitIndexer *gitIndexer pathConverter lfs.PathConverter manifest tq.Manifest remote string } func (c *singleCheckout) Manifest() tq.Manifest { if c.manifest == nil { c.manifest = getTransferManifestOperationRemote("download", c.remote) } return c.manifest } func (c *singleCheckout) Skip() bool { return false } func (c *singleCheckout) Run(p *lfs.WrappedPointer) { cwdfilepath := c.pathConverter.Convert(p.Name) // Check the content - either missing or still this pointer (not exist is ok) filepointer, err := lfs.DecodePointerFromFile(cwdfilepath) if err != nil { if os.IsNotExist(err) { output, err := git.DiffIndexWithPaths("HEAD", true, []string{p.Name}) if err != nil { LoggedError(err, tr.Tr.Get("Checkout error trying to run diff-index: %s", err)) return } if strings.HasPrefix(output, ":100644 000000 ") || strings.HasPrefix(output, ":100755 000000 ") { // This file is deleted in the index. Don't try // to check it out. return } } else { if errors.IsNotAPointerError(err) || errors.IsBadPointerKeyError(err) { // File has non-pointer content, leave it alone return } LoggedError(err, tr.Tr.Get("Checkout error: %s", err)) return } } if filepointer != nil && filepointer.Oid != p.Oid { // User has probably manually reset a file to another commit // while leaving it a pointer; don't mess with this return } if err := c.RunToPath(p, cwdfilepath); err != nil { if errors.IsDownloadDeclinedError(err) { // acceptable error, data not local (fetch not run or include/exclude) Error(tr.Tr.Get("Skipped checkout for %q, content not local. Use fetch to download.", p.Name)) } else { FullError(errors.New(tr.Tr.Get("could not check out %q", p.Name))) } return } // errors are only returned when the gitIndexer is starting a new cmd if err := c.gitIndexer.Add(cwdfilepath); err != nil { Panic(err, tr.Tr.Get("Could not update the index")) } } // RunToPath checks out the pointer specified by p to the given path. It does // not perform any sort of sanity checking or add the path to the index. 
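//
// For example (sketch), to materialize a pointer's content at an arbitrary
// location outside the work tree:
//
//	if err := checkout.RunToPath(p, "/tmp/preview.bin"); err != nil {
//		// content was not available locally, or the smudge failed
//	}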
func (c *singleCheckout) RunToPath(p *lfs.WrappedPointer, path string) error { gitfilter := lfs.NewGitFilter(cfg) return gitfilter.SmudgeToFile(path, p.Pointer, false, c.manifest, nil) } func (c *singleCheckout) Close() { if err := c.gitIndexer.Close(); err != nil { LoggedError(err, "%s\n%s", tr.Tr.Get("Error updating the Git index:"), c.gitIndexer.Output()) } } type noOpCheckout struct { manifest tq.Manifest remote string } func (c *noOpCheckout) Manifest() tq.Manifest { if c.manifest == nil { c.manifest = getTransferManifestOperationRemote("download", c.remote) } return c.manifest } func (c *noOpCheckout) Skip() bool { return true } func (c *noOpCheckout) RunToPath(p *lfs.WrappedPointer, path string) error { return nil } func (c *noOpCheckout) Run(p *lfs.WrappedPointer) {} func (c *noOpCheckout) Close() {} // Don't fire up the update-index command until we have at least one file to // give it. Otherwise git interprets the lack of arguments to mean param-less update-index // which can trigger entire working copy to be re-examined, which triggers clean filters // and which has unexpected side effects (e.g. downloading filtered-out files) type gitIndexer struct { cmd *subprocess.Cmd input io.WriteCloser output bytes.Buffer mu sync.Mutex } func (i *gitIndexer) Add(path string) error { i.mu.Lock() defer i.mu.Unlock() if i.cmd == nil { // Fire up the update-index command cmd, err := git.UpdateIndexFromStdin() if err != nil { return err } cmd.Stdout = &i.output cmd.Stderr = &i.output stdin, err := cmd.StdinPipe() if err != nil { return err } err = cmd.Start() if err != nil { return err } i.cmd = cmd i.input = stdin } i.input.Write([]byte(path + "\n")) return nil } func (i *gitIndexer) Output() string { return i.output.String() } func (i *gitIndexer) Close() error { i.mu.Lock() defer i.mu.Unlock() if i.input != nil { i.input.Close() } if i.cmd != nil { return i.cmd.Wait() } return nil } git-lfs-3.6.1/commands/run.go000066400000000000000000000135041472372047300160360ustar00rootroot00000000000000package commands import ( "bytes" "fmt" "log" "os" "path/filepath" "strings" "sync" "time" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/spf13/cobra" ) var ( commandFuncs []func() *cobra.Command commandMu sync.Mutex rootVersion bool ) // NewCommand creates a new 'git-lfs' sub command, given a command name and // command run function. // // Each command will initialize the local storage ('.git/lfs') directory when // run, unless the PreRun hook is set to nil. func NewCommand(name string, runFn func(*cobra.Command, []string)) *cobra.Command { return &cobra.Command{Use: name, Run: runFn, PreRun: setupHTTPLogger} } // RegisterCommand creates a direct 'git-lfs' subcommand, given a command name, // a command run function, and an optional callback during the command // initialization process. // // The 'git-lfs' command initialization is deferred until the `commands.Run()` // function is called. The fn callback is passed the output from NewCommand, // and gives the caller the flexibility to customize the command by adding // flags, tweaking command hooks, etc. func RegisterCommand(name string, runFn func(cmd *cobra.Command, args []string), fn func(cmd *cobra.Command)) { commandMu.Lock() commandFuncs = append(commandFuncs, func() *cobra.Command { cmd := NewCommand(name, runFn) if fn != nil { fn(cmd) } return cmd }) commandMu.Unlock() } // Run initializes the 'git-lfs' command and runs it with the given stdin and // command line args. 
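// A typical entry point is a sketch like the following (the actual main
// package also performs additional setup and cleanup):
//
//	func main() {
//		os.Exit(commands.Run())
//	}
//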
// // It returns an exit code. func Run() int { log.SetOutput(ErrorWriter) tr.InitializeLocale() root := NewCommand("git-lfs", gitlfsCommand) root.PreRun = nil completionCmd := &cobra.Command{ Use: "completion [bash|fish|zsh]", DisableFlagsInUseLine: true, ValidArgs: []string{"bash", "fish", "zsh"}, Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), Run: func(cmd *cobra.Command, args []string) { switch args[0] { case "bash": completion := new(bytes.Buffer) cmd.Root().GenBashCompletionV2(completion, false) // this is needed for git bash completion to pick up the completion for the subcommand completionSource := []byte(` local out directive __git-lfs_get_completion_results `) completionReplace := []byte(` if [[ ${words[0]} == "git" && ${words[1]} == "lfs" ]]; then words=("git-lfs" "${words[@]:2:${#words[@]}-2}") __git-lfs_debug "Rewritten words[*]: ${words[*]}," fi local out directive __git-lfs_get_completion_results `) newCompletion := bytes.NewBuffer(bytes.Replace(completion.Bytes(), completionSource, completionReplace, 1)) newCompletion.WriteString("_git_lfs() { __start_git-lfs; }\n") newCompletion.WriteTo(os.Stdout) case "fish": cmd.Root().GenFishCompletion(os.Stdout, false) case "zsh": completion := new(bytes.Buffer) cmd.Root().GenZshCompletionNoDesc(completion) // this is needed for git zsh completion to use the right command for completion completionSource := []byte(` requestComp="${words[1]} __completeNoDesc ${words[2,-1]}"`) completionReplace := []byte(` requestComp="git-${words[1]#*git-} __completeNoDesc ${words[2,-1]}"`) newCompletion := bytes.NewBuffer(bytes.Replace(completion.Bytes(), completionSource, completionReplace, 1)) newCompletion.WriteTo(os.Stdout) } }, } root.AddCommand(completionCmd) // Set up help/usage funcs based on manpage text helpcmd := &cobra.Command{ Use: "help [command]", Short: "Help about any command", Long: `Help provides help for any command in the application. Simply type ` + root.Name() + ` help [path to command] for full details.`, Run: func(c *cobra.Command, args []string) { cmd, _, e := c.Root().Find(args) // In the case of "git lfs help config" or "git lfs help // faq", pretend the last arg was "help" so our command // lookup succeeds, since cmd will be ignored in // helpCommand(). 
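// For example, `git lfs help config` takes this branch, since "config" is
// a manual page rather than a registered subcommand.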
if e != nil && (args[0] == "config" || args[0] == "faq") { cmd, _, e = c.Root().Find([]string{"help"}) } if cmd == nil || e != nil { c.Println(tr.Tr.Get("Unknown help topic %#q", args)) c.Root().Usage() } else { c.HelpFunc()(cmd, args) } }, } root.SetHelpCommand(helpcmd) root.SetHelpTemplate("{{.UsageString}}") root.SetHelpFunc(helpCommand) root.SetUsageFunc(usageCommand) root.Flags().BoolVarP(&rootVersion, "version", "v", false, "") canonicalizeEnvironment() cfg = config.New() for _, f := range commandFuncs { if cmd := f(); cmd != nil { root.AddCommand(cmd) } } err := root.Execute() closeAPIClient() if err != nil { return 127 } return 0 } func gitlfsCommand(cmd *cobra.Command, args []string) { versionCommand(cmd, args) if !rootVersion { cmd.Usage() } } func helpCommand(cmd *cobra.Command, args []string) { if len(args) == 0 { printHelp("git-lfs") } else { printHelp(args[0]) } } func usageCommand(cmd *cobra.Command) error { printHelp(cmd.Name()) return nil } func printHelp(commandName string) { if commandName == "--help" { commandName = "git-lfs" } if txt, ok := ManPages[commandName]; ok { fmt.Println(strings.TrimSpace(txt)) } else { fmt.Println(tr.Tr.Get("Sorry, no usage text found for %q", commandName)) } } func setupHTTPLogger(cmd *cobra.Command, args []string) { if len(os.Getenv("GIT_LOG_STATS")) < 1 { return } logBase := filepath.Join(cfg.LocalLogDir(), "http") if err := tools.MkdirAll(logBase, cfg); err != nil { fmt.Fprintln(os.Stderr, tr.Tr.Get("Error logging HTTP stats: %s", err)) return } logFile := fmt.Sprintf("http-%d.log", time.Now().Unix()) file, err := os.Create(filepath.Join(logBase, logFile)) if err != nil { fmt.Fprintln(os.Stderr, tr.Tr.Get("Error logging HTTP stats: %s", err)) } else { getAPIClient().LogHTTPStats(file) } } git-lfs-3.6.1/commands/uploader.go000066400000000000000000000261601472372047300170470ustar00rootroot00000000000000package commands import ( "fmt" "io" "net/url" "os" "path/filepath" "strings" "sync" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/tasklog" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tq" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) func uploadForRefUpdates(ctx *uploadContext, updates []*git.RefUpdate, pushAll bool) error { gitscanner := ctx.buildGitScanner() defer ctx.ReportErrors() verifyLocksForUpdates(ctx.lockVerifier, updates) exclude := make([]string, 0, len(updates)) for _, update := range updates { remoteRefSha := update.RemoteRef().Sha if update.LocalRefCommitish() != remoteRefSha { exclude = append(exclude, remoteRefSha) } } for _, update := range updates { // initialized here to prevent looped defer q := ctx.NewQueue( tq.RemoteRef(update.RemoteRef()), ) err := uploadRangeOrAll(gitscanner, ctx, q, exclude, update, pushAll) ctx.CollectErrors(q) if err != nil { return errors.Wrap(err, tr.Tr.Get("ref %q:", update.LocalRef().Name)) } } return nil } func uploadRangeOrAll(g *lfs.GitScanner, ctx *uploadContext, q *tq.TransferQueue, exclude []string, update *git.RefUpdate, pushAll bool) error { cb := ctx.gitScannerCallback(q) if pushAll { if err := g.ScanRefWithDeleted(update.LocalRefCommitish(), cb); err != nil { return err } } else { if err := g.ScanMultiRangeToRemote(update.LocalRefCommitish(), exclude, cb); err != nil { return err } } return ctx.scannerError() } type uploadContext struct { Remote string DryRun bool Manifest tq.Manifest uploadedOids tools.StringSet gitfilter *lfs.GitFilter logger 
*tasklog.Logger meter *tq.Meter committerName string committerEmail string lockVerifier *lockVerifier // allowMissing specifies whether pushes containing missing/corrupt // pointers should allow pushing Git blobs allowMissing bool // tracks errors from gitscanner callbacks scannerErr error errMu sync.Mutex // filename => oid missing map[string]string corrupt map[string]string otherErrs []error } func newUploadContext(dryRun bool) *uploadContext { remote := cfg.PushRemote() manifest := getTransferManifestOperationRemote("upload", remote) ctx := &uploadContext{ Remote: remote, Manifest: manifest, DryRun: dryRun, uploadedOids: tools.NewStringSet(), gitfilter: lfs.NewGitFilter(cfg), lockVerifier: newLockVerifier(manifest), allowMissing: cfg.Git.Bool("lfs.allowincompletepush", false), missing: make(map[string]string), corrupt: make(map[string]string), otherErrs: make([]error, 0), } var sink io.Writer = os.Stdout if dryRun { sink = io.Discard } ctx.logger = tasklog.NewLogger(sink, tasklog.ForceProgress(cfg.ForceProgress()), ) ctx.meter = buildProgressMeter(ctx.DryRun, tq.Upload) ctx.logger.Enqueue(ctx.meter) ctx.committerName, ctx.committerEmail = cfg.CurrentCommitter() return ctx } func (c *uploadContext) NewQueue(options ...tq.Option) *tq.TransferQueue { return tq.NewTransferQueue(tq.Upload, c.Manifest, c.Remote, append(options, tq.DryRun(c.DryRun), tq.WithProgress(c.meter), tq.WithBatchSize(cfg.TransferBatchSize()), )...) } func (c *uploadContext) scannerError() error { c.errMu.Lock() defer c.errMu.Unlock() return c.scannerErr } func (c *uploadContext) addScannerError(err error) { c.errMu.Lock() defer c.errMu.Unlock() if c.scannerErr != nil { c.scannerErr = fmt.Errorf("%v\n%v", c.scannerErr, err) } else { c.scannerErr = err } } func (c *uploadContext) buildGitScanner() *lfs.GitScanner { return lfs.NewGitScannerForPush(cfg, c.Remote, func(n string) { c.lockVerifier.LockedByThem(n) }, c.lockVerifier) } func (c *uploadContext) gitScannerCallback(tqueue *tq.TransferQueue) func(*lfs.WrappedPointer, error) { return func(p *lfs.WrappedPointer, err error) { if err != nil { c.addScannerError(err) } else { c.UploadPointers(tqueue, p) } } } // SetUploaded adds the given oid to the set of oids that have been uploaded in // the current process. func (c *uploadContext) SetUploaded(oid string) { c.uploadedOids.Add(oid) } // HasUploaded determines if the given oid has already been uploaded in the // current process. func (c *uploadContext) HasUploaded(oid string) bool { return c.uploadedOids.Contains(oid) } func (c *uploadContext) prepareUpload(unfiltered ...*lfs.WrappedPointer) []*lfs.WrappedPointer { numUnfiltered := len(unfiltered) uploadables := make([]*lfs.WrappedPointer, 0, numUnfiltered) // XXX(taylor): temporary measure to fix duplicate (broken) results from // scanner uniqOids := tools.NewStringSet() // Skip any objects which we've seen or already uploaded, as well // as any which are locked by other users. for _, p := range unfiltered { // object already uploaded in this process, or we've already // seen this OID (see above), skip! if uniqOids.Contains(p.Oid) || c.HasUploaded(p.Oid) || p.Size == 0 { continue } uniqOids.Add(p.Oid) // canUpload determines whether the current pointer "p" can be // uploaded through the TransferQueue below. It is set to false // only when the file is locked by someone other than the // current committer.
var canUpload bool = true if c.lockVerifier.LockedByThem(p.Name) { // If the verification state is enabled, this failed // locks verification means that the push should fail. // // If the state is disabled, the verification error is // silent and the user can upload. // // If the state is undefined, the verification error is // sent as a warning and the user can upload. canUpload = !c.lockVerifier.Enabled() } c.lockVerifier.LockedByUs(p.Name) if canUpload { // estimate in meter early (even if it's not going into // uploadables), since we will call Skip() based on the // results of the download check queue. c.meter.Add(p.Size) uploadables = append(uploadables, p) } } return uploadables } func (c *uploadContext) UploadPointers(q *tq.TransferQueue, unfiltered ...*lfs.WrappedPointer) { if c.DryRun { for _, p := range unfiltered { if c.HasUploaded(p.Oid) { continue } Print("%s %s => %s", tr.Tr.Get("push"), p.Oid, p.Name) c.SetUploaded(p.Oid) } return } pointers := c.prepareUpload(unfiltered...) for _, p := range pointers { t, err := c.uploadTransfer(p) if err != nil && !errors.IsCleanPointerError(err) { ExitWithError(err) } q.Add(t.Name, t.Path, t.Oid, t.Size, t.Missing, nil) c.SetUploaded(p.Oid) } } func (c *uploadContext) CollectErrors(tqueue *tq.TransferQueue) { tqueue.Wait() for _, err := range tqueue.Errors() { if malformed, ok := err.(*tq.MalformedObjectError); ok { if malformed.Missing() { c.missing[malformed.Name] = malformed.Oid } else if malformed.Corrupt() { c.corrupt[malformed.Name] = malformed.Oid } } else { c.otherErrs = append(c.otherErrs, err) } } } func (c *uploadContext) ReportErrors() { c.meter.Finish() for _, err := range c.otherErrs { FullError(err) } if len(c.missing) > 0 || len(c.corrupt) > 0 { var action string if c.allowMissing { action = tr.Tr.Get("missing objects") } else { action = tr.Tr.Get("failed") } Print(tr.Tr.Get("Git LFS upload %s:", action)) for name, oid := range c.missing { // TRANSLATORS: Leading spaces should be preserved. Print(tr.Tr.Get(" (missing) %s (%s)", name, oid)) } for name, oid := range c.corrupt { // TRANSLATORS: Leading spaces should be preserved. Print(tr.Tr.Get(" (corrupt) %s (%s)", name, oid)) } if !c.allowMissing { pushMissingHint := []string{ tr.Tr.Get("hint: Your push was rejected due to missing or corrupt local objects."), tr.Tr.Get("hint: You can disable this check with: `git config lfs.allowincompletepush true`"), } Print(strings.Join(pushMissingHint, "\n")) os.Exit(2) } } if len(c.otherErrs) > 0 { os.Exit(2) } if c.lockVerifier.HasUnownedLocks() { Print(tr.Tr.Get("Unable to push locked files:")) for _, unowned := range c.lockVerifier.UnownedLocks() { Print("* %s - %s", unowned.Path(), unowned.Owners()) } if c.lockVerifier.Enabled() { Exit(tr.Tr.Get("Cannot update locked files.")) } else { Error(tr.Tr.Get("warning: The above files would have halted this push.")) } } else if c.lockVerifier.HasOwnedLocks() { Print(tr.Tr.Get("Consider unlocking your own locked files: (`git lfs unlock `)")) for _, owned := range c.lockVerifier.OwnedLocks() { Print("* %s", owned.Path()) } } } var ( githubHttps, _ = url.Parse("https://github.com") githubSsh, _ = url.Parse("ssh://github.com") // hostsWithKnownLockingSupport is a list of scheme-less hostnames // (without port numbers) that are known to implement the LFS locking // API. // // Additions are welcome. 
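//
// For example, a new host would be added by parsing it above
// (hypothetical):
//
//	exampleHttps, _ = url.Parse("https://git.example.com")
//
// and then appending it to this list.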
hostsWithKnownLockingSupport = []*url.URL{ githubHttps, githubSsh, } ) func (c *uploadContext) uploadTransfer(p *lfs.WrappedPointer) (*tq.Transfer, error) { var missing bool filename := p.Name oid := p.Oid localMediaPath, err := c.gitfilter.ObjectPath(oid) if err != nil { return nil, errors.Wrap(err, tr.Tr.Get("Error uploading file %s (%s)", filename, oid)) } // Skip the object if its corresponding file does not exist in // .git/lfs/objects/. if len(filename) > 0 { if missing, err = c.ensureFile(filename, localMediaPath, oid); err != nil && !errors.IsCleanPointerError(err) { return nil, err } } return &tq.Transfer{ Name: filename, Path: localMediaPath, Oid: oid, Size: p.Size, Missing: missing, }, nil } // ensureFile makes sure that the cleanPath exists before pushing it. If it // does not exist, it attempts to clean it by reading the file at smudgePath. func (c *uploadContext) ensureFile(smudgePath, cleanPath, oid string) (bool, error) { if _, err := os.Stat(cleanPath); err == nil { return false, nil } localPath := filepath.Join(cfg.LocalWorkingDir(), smudgePath) file, err := os.Open(localPath) if err != nil { return !c.allowMissing, nil } defer file.Close() stat, err := file.Stat() if err != nil { return false, err } cleaned, err := c.gitfilter.Clean(file, file.Name(), stat.Size(), nil) if cleaned != nil { cleaned.Teardown() } if err != nil { return false, err } return false, nil } // supportsLockingAPI returns whether or not a given url is known to support // the LFS locking API by whether or not its hostname is included in the list // above. func supportsLockingAPI(rawurl string) bool { u, err := url.Parse(rawurl) if err != nil { tracerx.Printf("commands: unable to parse %q to determine locking support: %v", rawurl, err) return false } for _, supported := range hostsWithKnownLockingSupport { if supported.Scheme == u.Scheme && supported.Hostname() == u.Hostname() && strings.HasPrefix(u.Path, supported.Path) { return true } } return false } // disableFor disables lock verification for the given lfsapi.Endpoint, // "endpoint". 
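//
// Its effect is roughly equivalent to running:
//
//	git config lfs.<url>.locksverify false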
func disableFor(rawurl string) error { tracerx.Printf("commands: disabling lock verification for %q", rawurl) key := strings.Join([]string{"lfs", rawurl, "locksverify"}, ".") _, err := cfg.SetGitLocalKey(key, "false") return err } git-lfs-3.6.1/commands/uploader_test.go000066400000000000000000000021711472372047300201020ustar00rootroot00000000000000package commands import ( "testing" "github.com/stretchr/testify/assert" ) type LockingSupportTestCase struct { Given string ExpectedToMatch bool } func (l *LockingSupportTestCase) Assert(t *testing.T) { assert.Equal(t, l.ExpectedToMatch, supportsLockingAPI(l.Given)) } func TestSupportedLockingHosts(t *testing.T) { for desc, c := range map[string]*LockingSupportTestCase{ "https with path prefix": {"https://github.com/ttaylorr/dotfiles.git/info/lfs", true}, "https with root": {"https://github.com/ttaylorr/dotfiles", true}, "http with path prefix": {"http://github.com/ttaylorr/dotfiles.git/info/lfs", false}, "http with root": {"http://github.com/ttaylorr/dotfiles", false}, "ssh with path prefix": {"ssh://github.com/ttaylorr/dotfiles.git/info/lfs", true}, "ssh with root": {"ssh://github.com/ttaylorr/dotfiles", true}, "ssh with user and path prefix": {"ssh://git@github.com/ttaylorr/dotfiles.git/info/lfs", true}, "ssh with user and root": {"ssh://git@github.com/ttaylorr/dotfiles", true}, } { t.Run(desc, c.Assert) } } git-lfs-3.6.1/config/000077500000000000000000000000001472372047300143445ustar00rootroot00000000000000git-lfs-3.6.1/config/config.go000066400000000000000000000433341472372047300161470ustar00rootroot00000000000000// Package config collects together all configuration settings // NOTE: Subject to change, do not rely on this package from outside git-lfs source package config import ( "fmt" "os" "path/filepath" "regexp" "strconv" "strings" "sync" "time" "unicode" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/fs" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) var ( ShowConfigWarnings = false defaultRemote = "origin" gitConfigWarningPrefix = "lfs." ) type Configuration struct { // Os provides a `*Environment` used to access to the system's // environment through os.Getenv. It is the point of entry for all // system environment configuration. Os Environment // Git provides a `*Environment` used to access to the various levels of // `.gitconfig`'s. It is the point of entry for all Git environment // configuration. Git Environment currentRemote *string pushRemote *string // gitConfig can fetch or modify the current Git config and track the Git // version. 
gitConfig *git.Configuration ref *git.Ref remoteRef *git.Ref fs *fs.Filesystem gitDir *string workDir string loading sync.Mutex // guards initialization of gitConfig and remotes loadingGit sync.Mutex // guards initialization of local git and working dirs remotes []string extensions map[string]Extension mask int maskOnce sync.Once timestamp time.Time } func New() *Configuration { return NewIn("", "") } func NewIn(workdir, gitdir string) *Configuration { gitConf := git.NewConfig(workdir, gitdir) c := &Configuration{ Os: EnvironmentOf(NewOsFetcher()), gitConfig: gitConf, timestamp: time.Now(), } if len(gitConf.WorkDir) > 0 { c.gitDir = &gitConf.GitDir c.workDir = gitConf.WorkDir } c.Git = &delayedEnvironment{ callback: func() Environment { sources, err := gitConf.Sources(c.LocalWorkingDir(), ".lfsconfig") if err != nil { fmt.Fprintln(os.Stderr, tr.Tr.Get("Error reading `git config`: %s", err)) } return c.readGitConfig(sources...) }, } return c } func (c *Configuration) getMask() int { // This logic is necessarily complex because Git's logic is complex. c.maskOnce.Do(func() { val, ok := c.Git.Get("core.sharedrepository") if !ok { val = "umask" } else if Bool(val, false) { val = "group" } switch strings.ToLower(val) { case "group", "true", "1": c.mask = 007 case "all", "world", "everybody", "2": c.mask = 002 case "umask", "false", "0": c.mask = umask() default: if mode, err := strconv.ParseInt(val, 8, 16); err != nil { // If this doesn't look like an octal number, then it // could be a falsy value, in which case we should use // the umask, or it's just invalid, in which case the // umask is a safe bet. c.mask = umask() } else { c.mask = 0666 & ^int(mode) } } }) return c.mask } func (c *Configuration) readGitConfig(gitconfigs ...*git.ConfigurationSource) Environment { gf, extensions, uniqRemotes := readGitConfig(gitconfigs...) c.extensions = extensions c.remotes = make([]string, 0, len(uniqRemotes)) for remote := range uniqRemotes { c.remotes = append(c.remotes, remote) } return EnvironmentOf(gf) } // Values is a convenience type used to call the NewFrom function. It // specifies `Git` and `Env` maps to use as mock values, instead of calling out // to real `.gitconfig`s and the `os.Getenv` function. type Values struct { // Git and Os are the stand-in maps used to provide values for their // respective environments. Git, Os map[string][]string } // NewFrom returns a new `*config.Configuration` that reads both its Git // and Environment-level values from the ones provided instead of the actual // `.gitconfig` file or `os.Getenv`, respectively. // // This method should only be used during testing. func NewFrom(v Values) *Configuration { c := &Configuration{ Os: EnvironmentOf(mapFetcher(v.Os)), gitConfig: git.NewConfig("", ""), timestamp: time.Now(), } c.Git = &delayedEnvironment{ callback: func() Environment { source := &git.ConfigurationSource{ Lines: make([]string, 0, len(v.Git)), } for key, values := range v.Git { parts := strings.Split(key, ".") isCaseSensitive := len(parts) >= 3 hasUpper := strings.IndexFunc(key, unicode.IsUpper) > -1 // This branch should only ever trigger in // tests, and only if they'd be broken. if !isCaseSensitive && hasUpper { panic(tr.Tr.Get("key %q has uppercase, shouldn't", key)) } for _, value := range values { source.Lines = append(source.Lines, fmt.Sprintf("%s=%s", key, value)) } } return c.readGitConfig(source) }, } return c } // BasicTransfersOnly returns whether to only allow "basic" HTTP transfers.
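// It reflects, for example:
//
//	git config lfs.basictransfersonly true
//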
// Default is false, including if the lfs.basictransfersonly is invalid func (c *Configuration) BasicTransfersOnly() bool { return c.Git.Bool("lfs.basictransfersonly", false) } // TusTransfersAllowed returns whether to only use "tus.io" HTTP transfers. // Default is false, including if the lfs.tustransfers is invalid func (c *Configuration) TusTransfersAllowed() bool { return c.Git.Bool("lfs.tustransfers", false) } func (c *Configuration) TransferBatchSize() int { return c.Git.Int("lfs.transfer.batchSize", 0) } func (c *Configuration) FetchIncludePaths() []string { patterns, _ := c.Git.Get("lfs.fetchinclude") return tools.CleanPaths(patterns, ",") } func (c *Configuration) FetchExcludePaths() []string { patterns, _ := c.Git.Get("lfs.fetchexclude") return tools.CleanPaths(patterns, ",") } func (c *Configuration) CurrentRef() *git.Ref { c.loading.Lock() defer c.loading.Unlock() if c.ref == nil { r, err := git.CurrentRef() if err != nil { tracerx.Printf("Error loading current ref: %s", err) c.ref = &git.Ref{} } else { c.ref = r } } return c.ref } func (c *Configuration) IsDefaultRemote() bool { return c.Remote() == defaultRemote } func (c *Configuration) AutoDetectRemoteEnabled() bool { return c.Git.Bool("lfs.remote.autodetect", false) } func (c *Configuration) SearchAllRemotesEnabled() bool { return c.Git.Bool("lfs.remote.searchall", false) } // Remote returns the default remote based on: // 1. The currently tracked remote branch, if present // 2. The value of remote.lfsdefault. // 3. Any other SINGLE remote defined in .git/config // 4. Use "origin" as a fallback. // Results are cached after the first hit. func (c *Configuration) Remote() string { ref := c.CurrentRef() c.loading.Lock() defer c.loading.Unlock() if c.currentRemote == nil { if remote, ok := c.Git.Get(fmt.Sprintf("branch.%s.remote", ref.Name)); len(ref.Name) != 0 && ok { // try tracking remote c.currentRemote = &remote } else if remote, ok := c.Git.Get("remote.lfsdefault"); ok { // try default remote c.currentRemote = &remote } else if remotes := c.Remotes(); len(remotes) == 1 { // use only remote if there is only 1 c.currentRemote = &remotes[0] } else { // fall back to default :( c.currentRemote = &defaultRemote } } return *c.currentRemote } func (c *Configuration) PushRemote() string { ref := c.CurrentRef() c.loading.Lock() defer c.loading.Unlock() if c.pushRemote == nil { if remote, ok := c.Git.Get(fmt.Sprintf("branch.%s.pushRemote", ref.Name)); ok { c.pushRemote = &remote } else if remote, ok := c.Git.Get("remote.lfspushdefault"); ok { c.pushRemote = &remote } else if remote, ok := c.Git.Get("remote.pushDefault"); ok { c.pushRemote = &remote } else { c.loading.Unlock() remote := c.Remote() c.loading.Lock() c.pushRemote = &remote } } return *c.pushRemote } func (c *Configuration) SetValidRemote(name string) error { if err := git.ValidateRemote(name); err != nil { name := git.RewriteLocalPathAsURL(name) if err := git.ValidateRemote(name); err != nil { return err } } c.SetRemote(name) return nil } func (c *Configuration) SetValidPushRemote(name string) error { if err := git.ValidateRemote(name); err != nil { name := git.RewriteLocalPathAsURL(name) if err := git.ValidateRemote(name); err != nil { return err } } c.SetPushRemote(name) return nil } func (c *Configuration) SetRemote(name string) { c.currentRemote = &name } func (c *Configuration) SetPushRemote(name string) { c.pushRemote = &name } func (c *Configuration) Remotes() []string { c.loadGitConfig() return c.remotes } func (c *Configuration) Extensions() 
map[string]Extension { c.loadGitConfig() return c.extensions } // SortedExtensions gets the list of extensions ordered by Priority func (c *Configuration) SortedExtensions() ([]Extension, error) { return SortExtensions(c.Extensions()) } func (c *Configuration) SkipDownloadErrors() bool { return c.Os.Bool("GIT_LFS_SKIP_DOWNLOAD_ERRORS", false) || c.Git.Bool("lfs.skipdownloaderrors", false) } func (c *Configuration) SetLockableFilesReadOnly() bool { return c.Os.Bool("GIT_LFS_SET_LOCKABLE_READONLY", true) && c.Git.Bool("lfs.setlockablereadonly", true) } func (c *Configuration) ForceProgress() bool { return c.Os.Bool("GIT_LFS_FORCE_PROGRESS", false) || c.Git.Bool("lfs.forceprogress", false) } // HookDir returns the location of the hooks owned by this repository. If the // core.hooksPath configuration variable is supported, we prefer that and expand // paths appropriately. func (c *Configuration) HookDir() (string, error) { if git.IsGitVersionAtLeast("2.9.0") { hp, ok := c.Git.Get("core.hooksPath") if ok { path, err := tools.ExpandPath(hp, false) if err != nil { return "", err } if filepath.IsAbs(path) { return path, nil } return filepath.Join(c.LocalWorkingDir(), path), nil } } return filepath.Join(c.LocalGitStorageDir(), "hooks"), nil } func (c *Configuration) InRepo() bool { return len(c.LocalGitDir()) > 0 } func (c *Configuration) LocalWorkingDir() string { c.loadGitDirs() return c.workDir } func (c *Configuration) LocalGitDir() string { c.loadGitDirs() return *c.gitDir } func (c *Configuration) loadGitDirs() { c.loadingGit.Lock() defer c.loadingGit.Unlock() if c.gitDir != nil { return } gitdir, workdir, err := git.GitAndRootDirs() if err != nil { errMsg := err.Error() tracerx.Printf("Error running 'git rev-parse': %s", errMsg) if errors.ExitStatus(err) != 128 { fmt.Fprintln(os.Stderr, tr.Tr.Get("Error: %s", errMsg)) } c.gitDir = &gitdir } gitdir = tools.ResolveSymlinks(gitdir) c.gitDir = &gitdir c.workDir = tools.ResolveSymlinks(workdir) } func (c *Configuration) LocalGitStorageDir() string { return c.Filesystem().GitStorageDir } func (c *Configuration) LocalReferenceDirs() []string { return c.Filesystem().ReferenceDirs } func (c *Configuration) LFSStorageDir() string { return c.Filesystem().LFSStorageDir } func (c *Configuration) LFSObjectDir() string { return c.Filesystem().LFSObjectDir() } func (c *Configuration) LFSObjectExists(oid string, size int64) bool { return c.Filesystem().ObjectExists(oid, size) } func (c *Configuration) EachLFSObject(fn func(fs.Object) error) error { return c.Filesystem().EachObject(fn) } func (c *Configuration) LocalLogDir() string { return c.Filesystem().LogDir() } func (c *Configuration) TempDir() string { return c.Filesystem().TempDir() } func (c *Configuration) Filesystem() *fs.Filesystem { c.loadGitDirs() c.loading.Lock() defer c.loading.Unlock() if c.fs == nil { lfsdir, _ := c.Git.Get("lfs.storage") c.fs = fs.New( c.Os, c.LocalGitDir(), c.LocalWorkingDir(), lfsdir, c.RepositoryPermissions(false), ) } return c.fs } func (c *Configuration) Cleanup() error { if c == nil { return nil } c.loading.Lock() defer c.loading.Unlock() return c.fs.Cleanup() } func (c *Configuration) OSEnv() Environment { return c.Os } func (c *Configuration) GitEnv() Environment { return c.Git } func (c *Configuration) GitConfig() *git.Configuration { return c.gitConfig } func (c *Configuration) FindGitGlobalKey(key string) string { return c.gitConfig.FindGlobal(key) } func (c *Configuration) FindGitSystemKey(key string) string { return c.gitConfig.FindSystem(key) } func (c 
*Configuration) FindGitLocalKey(key string) string { return c.gitConfig.FindLocal(key) } func (c *Configuration) FindGitWorktreeKey(key string) string { return c.gitConfig.FindWorktree(key) } func (c *Configuration) SetGitGlobalKey(key, val string) (string, error) { return c.gitConfig.SetGlobal(key, val) } func (c *Configuration) SetGitSystemKey(key, val string) (string, error) { return c.gitConfig.SetSystem(key, val) } func (c *Configuration) SetGitLocalKey(key, val string) (string, error) { return c.gitConfig.SetLocal(key, val) } func (c *Configuration) SetGitWorktreeKey(key, val string) (string, error) { return c.gitConfig.SetWorktree(key, val) } func (c *Configuration) UnsetGitGlobalSection(key string) (string, error) { return c.gitConfig.UnsetGlobalSection(key) } func (c *Configuration) UnsetGitSystemSection(key string) (string, error) { return c.gitConfig.UnsetSystemSection(key) } func (c *Configuration) UnsetGitLocalSection(key string) (string, error) { return c.gitConfig.UnsetLocalSection(key) } func (c *Configuration) UnsetGitWorktreeSection(key string) (string, error) { return c.gitConfig.UnsetWorktreeSection(key) } func (c *Configuration) UnsetGitLocalKey(key string) (string, error) { return c.gitConfig.UnsetLocalKey(key) } // loadGitConfig is a temporary measure to support legacy behavior dependent on // accessing properties set by ReadGitConfig, namely: // - `c.extensions` // - `c.uniqRemotes` // - `c.gitConfig` // // Since the *gitEnvironment is responsible for setting these values on the // (*config.Configuration) instance, we must call that method, if it exists. // // loadGitConfig ensures the Git configuration has been loaded, triggering // the delayed environment's parse if it has not yet run. func (c *Configuration) loadGitConfig() { if g, ok := c.Git.(*delayedEnvironment); ok { g.Load() } } var ( // dateFormats is a list of all the date formats that Git accepts, // except for the built-in one, which is handled below. dateFormats = []string{ "Mon, 02 Jan 2006 15:04:05 -0700", "2006-01-02T15:04:05-0700", "2006-01-02 15:04:05-0700", "2006.01.02T15:04:05-0700", "2006.01.02 15:04:05-0700", "01/02/2006T15:04:05-0700", "01/02/2006 15:04:05-0700", "02.01.2006T15:04:05-0700", "02.01.2006 15:04:05-0700", "2006-01-02T15:04:05Z", "2006-01-02 15:04:05Z", "2006.01.02T15:04:05Z", "2006.01.02 15:04:05Z", "01/02/2006T15:04:05Z", "01/02/2006 15:04:05Z", "02.01.2006T15:04:05Z", "02.01.2006 15:04:05Z", } // defaultDatePattern is the regexp for Git's native date format. defaultDatePattern = regexp.MustCompile(`\A(\d+) ([+-])(\d{2})(\d{2})\z`) ) // findUserData returns the name/email that should be used in the commit header. // We use the same technique as Git for finding this information, except that we // don't fall back to querying the system for defaults if no values are found in // the Git configuration or environment. // // envType should be "author" or "committer". func (c *Configuration) findUserData(envType string) (name, email string) { var filter = func(r rune) rune { switch r { case '<', '>', '\n': return -1 default: return r } } envType = strings.ToUpper(envType) name, ok := c.Os.Get("GIT_" + envType + "_NAME") if !ok { name, _ = c.Git.Get("user.name") } email, ok = c.Os.Get("GIT_" + envType + "_EMAIL") if !ok { email, ok = c.Git.Get("user.email") } if !ok { email, _ = c.Os.Get("EMAIL") } // Git filters certain characters out of the name and email fields.
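// For example, a configured name of "Pat <Doe>" is reduced to "Pat Doe".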
name = strings.Map(filter, name) email = strings.Map(filter, email) return } func (c *Configuration) findUserTimestamp(envType string) time.Time { date, ok := c.Os.Get(fmt.Sprintf("GIT_%s_DATE", strings.ToUpper(envType))) if !ok { return c.timestamp } // time.Parse doesn't parse seconds from the Epoch, like we use in the // Git native format, so we have to do it ourselves. strs := defaultDatePattern.FindStringSubmatch(date) if strs != nil { unixSecs, _ := strconv.ParseInt(strs[1], 10, 64) hours, _ := strconv.Atoi(strs[3]) offset, _ := strconv.Atoi(strs[4]) offset = (offset + hours*60) * 60 if strs[2] == "-" { offset = -offset } return time.Unix(unixSecs, 0).In(time.FixedZone("", offset)) } for _, format := range dateFormats { if t, err := time.Parse(format, date); err == nil { return t } } // The user provided an invalid value, so default to the current time. return c.timestamp } // CurrentCommitter returns the name/email that would be used to commit a change // with this configuration. In particular, the "user.name" and "user.email" // configuration values are used. func (c *Configuration) CurrentCommitter() (name, email string) { return c.findUserData("committer") } // CurrentCommitterTimestamp returns the timestamp that would be used to commit // a change with this configuration. func (c *Configuration) CurrentCommitterTimestamp() time.Time { return c.findUserTimestamp("committer") } // CurrentAuthor returns the name/email that would be used to author a change // with this configuration. In particular, the "user.name" and "user.email" // configuration values are used. func (c *Configuration) CurrentAuthor() (name, email string) { return c.findUserData("author") } // CurrentAuthorTimestamp returns the timestamp that would be used to author // a change with this configuration. func (c *Configuration) CurrentAuthorTimestamp() time.Time { return c.findUserTimestamp("author") } // RepositoryPermissions returns the permissions that should be used to write // files in the repository.
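//
// For example (per the permission tests below), with
// core.sharedRepository=group this yields 0660 for regular files and 0770
// for executables.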
func (c *Configuration) RepositoryPermissions(executable bool) os.FileMode { perms := os.FileMode(0666 & ^c.getMask()) if executable { return tools.ExecutablePermissions(perms) } return perms } git-lfs-3.6.1/config/config_test.go000066400000000000000000000226341472372047300172060ustar00rootroot00000000000000package config import ( "os" "testing" "time" "github.com/git-lfs/git-lfs/v3/git" "github.com/stretchr/testify/assert" ) func TestRemoteDefault(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "branch.unused.remote": []string{"a"}, "branch.unused.pushremote": []string{"b"}, }, }) assert.Equal(t, "origin", cfg.Remote()) assert.Equal(t, "origin", cfg.PushRemote()) } func TestRemoteBranchConfig(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "branch.master.remote": []string{"a"}, "branch.other.pushremote": []string{"b"}, }, }) cfg.ref = &git.Ref{Name: "master"} assert.Equal(t, "a", cfg.Remote()) assert.Equal(t, "a", cfg.PushRemote()) } func TestRemotePushDefault(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "branch.master.remote": []string{"a"}, "remote.pushdefault": []string{"b"}, "branch.other.pushremote": []string{"c"}, }, }) cfg.ref = &git.Ref{Name: "master"} assert.Equal(t, "a", cfg.Remote()) assert.Equal(t, "b", cfg.PushRemote()) } func TestRemoteBranchPushDefault(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "branch.master.remote": []string{"a"}, "remote.pushdefault": []string{"b"}, "branch.master.pushremote": []string{"c"}, }, }) cfg.ref = &git.Ref{Name: "master"} assert.Equal(t, "a", cfg.Remote()) assert.Equal(t, "c", cfg.PushRemote()) } func TestLFSDefault(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "remote.lfspushdefault": []string{"a"}, "remote.pushdefault": []string{"b"}, "remote.lfsdefault": []string{"c"}, }, }) assert.Equal(t, "c", cfg.Remote()) assert.Equal(t, "a", cfg.PushRemote()) } func TestLFSDefaultSimple(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "remote.lfsdefault": []string{"a"}, }, }) assert.Equal(t, "a", cfg.Remote()) assert.Equal(t, "a", cfg.PushRemote()) } func TestLFSDefaultBranch(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "branch.main.remote": []string{"a"}, "remote.pushdefault": []string{"b"}, "branch.main.pushremote": []string{"c"}, "remote.lfspushdefault": []string{"d"}, "remote.lfsdefault": []string{"e"}, }, }) cfg.ref = &git.Ref{Name: "main"} assert.Equal(t, "a", cfg.Remote()) assert.Equal(t, "c", cfg.PushRemote()) } func TestBasicTransfersOnlySetValue(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "lfs.basictransfersonly": []string{"true"}, }, }) b := cfg.BasicTransfersOnly() assert.Equal(t, true, b) } func TestBasicTransfersOnlyDefault(t *testing.T) { cfg := NewFrom(Values{}) b := cfg.BasicTransfersOnly() assert.Equal(t, false, b) } func TestBasicTransfersOnlyInvalidValue(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "lfs.basictransfersonly": []string{"wat"}, }, }) b := cfg.BasicTransfersOnly() assert.Equal(t, false, b) } func TestTusTransfersAllowedSetValue(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "lfs.tustransfers": []string{"true"}, }, }) b := cfg.TusTransfersAllowed() assert.Equal(t, true, b) } func TestTusTransfersAllowedDefault(t *testing.T) { cfg := NewFrom(Values{}) b := cfg.TusTransfersAllowed() assert.Equal(t, false, b) } func TestTusTransfersAllowedInvalidValue(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "lfs.tustransfers": 
[]string{"wat"}, }, }) b := cfg.TusTransfersAllowed() assert.Equal(t, false, b) } func TestLoadValidExtension(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "lfs.extension.foo.clean": []string{"foo-clean %f"}, "lfs.extension.foo.smudge": []string{"foo-smudge %f"}, "lfs.extension.foo.priority": []string{"2"}, }, }) ext := cfg.Extensions()["foo"] assert.Equal(t, "foo", ext.Name) assert.Equal(t, "foo-clean %f", ext.Clean) assert.Equal(t, "foo-smudge %f", ext.Smudge) assert.Equal(t, 2, ext.Priority) } func TestLoadInvalidExtension(t *testing.T) { cfg := NewFrom(Values{}) ext := cfg.Extensions()["foo"] assert.Equal(t, "", ext.Name) assert.Equal(t, "", ext.Clean) assert.Equal(t, "", ext.Smudge) assert.Equal(t, 0, ext.Priority) } func TestFetchIncludeExcludesAreCleaned(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "lfs.fetchinclude": []string{"/path/to/clean/"}, "lfs.fetchexclude": []string{"/other/path/to/clean/"}, }, }) assert.Equal(t, []string{"/path/to/clean"}, cfg.FetchIncludePaths()) assert.Equal(t, []string{"/other/path/to/clean"}, cfg.FetchExcludePaths()) } func TestRepositoryPermissions(t *testing.T) { perms := 0666 & ^umask() values := map[string]int{ "group": 0660, "true": 0660, "1": 0660, "YES": 0660, "all": 0664, "world": 0664, "everybody": 0664, "2": 0664, "false": perms, "umask": perms, "0": perms, "NO": perms, "this does not remotely look like a valid value": perms, "0664": 0664, "0666": 0666, "0600": 0600, "0660": 0660, "0644": 0644, } for key, val := range values { cfg := NewFrom(Values{ Git: map[string][]string{ "core.sharedrepository": []string{key}, }, }) assert.Equal(t, os.FileMode(val), cfg.RepositoryPermissions(false)) } } func TestRepositoryPermissionsExectable(t *testing.T) { perms := 0777 & ^umask() values := map[string]int{ "group": 0770, "true": 0770, "1": 0770, "YES": 0770, "all": 0775, "world": 0775, "everybody": 0775, "2": 0775, "false": perms, "umask": perms, "0": perms, "NO": perms, "this does not remotely look like a valid value": perms, "0664": 0775, "0666": 0777, "0600": 0700, "0660": 0770, "0644": 0755, } for key, val := range values { cfg := NewFrom(Values{ Git: map[string][]string{ "core.sharedrepository": []string{key}, }, }) assert.Equal(t, os.FileMode(val), cfg.RepositoryPermissions(true)) } } func TestCurrentUser(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "user.name": []string{"Pat Doe"}, "user.email": []string{"pdoe@example.org"}, }, Os: map[string][]string{ "EMAIL": []string{"pdoe@example.com"}, }, }) name, email := cfg.CurrentCommitter() assert.Equal(t, name, "Pat Doe") assert.Equal(t, email, "pdoe@example.org") cfg = NewFrom(Values{ Git: map[string][]string{ "user.name": []string{"Pat Doe"}, }, Os: map[string][]string{ "EMAIL": []string{"pdoe@example.com"}, }, }) name, email = cfg.CurrentCommitter() assert.Equal(t, name, "Pat Doe") assert.Equal(t, email, "pdoe@example.com") cfg = NewFrom(Values{ Git: map[string][]string{ "user.name": []string{"Pat Doe"}, "user.email": []string{"pdoe@example.org"}, }, Os: map[string][]string{ "GIT_COMMITTER_NAME": []string{"Sam Roe"}, "GIT_COMMITTER_EMAIL": []string{"sroe@example.net"}, "EMAIL": []string{"pdoe@example.com"}, }, }) name, email = cfg.CurrentCommitter() assert.Equal(t, name, "Sam Roe") assert.Equal(t, email, "sroe@example.net") cfg = NewFrom(Values{ Git: map[string][]string{ "user.name": []string{"Pat Doe"}, "user.email": []string{"pdoe@example.org"}, }, Os: map[string][]string{ "GIT_AUTHOR_NAME": []string{"Sam Roe"}, "GIT_AUTHOR_EMAIL": 
[]string{"sroe@example.net"}, "EMAIL": []string{"pdoe@example.com"}, }, }) name, email = cfg.CurrentCommitter() assert.Equal(t, name, "Pat Doe") assert.Equal(t, email, "pdoe@example.org") name, email = cfg.CurrentAuthor() assert.Equal(t, name, "Sam Roe") assert.Equal(t, email, "sroe@example.net") } func TestCurrentTimestamp(t *testing.T) { m := map[string]string{ "1136239445 -0700": "2006-01-02T15:04:05-07:00", "Mon, 02 Jan 2006 15:04:05 -0700": "2006-01-02T15:04:05-07:00", "2006-01-02T15:04:05-0700": "2006-01-02T15:04:05-07:00", "2006-01-02 15:04:05-0700": "2006-01-02T15:04:05-07:00", "2006.01.02T15:04:05-0700": "2006-01-02T15:04:05-07:00", "2006.01.02 15:04:05-0700": "2006-01-02T15:04:05-07:00", "01/02/2006T15:04:05-0700": "2006-01-02T15:04:05-07:00", "01/02/2006 15:04:05-0700": "2006-01-02T15:04:05-07:00", "02.01.2006T15:04:05-0700": "2006-01-02T15:04:05-07:00", "02.01.2006 15:04:05-0700": "2006-01-02T15:04:05-07:00", "2006-01-02T15:04:05Z": "2006-01-02T15:04:05Z", "2006-01-02 15:04:05Z": "2006-01-02T15:04:05Z", "2006.01.02T15:04:05Z": "2006-01-02T15:04:05Z", "2006.01.02 15:04:05Z": "2006-01-02T15:04:05Z", "01/02/2006T15:04:05Z": "2006-01-02T15:04:05Z", "01/02/2006 15:04:05Z": "2006-01-02T15:04:05Z", "02.01.2006T15:04:05Z": "2006-01-02T15:04:05Z", "02.01.2006 15:04:05Z": "2006-01-02T15:04:05Z", "not a date": "default", "": "default", } for val, res := range m { cfg := NewFrom(Values{ Os: map[string][]string{ "GIT_COMMITTER_DATE": []string{val}, }, }) date := cfg.CurrentCommitterTimestamp() if res == "default" { assert.Equal(t, date, cfg.timestamp) } else { assert.Equal(t, date.Format(time.RFC3339), res) } } } func TestRemoteNameWithDotDefault(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "remote.name.with.dot.url": []string{"http://remote.url/repo"}, }, }) assert.Equal(t, "name.with.dot", cfg.Remote()) } git-lfs-3.6.1/config/delayed_environment.go000066400000000000000000000034171472372047300207330ustar00rootroot00000000000000package config import ( "sync" ) // delayedEnvironment is an implementation of the Environment which wraps the legacy // behavior of `*config.Configuration.loadGitConfig()`. // // It is functionally equivalent to call `cfg.loadGitConfig()` before calling // methods on the Environment type. type delayedEnvironment struct { env Environment loading sync.Mutex callback func() Environment } // Get is shorthand for calling the e.Load(), and then returning // `e.env.Get(key)`. func (e *delayedEnvironment) Get(key string) (string, bool) { e.Load() return e.env.Get(key) } // Get is shorthand for calling the e.Load(), and then returning // `e.env.GetAll(key)`. func (e *delayedEnvironment) GetAll(key string) []string { e.Load() return e.env.GetAll(key) } // Get is shorthand for calling the e.Load(), and then returning // `e.env.Bool(key, def)`. func (e *delayedEnvironment) Bool(key string, def bool) bool { e.Load() return e.env.Bool(key, def) } // Get is shorthand for calling the e.Load(), and then returning // `e.env.Int(key, def)`. func (e *delayedEnvironment) Int(key string, def int) int { e.Load() return e.env.Int(key, def) } // All returns a copy of all the key/value pairs for the current git config. func (e *delayedEnvironment) All() map[string][]string { e.Load() return e.env.All() } // Load reads and parses the .gitconfig by calling ReadGitConfig. It // also sets values on the configuration instance `g.config`. // // If Load has already been called, this method will bail out early, // and return false. 
Otherwise it will perform the entire parse and return true.
//
// Load is safe to call across multiple goroutines.
func (e *delayedEnvironment) Load() {
	e.loading.Lock()
	defer e.loading.Unlock()

	if e.env != nil {
		return
	}

	e.env = e.callback()
}

git-lfs-3.6.1/config/environment.go

package config

import (
	"strconv"
	"strings"
)

// An Environment adds additional behavior to a Fetcher, such as type
// conversion and default values.
//
// `Environment`s are the primary way to communicate with various configuration
// sources, such as the OS environment variables, the `.gitconfig`, and even
// `map[string]string`s.
type Environment interface {
	// Get is shorthand for calling `e.Fetcher.Get(key)`.
	Get(key string) (val string, ok bool)

	// GetAll is shorthand for calling `e.Fetcher.GetAll(key)`.
	GetAll(key string) (vals []string)

	// Bool returns the boolean state associated with a given key, or the
	// value "def", if no value was associated.
	//
	// The "boolean state associated with a given key" is defined as the
	// case-insensitive string comparison with the following:
	//
	// 1) true if...
	//   "true", "1", "on", "yes", or "t"
	// 2) false if...
	//   "false", "0", "off", "no", "f", or otherwise.
	Bool(key string, def bool) (val bool)

	// Int returns the int value associated with a given key, or the value
	// "def", if no value was associated.
	//
	// To convert from the string value attached to a given key,
	// `strconv.Atoi(val)` is called. If `Atoi` returned a non-nil error,
	// then the value "def" will be returned instead.
	//
	// Otherwise, if the value was converted `string -> int` successfully,
	// then it will be returned wholesale.
	Int(key string, def int) (val int)

	// All returns a copy of all the key/value pairs for the current
	// environment.
	All() map[string][]string
}

type environment struct {
	// Fetcher is the `environment`'s source of data.
	Fetcher Fetcher
}

// EnvironmentOf creates a new `Environment` initialized with the given
// `Fetcher`, "f".
func EnvironmentOf(f Fetcher) Environment {
	return &environment{f}
}

func (e *environment) Get(key string) (val string, ok bool) {
	return e.Fetcher.Get(key)
}

func (e *environment) GetAll(key string) []string {
	return e.Fetcher.GetAll(key)
}

func (e *environment) Bool(key string, def bool) bool {
	s, _ := e.Fetcher.Get(key)
	return Bool(s, def)
}

func (e *environment) Int(key string, def int) int {
	s, _ := e.Fetcher.Get(key)
	return Int(s, def)
}

func (e *environment) All() map[string][]string {
	return e.Fetcher.All()
}

// Int returns the int value associated with the given value, or the value
// "def", if the value is blank.
//
// To convert from the string value attached to a given key,
// `strconv.Atoi(val)` is called. If `Atoi` returned a non-nil error,
// then the value "def" will be returned instead.
//
// Otherwise, if the value was converted `string -> int` successfully,
// then it will be returned wholesale.
func Int(value string, def int) int {
	if len(value) == 0 {
		return def
	}

	i, err := strconv.Atoi(value)
	if err != nil {
		return def
	}

	return i
}

// Bool returns the boolean state associated with the given value, or the
// value "def", if the value is blank.
//
// The "boolean state associated with a given key" is defined as the
// case-insensitive string comparison with the following:
//
//  1. true if...
//     "true", "1", "on", "yes", or "t"
//  2. false if...
//     "false", "0", "off", "no", "f", or otherwise.
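//
// As an illustrative sketch (not taken from the upstream sources), the
// conversion helpers behave as follows for a few sample inputs:
//
//	Bool("yes", false) // => true
//	Bool("off", true)  // => false
//	Bool("", true)     // => true (a blank value yields the default)
//	Int("3", 0)        // => 3
//	Int("junk", 7)     // => 7 (an unparsable value yields the default)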
func Bool(value string, def bool) bool { if len(value) == 0 { return def } switch strings.ToLower(value) { case "true", "1", "on", "yes", "t": return true case "false", "0", "off", "no", "f": return false default: return false } } git-lfs-3.6.1/config/environment_test.go000066400000000000000000000045151472372047300203030ustar00rootroot00000000000000package config_test import ( "testing" . "github.com/git-lfs/git-lfs/v3/config" "github.com/stretchr/testify/assert" ) func TestEnvironmentGetDelegatesToFetcher(t *testing.T) { fetcher := MapFetcher(map[string][]string{ "foo": []string{"bar", "baz"}, }) env := EnvironmentOf(fetcher) val, ok := env.Get("foo") assert.True(t, ok) assert.Equal(t, "baz", val) } func TestEnvironmentGetAllDelegatesToFetcher(t *testing.T) { fetcher := MapFetcher(map[string][]string{ "foo": []string{"bar", "baz"}, }) env := EnvironmentOf(fetcher) vals := env.GetAll("foo") assert.Equal(t, []string{"bar", "baz"}, vals) } func TestEnvironmentUnsetBoolDefault(t *testing.T) { env := EnvironmentOf(MapFetcher(nil)) assert.True(t, env.Bool("unset", true)) } func TestEnvironmentBoolTruthyConversion(t *testing.T) { for _, c := range []EnvironmentConversionTestCase{ {"", false, GetBoolDefault(false)}, {"true", true, GetBoolDefault(false)}, {"1", true, GetBoolDefault(false)}, {"on", true, GetBoolDefault(false)}, {"yes", true, GetBoolDefault(false)}, {"t", true, GetBoolDefault(false)}, {"false", false, GetBoolDefault(true)}, {"0", false, GetBoolDefault(true)}, {"off", false, GetBoolDefault(true)}, {"no", false, GetBoolDefault(true)}, {"f", false, GetBoolDefault(true)}, } { c.Assert(t) } } func TestEnvironmentIntTestCases(t *testing.T) { for _, c := range []EnvironmentConversionTestCase{ {"", 1, GetIntDefault(1)}, {"1", 1, GetIntDefault(0)}, {"3", 3, GetIntDefault(0)}, {"malformed", 7, GetIntDefault(7)}, } { c.Assert(t) } } type EnvironmentConversionTestCase struct { Val string Expected interface{} GotFn func(env Environment, key string) interface{} } var ( GetBoolDefault = func(def bool) func(e Environment, key string) interface{} { return func(e Environment, key string) interface{} { return e.Bool(key, def) } } GetIntDefault = func(def int) func(e Environment, key string) interface{} { return func(e Environment, key string) interface{} { return e.Int(key, def) } } ) func (c *EnvironmentConversionTestCase) Assert(t *testing.T) { fetcher := MapFetcher(map[string][]string{ c.Val: []string{c.Val}, }) env := EnvironmentOf(fetcher) got := c.GotFn(env, c.Val) if c.Expected != got { t.Errorf("lfs/config: expected val=%q to be %q (got: %q)", c.Val, c.Expected, got) } } git-lfs-3.6.1/config/extension.go000066400000000000000000000015671472372047300167200ustar00rootroot00000000000000package config import ( "errors" "sort" "github.com/git-lfs/git-lfs/v3/tr" ) // An Extension describes how to manipulate files during smudge and clean. // Extensions are parsed from the Git config. 
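//
// For example, a hypothetical "foo" extension (the name and commands here are
// illustrative only, mirroring the keys used in the tests above) could be
// declared in the Git config as:
//
//	[lfs "extension.foo"]
//		clean = foo-clean %f
//		smudge = foo-smudge %f
//		priority = 2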
type Extension struct { Name string Clean string Smudge string Priority int } // SortExtensions sorts a map of extensions in ascending order by Priority func SortExtensions(m map[string]Extension) ([]Extension, error) { pMap := make(map[int]Extension) priorities := make([]int, 0, len(m)) for n, ext := range m { p := ext.Priority if _, exist := pMap[p]; exist { err := errors.New(tr.Tr.Get("duplicate priority %d on %s", p, n)) return nil, err } pMap[p] = ext priorities = append(priorities, p) } sort.Ints(priorities) result := make([]Extension, len(priorities)) for i, p := range priorities { result[i] = pMap[p] } return result, nil } git-lfs-3.6.1/config/extension_test.go000066400000000000000000000020211472372047300177410ustar00rootroot00000000000000package config import ( "testing" "github.com/stretchr/testify/assert" ) func TestSortExtensions(t *testing.T) { m := map[string]Extension{ "baz": Extension{ "baz", "baz-clean %f", "baz-smudge %f", 2, }, "foo": Extension{ "foo", "foo-clean %f", "foo-smudge %f", 0, }, "bar": Extension{ "bar", "bar-clean %f", "bar-smudge %f", 1, }, } names := []string{"foo", "bar", "baz"} sorted, err := SortExtensions(m) assert.Nil(t, err) for i, ext := range sorted { name := names[i] assert.Equal(t, name, ext.Name) assert.Equal(t, name+"-clean %f", ext.Clean) assert.Equal(t, name+"-smudge %f", ext.Smudge) assert.Equal(t, i, ext.Priority) } } func TestSortExtensionsDuplicatePriority(t *testing.T) { m := map[string]Extension{ "foo": Extension{ "foo", "foo-clean %f", "foo-smudge %f", 0, }, "bar": Extension{ "bar", "bar-clean %f", "bar-smudge %f", 0, }, } sorted, err := SortExtensions(m) assert.NotNil(t, err) assert.Empty(t, sorted) } git-lfs-3.6.1/config/fetcher.go000066400000000000000000000013671472372047300163220ustar00rootroot00000000000000package config // Fetcher provides an interface to get typed information out of a configuration // "source". These sources could be the OS environment, a .gitconfig, or even // just a `map`. type Fetcher interface { // Get returns the string value associated with a given key and a bool // determining if the key exists. // // If multiple entries match the given key, the first one will be // returned. Get(key string) (val string, ok bool) // GetAll returns the a set of string values associated with a given // key. If no entries matched the given key, an empty slice will be // returned instead. GetAll(key string) (vals []string) // All returns a copy of all the key/value pairs for the current // environment. All() map[string][]string } git-lfs-3.6.1/config/git_fetcher.go000066400000000000000000000112161472372047300171570ustar00rootroot00000000000000package config import ( "fmt" "os" "strconv" "strings" "sync" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/tr" ) type GitFetcher struct { vmu sync.RWMutex vals map[string][]string } func readGitConfig(configs ...*git.ConfigurationSource) (gf *GitFetcher, extensions map[string]Extension, uniqRemotes map[string]bool) { vals := make(map[string][]string) ignored := make([]string, 0) extensions = make(map[string]Extension) uniqRemotes = make(map[string]bool) for _, gc := range configs { uniqKeys := make(map[string]string) for _, line := range gc.Lines { pieces := strings.SplitN(line, "=", 2) if len(pieces) < 2 { continue } allowed := !gc.OnlySafeKeys // We don't need to change the case of the key here, // since Git will already have canonicalized it for us. 
key, val := pieces[0], pieces[1] if origKey, ok := uniqKeys[key]; ok { if ShowConfigWarnings && len(vals[key]) > 0 && vals[key][len(vals[key])-1] != val && strings.HasPrefix(key, gitConfigWarningPrefix) { fmt.Fprintln(os.Stderr, tr.Tr.Get("warning: These `git config` values clash:")) fmt.Fprintf(os.Stderr, " git config %q = %q\n", origKey, vals[key]) fmt.Fprintf(os.Stderr, " git config %q = %q\n", pieces[0], val) } } else { uniqKeys[key] = pieces[0] } parts := strings.Split(key, ".") if len(parts) == 4 && parts[0] == "lfs" && parts[1] == "extension" { // prop: lfs.extension.. name := parts[2] prop := parts[3] ext := extensions[name] ext.Name = name switch prop { case "clean": if gc.OnlySafeKeys { ignored = append(ignored, key) continue } ext.Clean = val case "smudge": if gc.OnlySafeKeys { ignored = append(ignored, key) continue } ext.Smudge = val case "priority": allowed = true p, err := strconv.Atoi(val) if err == nil && p >= 0 { ext.Priority = p } } extensions[name] = ext } else if len(parts) > 1 && parts[0] == "remote" { if gc.OnlySafeKeys && (len(parts) == 3 && parts[2] != "lfsurl") { ignored = append(ignored, key) continue } allowed = true remote := strings.Join(parts[1:len(parts)-1], ".") uniqRemotes[remote] = remote == "origin" } else if len(parts) > 2 && parts[len(parts)-1] == "access" { allowed = true } if !allowed && keyIsUnsafe(key) { ignored = append(ignored, key) continue } vals[key] = append(vals[key], val) } } if len(ignored) > 0 { fmt.Fprint(os.Stderr, tr.Tr.Get("warning: These unsafe '.lfsconfig' keys were ignored:"), "\n\n") for _, key := range ignored { fmt.Fprintf(os.Stderr, " %s\n", key) } } gf = &GitFetcher{vals: vals} return } // Get implements the Fetcher interface, and returns the value associated with // a given key and true, signaling that the value was present. Otherwise, an // empty string and false will be returned, signaling that the value was // absent. // // Map lookup by key is case-insensitive, except for the middle part of a // three-part key, as per the .gitconfig specification. // // Get is safe to call across multiple goroutines. func (g *GitFetcher) Get(key string) (val string, ok bool) { all := g.GetAll(key) if len(all) == 0 { return "", false } return all[len(all)-1], true } func (g *GitFetcher) GetAll(key string) []string { g.vmu.RLock() defer g.vmu.RUnlock() return g.vals[g.caseFoldKey(key)] } func (g *GitFetcher) All() map[string][]string { newmap := make(map[string][]string) g.vmu.RLock() defer g.vmu.RUnlock() for key, values := range g.vals { for _, value := range values { newmap[key] = append(newmap[key], value) } } return newmap } func (g *GitFetcher) caseFoldKey(key string) string { parts := strings.Split(key, ".") last := len(parts) - 1 // We check for 3 or more parts here because if the middle part is a // URL, it may have dots in it. We'll downcase the part before the first // dot and after the last dot, but preserve the piece in the middle, // which may be a branch name, remote, or URL, all of which are // case-sensitive. This is the algorithm Git uses to canonicalize its // keys. 
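// For example (illustrative): "HTTP.https://Example.COM/Repo.git.ExtraHeader"
// folds to "http.https://Example.COM/Repo.git.extraheader"; only the first
// and last dot-separated segments are downcased.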
	if len(parts) < 3 {
		return strings.ToLower(key)
	}

	return strings.Join([]string{
		strings.ToLower(parts[0]),
		strings.Join(parts[1:last], "."),
		strings.ToLower(parts[last]),
	}, ".")
}

func keyIsUnsafe(key string) bool {
	for _, safe := range safeKeys {
		if safe == key {
			return false
		}
	}
	return true
}

var safeKeys = []string{
	"lfs.allowincompletepush",
	"lfs.fetchexclude",
	"lfs.fetchinclude",
	"lfs.gitprotocol",
	"lfs.locksverify",
	"lfs.pushurl",
	"lfs.skipdownloaderrors",
	"lfs.url",
}

git-lfs-3.6.1/config/git_fetcher_test.go

package config

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestGetCanonicalization(t *testing.T) {
	vals := map[string][]string{
		"user.name":                   []string{"Pat Doe"},
		"branch.MixedCase.pushremote": []string{"Somewhere"},
		"http.https://example.com/BIG-TEXT.git.extraheader": []string{"X-Foo: Bar"},
	}
	fetcher := GitFetcher{vals: vals}
	assert.Equal(t, []string{"Somewhere"}, fetcher.GetAll("bRanch.MixedCase.pushRemote"))
	assert.Equal(t, []string{"Somewhere"}, fetcher.GetAll("branch.MixedCase.pushremote"))
	assert.Equal(t, []string(nil), fetcher.GetAll("branch.mixedcase.pushremote"))
	assert.Equal(t, []string{"Pat Doe"}, fetcher.GetAll("user.name"))
	assert.Equal(t, []string{"Pat Doe"}, fetcher.GetAll("User.Name"))
	assert.Equal(t, []string{"X-Foo: Bar"}, fetcher.GetAll("http.https://example.com/BIG-TEXT.git.extraheader"))
	assert.Equal(t, []string{"X-Foo: Bar"}, fetcher.GetAll("http.https://example.com/BIG-TEXT.git.extraHeader"))
	assert.Equal(t, []string(nil), fetcher.GetAll("http.https://example.com/big-text.git.extraHeader"))
}

git-lfs-3.6.1/config/map_fetcher.go

package config

// mapFetcher provides an implementation of the Fetcher interface by wrapping
// the `map[string][]string` type.
type mapFetcher map[string][]string

func UniqMapFetcher(m map[string]string) Fetcher {
	multi := make(map[string][]string, len(m))
	for k, v := range m {
		multi[k] = []string{v}
	}

	return MapFetcher(multi)
}

func MapFetcher(m map[string][]string) Fetcher {
	return mapFetcher(m)
}

// Get implements the func `Fetcher.Get`.
func (m mapFetcher) Get(key string) (val string, ok bool) {
	all := m.GetAll(key)

	if len(all) == 0 {
		return "", false
	}
	return all[len(all)-1], true
}

// GetAll implements the func `Fetcher.GetAll`.
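//
// A brief usage sketch (illustrative values, written as an external caller of
// this package would use it):
//
//	f := config.MapFetcher(map[string][]string{
//		"lfs.fetchinclude": []string{"a", "b"},
//	})
//	f.GetAll("lfs.fetchinclude")       // => []string{"a", "b"}
//	v, ok := f.Get("lfs.fetchinclude") // => "b", true (the last value wins)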
func (m mapFetcher) GetAll(key string) []string { return m[key] } func (m mapFetcher) All() map[string][]string { newmap := make(map[string][]string) for key, values := range m { for _, value := range values { newmap[key] = append(newmap[key], value) } } return newmap } git-lfs-3.6.1/config/netrc.go000066400000000000000000000011141472372047300160030ustar00rootroot00000000000000package config import ( "os" "path/filepath" "github.com/git-lfs/go-netrc/netrc" ) type netrcfinder interface { FindMachine(string, string) *netrc.Machine } type noNetrc struct{} func (n *noNetrc) FindMachine(host string, loginName string) *netrc.Machine { return nil } func (c *Configuration) parseNetrc() (netrcfinder, error) { home, _ := c.Os.Get("HOME") if len(home) == 0 { return &noNetrc{}, nil } nrcfilename := filepath.Join(home, netrcBasename) if _, err := os.Stat(nrcfilename); err != nil { return &noNetrc{}, nil } return netrc.ParseFile(nrcfilename) } git-lfs-3.6.1/config/netrc_nix.go000066400000000000000000000001251472372047300166620ustar00rootroot00000000000000//go:build !windows // +build !windows package config var netrcBasename = ".netrc" git-lfs-3.6.1/config/netrc_windows.go000066400000000000000000000001231472372047300175540ustar00rootroot00000000000000//go:build windows // +build windows package config var netrcBasename = "_netrc" git-lfs-3.6.1/config/os_fetcher.go000066400000000000000000000032351472372047300170170ustar00rootroot00000000000000package config import ( "os" "sync" ) // OsFetcher is an implementation of the Fetcher type for communicating with // the system's environment. // // It is safe to use across multiple goroutines. type OsFetcher struct { // vmu guards read/write access to vals vmu sync.Mutex // vals maintains a local cache of the system's environment variables // for fast repeat lookups of a given key. vals map[string]*string } // NewOsFetcher returns a new *OsFetcher. func NewOsFetcher() *OsFetcher { return &OsFetcher{ vals: make(map[string]*string), } } // Get returns the value associated with the given key as stored in the local // cache, or in the operating system's environment variables. // // If there was a cache-hit, the value will be returned from the cache, skipping // a check against os.Getenv. Otherwise, the value will be fetched from the // system, stored in the cache, and then returned. If no value was present in // the cache or in the system, an empty string will be returned. // // Get is safe to call across multiple goroutines. func (o *OsFetcher) Get(key string) (val string, ok bool) { o.vmu.Lock() defer o.vmu.Unlock() if i, ok := o.vals[key]; ok { if i == nil { return "", false } return *i, true } v, ok := os.LookupEnv(key) if ok { o.vals[key] = &v } else { o.vals[key] = nil } return v, ok } // GetAll implements the `config.Fetcher.GetAll` method by returning, at most, a // 1-ary set containing the result of `config.OsFetcher.Get()`. 
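//
// A hedged usage sketch: values come from the process environment and are
// memoized, so repeated lookups of the same key are served from the cache.
// The environment variable name below is hypothetical:
//
//	os.Setenv("GIT_LFS_EXAMPLE", "1")
//	f := config.NewOsFetcher()
//	v, ok := f.Get("GIT_LFS_EXAMPLE") // => "1", true
//	f.GetAll("GIT_LFS_EXAMPLE")       // => []string{"1"}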
func (o *OsFetcher) GetAll(key string) []string { if v, ok := o.Get(key); ok { return []string{v} } return make([]string, 0) } func (o *OsFetcher) All() map[string][]string { return nil } git-lfs-3.6.1/config/url_config.go000066400000000000000000000147661472372047300170400ustar00rootroot00000000000000package config import ( "fmt" "net/url" "regexp" "strings" ) type URLConfig struct { git Environment } func NewURLConfig(git Environment) *URLConfig { if git == nil { git = EnvironmentOf(make(mapFetcher)) } return &URLConfig{ git: git, } } // Get retrieves a `http.{url}.{key}` for the given key and urls, following the // rules in https://git-scm.com/docs/git-config#Documentation/git-config.txt-httplturlgt. // The value for `http.{key}` is returned as a fallback if no config keys are // set for the given urls. func (c *URLConfig) Get(prefix, rawurl, key string) (string, bool) { if c == nil { return "", false } key = strings.ToLower(key) prefix = strings.ToLower(prefix) if v := c.getAll(prefix, rawurl, key); len(v) > 0 { return v[len(v)-1], true } return c.git.Get(strings.Join([]string{prefix, key}, ".")) } func (c *URLConfig) GetAll(prefix, rawurl, key string) []string { if c == nil { return nil } key = strings.ToLower(key) prefix = strings.ToLower(prefix) if v := c.getAll(prefix, rawurl, key); len(v) > 0 { return v } return c.git.GetAll(strings.Join([]string{prefix, key}, ".")) } func (c *URLConfig) Bool(prefix, rawurl, key string, def bool) bool { s, _ := c.Get(prefix, rawurl, key) return Bool(s, def) } func (c *URLConfig) getAll(prefix, rawurl, key string) []string { type urlMatch struct { key string // The full configuration key hostScore int // A score indicating the strength of the host match pathScore int // A score indicating the strength of the path match userMatch int // Whether we matched on a username. 1 for yes, else 0 } searchURL, err := url.Parse(rawurl) if err != nil { return nil } config := c.git.All() re := regexp.MustCompile(fmt.Sprintf(`\A%s\.(\S+)\.%s\z`, prefix, key)) bestMatch := urlMatch{ key: "", hostScore: 0, pathScore: 0, userMatch: 0, } for k := range config { // Ensure we're examining the correct type of key and parse out the URL matches := re.FindStringSubmatch(k) if matches == nil { continue } configURL, err := url.Parse(matches[1]) if err != nil { continue } match := urlMatch{ key: k, } // Rule #1: Scheme must match exactly if searchURL.Scheme != configURL.Scheme { continue } // Rule #2: Hosts must match exactly, or through wildcards. More exact // matches should take priority over wildcard matches match.hostScore = compareHosts(searchURL.Hostname(), configURL.Hostname()) if match.hostScore == 0 { continue } if match.hostScore < bestMatch.hostScore { continue } // Rule #3: Port Number must match exactly if portForURL(searchURL) != portForURL(configURL) { continue } // Rule #4: Configured path must match exactly, or as a prefix of // slash-delimited path elements match.pathScore = comparePaths(searchURL.Path, configURL.Path) if match.pathScore == 0 { continue } // Rule #5: Username must match exactly if present in the config. // If not present, config matches on any username but with lower // priority than an exact username match. if configURL.User != nil { if searchURL.User == nil { continue } if searchURL.User.Username() != configURL.User.Username() { continue } match.userMatch = 1 } // Now combine our various scores to determine if we have found a best // match. 
Host score > path score > user score if match.hostScore > bestMatch.hostScore { bestMatch = match continue } if match.pathScore > bestMatch.pathScore { bestMatch = match continue } if match.pathScore == bestMatch.pathScore && match.userMatch > bestMatch.userMatch { bestMatch = match continue } } if bestMatch.key == "" { return nil } return c.git.GetAll(bestMatch.key) } func portForURL(u *url.URL) string { port := u.Port() if port != "" { return port } switch u.Scheme { case "http": return "80" case "https": return "443" case "ssh": return "22" default: return "" } } // compareHosts compares a hostname with a configuration hostname to determine // a match. It returns an integer indicating the strength of the match, or 0 if // the two hostnames did not match. func compareHosts(searchHostname, configHostname string) int { searchHost := strings.Split(searchHostname, ".") configHost := strings.Split(configHostname, ".") if len(searchHost) != len(configHost) { return 0 } score := len(searchHost) + 1 for i, subdomain := range searchHost { if configHost[i] == "*" { score-- continue } if subdomain != configHost[i] { return 0 } } return score } // comparePaths compares a path with a configuration path to determine a match. // It returns an integer indicating the strength of the match, or 0 if the two // paths did not match. func comparePaths(rawSearchPath, rawConfigPath string) int { f := func(c rune) bool { return c == '/' } searchPath := strings.FieldsFunc(rawSearchPath, f) configPath := strings.FieldsFunc(rawConfigPath, f) if len(searchPath) < len(configPath) { return 0 } // Start with a base score of 1, so we return something above 0 for a // zero-length path score := 1 for i, element := range configPath { searchElement := searchPath[i] if element == searchElement { score += 2 continue } if isDefaultLFSUrl(searchElement, searchPath, i+1) { if searchElement[0:len(searchElement)-4] == element { // Since we matched without the `.git` prefix, only add one // point to the score instead of 2 score++ continue } } return 0 } return score } func (c *URLConfig) hostsAndPaths(rawurl string) (hosts, paths []string) { u, err := url.Parse(rawurl) if err != nil { return nil, nil } return c.hosts(u), c.paths(u.Path) } func (c *URLConfig) hosts(u *url.URL) []string { hosts := make([]string, 0, 1) if u.User != nil { hosts = append(hosts, fmt.Sprintf("%s://%s@%s", u.Scheme, u.User.Username(), u.Host)) } hosts = append(hosts, fmt.Sprintf("%s://%s", u.Scheme, u.Host)) return hosts } func (c *URLConfig) paths(path string) []string { pLen := len(path) if pLen <= 2 { return nil } end := pLen if strings.HasSuffix(path, slash) { end-- } return strings.Split(path[1:end], slash) } const ( gitExt = ".git" infoPart = "info" lfsPart = "lfs" slash = "/" ) func isDefaultLFSUrl(path string, parts []string, index int) bool { if len(path) < 5 { return false // shorter than ".git" } if !strings.HasSuffix(path, gitExt) { return false } if index > len(parts)-2 { return false } return parts[index] == infoPart && parts[index+1] == lfsPart } git-lfs-3.6.1/config/url_config_test.go000066400000000000000000000064301472372047300200640ustar00rootroot00000000000000package config import ( "testing" "github.com/stretchr/testify/assert" ) func TestURLConfig(t *testing.T) { u := NewURLConfig(EnvironmentOf(MapFetcher(map[string][]string{ "http.key": []string{"root", "root-2"}, "http.https://host.com.key": []string{"host", "host-2"}, "http.https://user@host.com/a.key": []string{"user-a", "user-b"}, "http.https://user@host.com.key": []string{"user", 
"user-2"}, "http.https://host.com/a.key": []string{"host-a", "host-b"}, "http.https://host.com:8080.key": []string{"port", "port-2"}, "http.https://host.com/repo.git.key": []string{".git"}, "http.https://host.com/repo.key": []string{"no .git"}, "http.https://host.com/repo2.key": []string{"no .git"}, "http.http://host.com/repo.key": []string{"http"}, "http.https://host.com:443/repo3.git.key": []string{"port"}, "http.ssh://host.com:22/repo3.git.key": []string{"ssh-port"}, "http.https://host.*/a.key": []string{"wild"}, "httpXhttps://host.*/aXkey": []string{"invalid"}, }))) getOne := map[string]string{ "https://root.com/a/b/c": "root-2", "https://host.com/": "host-2", "https://host.com/a/b/c": "host-b", "https://user:pass@host.com/a/b/c": "user-b", "https://user:pass@host.com/z/b/c": "user-2", "https://host.com:8080/a": "port-2", "https://host.com/repo.git/info/lfs": ".git", "https://host.com/repo.git/info": ".git", "https://host.com/repo.git": ".git", "https://host.com/repo": "no .git", "https://host.com/repo2.git/info/lfs/foo/bar": "no .git", "https://host.com/repo2.git/info/lfs": "no .git", "https://host.com:443/repo2.git/info/lfs": "no .git", "https://host.com/repo2.git/info": "host-2", // doesn't match /.git/info/lfs\Z/ "https://host.com/repo2.git": "host-2", // ditto "https://host.com/repo3.git/info/lfs": "port", "ssh://host.com/repo3.git/info/lfs": "ssh-port", "https://host.com/repo2": "no .git", "http://host.com/repo": "http", "http://host.com:80/repo": "http", "https://host.wild/a/b/c": "wild", } for rawurl, expected := range getOne { value, _ := u.Get("http", rawurl, "key") assert.Equal(t, expected, value, "get one: "+rawurl) } value, _ := u.Get("http", "https://host.wild/a/b/c", "k") assert.Equal(t, value, "") value, _ = u.Get("ttp", "https://host.wild/a/b/c", "key") assert.Equal(t, value, "") getAll := map[string][]string{ "https://root.com/a/b/c": []string{"root", "root-2"}, "https://host.com/": []string{"host", "host-2"}, "https://host.com/a/b/c": []string{"host-a", "host-b"}, "https://user:pass@host.com/a/b/c": []string{"user-a", "user-b"}, "https://user:pass@host.com/z/b/c": []string{"user", "user-2"}, "https://host.com:8080/a": []string{"port", "port-2"}, } for rawurl, expected := range getAll { values := u.GetAll("http", rawurl, "key") assert.Equal(t, expected, values, "get all: "+rawurl) } } git-lfs-3.6.1/config/util_nix.go000066400000000000000000000003621472372047300165270ustar00rootroot00000000000000//go:build !windows // +build !windows package config import "syscall" func umask() int { // umask(2), which this function wraps, also sets the umask, so set it // back. umask := syscall.Umask(022) syscall.Umask(umask) return umask } git-lfs-3.6.1/config/util_windows.go000066400000000000000000000003451472372047300174240ustar00rootroot00000000000000//go:build windows // +build windows package config // Windows doesn't provide the umask syscall, so return something sane as a // default. os.Chmod will only care about the owner bits anyway. 
func umask() int {
	return 077
}

git-lfs-3.6.1/config/version.go

package config

import (
	"fmt"
	"runtime"
	"strings"
)

var (
	GitCommit   string
	VersionDesc string
	Vendor      string
)

const (
	Version = "3.6.1"
)

func init() {
	gitCommit := ""
	if len(GitCommit) > 0 {
		gitCommit = "; git " + GitCommit
	}
	if len(Vendor) == 0 {
		Vendor = "GitHub"
	}
	VersionDesc = fmt.Sprintf("git-lfs/%s (%s; %s %s; go %s%s)",
		Version,
		Vendor,
		runtime.GOOS,
		runtime.GOARCH,
		strings.Replace(runtime.Version(), "go", "", 1),
		gitCommit,
	)
}

git-lfs-3.6.1/creds/access.go

package creds

type AccessMode string

const (
	NoneAccess      AccessMode = "none"
	BasicAccess     AccessMode = "basic"
	PrivateAccess   AccessMode = "private"
	NegotiateAccess AccessMode = "negotiate"
	EmptyAccess     AccessMode = ""
)

type Access struct {
	mode AccessMode
	url  string
}

func NewAccess(mode AccessMode, url string) Access {
	return Access{url: url, mode: mode}
}

// Upgrade returns a copy of the Access with its mode upgraded to newMode.
func (a *Access) Upgrade(newMode AccessMode) Access {
	return Access{url: a.url, mode: newMode}
}

func (a *Access) Mode() AccessMode {
	return a.mode
}

func (a *Access) URL() string {
	return a.url
}

// AllAccessModes returns all access modes in the order they should be tried.
func AllAccessModes() []AccessMode {
	return []AccessMode{
		NoneAccess,
		NegotiateAccess,
		BasicAccess,
	}
}

git-lfs-3.6.1/creds/creds.go

package creds

import (
	"bytes"
	"fmt"
	"net/url"
	"os"
	"os/exec"
	"slices"
	"strings"
	"sync"

	"github.com/git-lfs/git-lfs/v3/config"
	"github.com/git-lfs/git-lfs/v3/errors"
	"github.com/git-lfs/git-lfs/v3/subprocess"
	"github.com/git-lfs/git-lfs/v3/tools"
	"github.com/git-lfs/git-lfs/v3/tr"
	"github.com/rubyist/tracerx"
)

// CredentialHelperWrapper encapsulates the information we need for credential
// handling during auth.
type CredentialHelperWrapper struct {
	CredentialHelper CredentialHelper
	Input            Creds
	Url              *url.URL
	Creds            Creds
}

// CredentialHelper is an interface used by the lfsapi Client to interact with
// the 'git credential' command: https://git-scm.com/docs/gitcredentials
// Other implementations include ASKPASS support, and an in-memory cache.
type CredentialHelper interface {
	Fill(Creds) (Creds, error)
	Reject(Creds) error
	Approve(Creds) error
}

func (credWrapper *CredentialHelperWrapper) FillCreds() error {
	creds, err := credWrapper.CredentialHelper.Fill(credWrapper.Input)
	if creds == nil || len(creds) < 1 {
		errmsg := tr.Tr.Get("Git credentials for %s not found", credWrapper.Url)
		if err != nil {
			errmsg = fmt.Sprintf("%s:\n%s", errmsg, err.Error())
		} else {
			errmsg = fmt.Sprintf("%s.", errmsg)
		}
		err = errors.New(errmsg)
	}
	credWrapper.Creds = creds
	return err
}

// Creds represents a set of key/value pairs that are passed to 'git credential'
// as input.
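//
// As an illustrative sketch grounded in the tests below, a Creds map such as
//
//	creds := Creds{
//		"protocol": []string{"https"},
//		"host":     []string{"example.com"},
//	}
//
// is serialized for `git credential` as the capability lines followed by one
// key=value line per entry (map iteration order is not guaranteed):
//
//	capability[]=authtype
//	capability[]=state
//	protocol=https
//	host=example.com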
type Creds map[string][]string func (c Creds) IsMultistage() bool { return slices.Contains([]string{"1", "true"}, FirstEntryForKey(c, "continue")) } func (c Creds) buffer(protectProtocol bool) (*bytes.Buffer, error) { buf := new(bytes.Buffer) buf.Write([]byte("capability[]=authtype\n")) buf.Write([]byte("capability[]=state\n")) for k, v := range c { for _, item := range v { if strings.Contains(item, "\n") { return nil, errors.Errorf(tr.Tr.Get("credential value for %s contains newline: %q", k, item)) } if protectProtocol && strings.Contains(item, "\r") { return nil, errors.Errorf(tr.Tr.Get("credential value for %s contains carriage return: %q\nIf this is intended, set `credential.protectProtocol=false`", k, item)) } if strings.Contains(item, string(rune(0))) { return nil, errors.Errorf(tr.Tr.Get("credential value for %s contains null byte: %q", k, item)) } buf.Write([]byte(k)) buf.Write([]byte("=")) buf.Write([]byte(item)) buf.Write([]byte("\n")) } } return buf, nil } type CredentialHelperContext struct { netrcCredHelper *netrcCredentialHelper commandCredHelper *commandCredentialHelper askpassCredHelper *AskPassCredentialHelper cachingCredHelper *credentialCacher urlConfig *config.URLConfig wwwAuthHeaders []string state []string } func NewCredentialHelperContext(gitEnv config.Environment, osEnv config.Environment) *CredentialHelperContext { c := &CredentialHelperContext{urlConfig: config.NewURLConfig(gitEnv)} c.netrcCredHelper = newNetrcCredentialHelper(osEnv) askpass, ok := osEnv.Get("GIT_ASKPASS") if !ok { askpass, ok = gitEnv.Get("core.askpass") } if !ok { askpass, _ = osEnv.Get("SSH_ASKPASS") } if len(askpass) > 0 { askpassfile, err := tools.TranslateCygwinPath(askpass) if err != nil { tracerx.Printf("Error reading askpass helper %q: %v", askpassfile, err) } if len(askpassfile) > 0 { c.askpassCredHelper = &AskPassCredentialHelper{ Program: askpassfile, } } } cacheCreds := gitEnv.Bool("lfs.cachecredentials", true) if cacheCreds { c.cachingCredHelper = NewCredentialCacher() } c.commandCredHelper = &commandCredentialHelper{ SkipPrompt: osEnv.Bool("GIT_TERMINAL_PROMPT", false), } return c } func (ctxt *CredentialHelperContext) SetWWWAuthHeaders(headers []string) { ctxt.wwwAuthHeaders = headers } func (ctxt *CredentialHelperContext) SetStateFields(fields []string) { ctxt.state = fields } // getCredentialHelper parses a 'credsConfig' from the git and OS environments, // returning the appropriate CredentialHelper to authenticate requests with. // // It returns an error if any configuration was invalid, or otherwise // un-useable. 
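//
// For illustration only, the kinds of Git configuration consulted here
// include entries such as the following (the values are examples, not
// defaults):
//
//	[credential]
//		helper = store
//	[credential "https://example.com"]
//		usehttppath = true
//		skipwwwauth = false
//		protectProtocol = true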
func (ctxt *CredentialHelperContext) GetCredentialHelper(helper CredentialHelper, u *url.URL) CredentialHelperWrapper { rawurl := fmt.Sprintf("%s://%s%s", u.Scheme, u.Host, u.Path) input := Creds{"protocol": []string{u.Scheme}, "host": []string{u.Host}} if u.User != nil && u.User.Username() != "" { input["username"] = []string{u.User.Username()} } if u.Scheme == "cert" || ctxt.urlConfig.Bool("credential", rawurl, "usehttppath", false) { input["path"] = []string{strings.TrimPrefix(u.Path, "/")} } if len(ctxt.wwwAuthHeaders) != 0 && !ctxt.urlConfig.Bool("credential", rawurl, "skipwwwauth", false) { input["wwwauth[]"] = ctxt.wwwAuthHeaders } if len(ctxt.state) != 0 { input["state[]"] = ctxt.state } if helper != nil { return CredentialHelperWrapper{CredentialHelper: helper, Input: input, Url: u} } helpers := make([]CredentialHelper, 0, 4) if ctxt.netrcCredHelper != nil { helpers = append(helpers, ctxt.netrcCredHelper) } if ctxt.cachingCredHelper != nil { helpers = append(helpers, ctxt.cachingCredHelper) } if ctxt.askpassCredHelper != nil { helper, _ := ctxt.urlConfig.Get("credential", rawurl, "helper") if len(helper) == 0 { helpers = append(helpers, ctxt.askpassCredHelper) } } ctxt.commandCredHelper.protectProtocol = ctxt.urlConfig.Bool("credential", rawurl, "protectProtocol", true) return CredentialHelperWrapper{CredentialHelper: NewCredentialHelpers(append(helpers, ctxt.commandCredHelper)), Input: input, Url: u} } // AskPassCredentialHelper implements the CredentialHelper type for GIT_ASKPASS // and 'core.askpass' configuration values. type AskPassCredentialHelper struct { // Program is the executable program's absolute or relative name. Program string } type credValueType int const ( credValueTypeUnknown credValueType = iota credValueTypeUsername credValueTypePassword ) // Fill implements fill by running the ASKPASS program and returning its output // as a password encoded in the Creds type given the key "password". // // It accepts the password as coming from the program's stdout, as when invoked // with the given arguments (see (*AskPassCredentialHelper).args() below)./ // // If there was an error running the command, it is returned instead of a set of // filled credentials. // // The ASKPASS program is only queried if a credential was not already // provided, i.e. through the git URL func (a *AskPassCredentialHelper) Fill(what Creds) (Creds, error) { u := &url.URL{ Scheme: FirstEntryForKey(what, "protocol"), Host: FirstEntryForKey(what, "host"), Path: FirstEntryForKey(what, "path"), } creds := make(Creds) username, err := a.getValue(what, credValueTypeUsername, u) if err != nil { return nil, err } creds["username"] = []string{username} if len(username) > 0 { // If a non-empty username was given, add it to the URL via func // 'net/url.User()'. 
u.User = url.User(username) } password, err := a.getValue(what, credValueTypePassword, u) if err != nil { return nil, err } creds["password"] = []string{password} return creds, nil } func (a *AskPassCredentialHelper) getValue(what Creds, valueType credValueType, u *url.URL) (string, error) { var valueString string switch valueType { case credValueTypeUsername: valueString = "username" case credValueTypePassword: valueString = "password" default: return "", errors.Errorf(tr.Tr.Get("Invalid Credential type queried from AskPass")) } // Return the existing credential if it was already provided, otherwise // query AskPass for it if given, ok := what[valueString]; ok && len(given) > 0 { return given[0], nil } return a.getFromProgram(valueType, u) } func (a *AskPassCredentialHelper) getFromProgram(valueType credValueType, u *url.URL) (string, error) { var ( value bytes.Buffer err bytes.Buffer valueString string ) switch valueType { case credValueTypeUsername: valueString = "Username" case credValueTypePassword: valueString = "Password" default: return "", errors.Errorf(tr.Tr.Get("Invalid Credential type queried from AskPass")) } // 'cmd' will run the GIT_ASKPASS (or core.askpass) command prompting // for the desired valueType (`Username` or `Password`) cmd, errVal := subprocess.ExecCommand(a.Program, a.args(fmt.Sprintf("%s for %q", valueString, u))...) if errVal != nil { tracerx.Printf("creds: failed to find GIT_ASKPASS command: %s", a.Program) return "", errVal } cmd.Stderr = &err cmd.Stdout = &value tracerx.Printf("creds: filling with GIT_ASKPASS: %s", strings.Join(cmd.Args, " ")) if err := cmd.Run(); err != nil { return "", err } if err.Len() > 0 { return "", errors.New(err.String()) } return strings.TrimSpace(value.String()), nil } // Approve implements CredentialHelper.Approve, and returns nil. The ASKPASS // credential helper does not implement credential approval. func (a *AskPassCredentialHelper) Approve(_ Creds) error { return nil } // Reject implements CredentialHelper.Reject, and returns nil. The ASKPASS // credential helper does not implement credential rejection. func (a *AskPassCredentialHelper) Reject(_ Creds) error { return nil } // args returns the arguments given to the ASKPASS program, if a prompt was // given. // See: https://git-scm.com/docs/gitcredentials#_requesting_credentials for // more. 
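//
// An illustrative invocation, assuming an askpass program installed at the
// hypothetical path /usr/local/bin/askpass:
//
//	/usr/local/bin/askpass 'Username for "https://example.com"'
//	/usr/local/bin/askpass 'Password for "https://user@example.com"'
//
// Each prompt is passed as the single argument, and the reply is read from
// the program's standard output.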
func (a *AskPassCredentialHelper) args(prompt string) []string { if len(prompt) == 0 { return nil } return []string{prompt} } type commandCredentialHelper struct { SkipPrompt bool protectProtocol bool } func (h *commandCredentialHelper) Fill(creds Creds) (Creds, error) { tracerx.Printf("creds: git credential fill (%q, %q, %q)", FirstEntryForKey(creds, "protocol"), FirstEntryForKey(creds, "host"), FirstEntryForKey(creds, "path")) return h.exec("fill", creds) } func (h *commandCredentialHelper) Reject(creds Creds) error { _, err := h.exec("reject", creds) return err } func (h *commandCredentialHelper) Approve(creds Creds) error { tracerx.Printf("creds: git credential approve (%q, %q, %q)", FirstEntryForKey(creds, "protocol"), FirstEntryForKey(creds, "host"), FirstEntryForKey(creds, "path")) _, err := h.exec("approve", creds) return err } func (h *commandCredentialHelper) exec(subcommand string, input Creds) (Creds, error) { output := new(bytes.Buffer) cmd, err := subprocess.ExecCommand("git", "credential", subcommand) if err != nil { return nil, errors.New(tr.Tr.Get("failed to find `git credential %s`: %v", subcommand, err)) } cmd.Stdin, err = input.buffer(h.protectProtocol) if err != nil { return nil, errors.New(tr.Tr.Get("invalid input to `git credential %s`: %v", subcommand, err)) } cmd.Stdout = output /* There is a reason we don't read from stderr here: Git's credential cache daemon helper does not close its stderr, so if this process is the process that fires up the daemon, it will wait forever (until the daemon exits, really) trying to read from stderr. Instead, we simply pass it through to our stderr. See https://github.com/git-lfs/git-lfs/issues/117 for more details. */ cmd.Stderr = os.Stderr err = cmd.Start() if err == nil { err = cmd.Wait() } if _, ok := err.(*exec.ExitError); ok { if h.SkipPrompt { return nil, errors.New(tr.Tr.Get("change the GIT_TERMINAL_PROMPT env var to be prompted to enter your credentials for %s://%s", FirstEntryForKey(input, "protocol"), FirstEntryForKey(input, "host"))) } // 'git credential' exits with 128 if the helper doesn't fill the username // and password values. 
		if subcommand == "fill" && err.Error() == "exit status 128" {
			return nil, nil
		}
	}

	if err != nil {
		return nil, errors.New(tr.Tr.Get("`git credential %s` error: %s", subcommand, err.Error()))
	}

	creds := make(Creds)
	for _, line := range strings.Split(output.String(), "\n") {
		pieces := strings.SplitN(line, "=", 2)
		if len(pieces) < 2 || len(pieces[1]) < 1 {
			continue
		}
		if _, ok := creds[pieces[0]]; ok {
			creds[pieces[0]] = append(creds[pieces[0]], pieces[1])
		} else {
			creds[pieces[0]] = []string{pieces[1]}
		}
	}

	return creds, nil
}

type credentialCacher struct {
	creds map[string]Creds
	mu    sync.Mutex
}

func NewCredentialCacher() *credentialCacher {
	return &credentialCacher{creds: make(map[string]Creds)}
}

func credCacheKey(creds Creds) string {
	parts := []string{
		FirstEntryForKey(creds, "protocol"),
		FirstEntryForKey(creds, "host"),
		FirstEntryForKey(creds, "path"),
	}
	return strings.Join(parts, "//")
}

func (c *credentialCacher) Fill(what Creds) (Creds, error) {
	key := credCacheKey(what)

	c.mu.Lock()
	cached, ok := c.creds[key]
	c.mu.Unlock()

	if ok {
		tracerx.Printf("creds: git credential cache (%q, %q, %q)",
			FirstEntryForKey(what, "protocol"),
			FirstEntryForKey(what, "host"),
			FirstEntryForKey(what, "path"))
		return cached, nil
	}

	return nil, credHelperNoOp
}

func (c *credentialCacher) Approve(what Creds) error {
	key := credCacheKey(what)

	c.mu.Lock()
	defer c.mu.Unlock()

	if _, ok := c.creds[key]; ok {
		return nil
	}

	c.creds[key] = what
	return credHelperNoOp
}

func (c *credentialCacher) Reject(what Creds) error {
	key := credCacheKey(what)

	c.mu.Lock()
	delete(c.creds, key)
	c.mu.Unlock()

	return credHelperNoOp
}

// CredentialHelpers is a []CredentialHelper that iterates through each
// credential helper to fill, reject, or approve credentials. Typically, the
// first success returns immediately. Errors are reported to tracerx, unless
// all credential helpers return errors. Any erroring credential helpers are
// skipped for future calls.
//
// A CredentialHelper can return a credHelperNoOp error, signaling that the
// CredentialHelpers should try the next one.
type CredentialHelpers struct {
	helpers        []CredentialHelper
	skippedHelpers map[int]bool
	mu             sync.Mutex
}

// NewCredentialHelpers initializes a new CredentialHelpers from the given
// slice of CredentialHelper instances.
func NewCredentialHelpers(helpers []CredentialHelper) CredentialHelper {
	return &CredentialHelpers{
		helpers:        helpers,
		skippedHelpers: make(map[int]bool),
	}
}

var credHelperNoOp = errors.New("no-op!")

// Fill implements CredentialHelper.Fill by asking each CredentialHelper in
// order to fill the credentials.
//
// If a fill was successful, it is returned immediately, and no other
// `CredentialHelper`s are consulted. If any CredentialHelper returns an error,
// it is reported to tracerx, and the next one is attempted. If they all error,
// then a collection of all the error messages is returned. Erroring credential
// helpers are added to the skip list, and never attempted again for the
// lifetime of the current Git LFS command.
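//
// A minimal usage sketch (the helper composition here is illustrative; real
// callers construct the chain via GetCredentialHelper above):
//
//	h := NewCredentialHelpers([]CredentialHelper{
//		NewCredentialCacher(),
//		&commandCredentialHelper{},
//	})
//	filled, err := h.Fill(Creds{
//		"protocol": []string{"https"},
//		"host":     []string{"example.com"},
//	})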
func (s *CredentialHelpers) Fill(what Creds) (Creds, error) { errs := make([]string, 0, len(s.helpers)) for i, h := range s.helpers { if s.skipped(i) { continue } creds, err := h.Fill(what) if err != nil { if err != credHelperNoOp { s.skip(i) tracerx.Printf("credential fill error: %s", err) errs = append(errs, err.Error()) } continue } if creds != nil { return creds, nil } } if len(errs) > 0 { return nil, errors.New(tr.Tr.Get("credential fill errors:\n%s", strings.Join(errs, "\n"))) } return nil, nil } // Reject implements CredentialHelper.Reject and rejects the given Creds "what" // with the first successful attempt. func (s *CredentialHelpers) Reject(what Creds) error { for i, h := range s.helpers { if s.skipped(i) { continue } if err := h.Reject(what); err != credHelperNoOp { return err } } return errors.New(tr.Tr.Get("no valid credential helpers to reject")) } // Approve implements CredentialHelper.Approve and approves the given Creds // "what" with the first successful CredentialHelper. If an error occurs, // it calls Reject() with the same Creds and returns the error immediately. This // ensures a caching credential helper removes the cache, since the Erroring // CredentialHelper never successfully saved it. func (s *CredentialHelpers) Approve(what Creds) error { skipped := make(map[int]bool) for i, h := range s.helpers { if s.skipped(i) { skipped[i] = true continue } if err := h.Approve(what); err != credHelperNoOp { if err != nil && i > 0 { // clear any cached approvals for j := 0; j < i; j++ { if !skipped[j] { s.helpers[j].Reject(what) } } } return err } } return errors.New(tr.Tr.Get("no valid credential helpers to approve")) } func (s *CredentialHelpers) skip(i int) { s.mu.Lock() s.skippedHelpers[i] = true s.mu.Unlock() } func (s *CredentialHelpers) skipped(i int) bool { s.mu.Lock() skipped := s.skippedHelpers[i] s.mu.Unlock() return skipped } type nullCredentialHelper struct{} var ( nullCredError = errors.New(tr.Tr.Get("No credential helper configured")) NullCreds = &nullCredentialHelper{} ) func (h *nullCredentialHelper) Fill(input Creds) (Creds, error) { return nil, nullCredError } func (h *nullCredentialHelper) Approve(creds Creds) error { return nil } func (h *nullCredentialHelper) Reject(creds Creds) error { return nil } // FirstEntryForKey extracts and returns the first entry for a given key, or // returns the empty string if no value for that key is available. 
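//
// For example (illustrative):
//
//	c := Creds{"host": []string{"one.example.com", "two.example.com"}}
//	FirstEntryForKey(c, "host")    // => "one.example.com"
//	FirstEntryForKey(c, "missing") // => ""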
func FirstEntryForKey(input Creds, key string) string { if val, ok := input[key]; ok && len(val) > 0 { return val[0] } return "" } git-lfs-3.6.1/creds/creds_nix.go000066400000000000000000000001241472372047300165010ustar00rootroot00000000000000//go:build !windows // +build !windows package creds var netrcBasename = ".netrc" git-lfs-3.6.1/creds/creds_test.go000066400000000000000000000237271472372047300167000ustar00rootroot00000000000000package creds import ( "bytes" "errors" "slices" "strings" "testing" "github.com/stretchr/testify/assert" ) func assertCredsLinesMatch(t *testing.T, expected []string, buf *bytes.Buffer) { expected = append(expected, "") actual := strings.SplitAfter(buf.String(), "\n") slices.Sort(expected) slices.Sort(actual) assert.Equal(t, expected, actual) } func TestCredsBufferFormat(t *testing.T) { creds := make(Creds) expected := []string{"capability[]=authtype\n", "capability[]=state\n"} buf, err := creds.buffer(true) assert.NoError(t, err) assertCredsLinesMatch(t, expected, buf) creds["protocol"] = []string{"https"} creds["host"] = []string{"example.com"} expectedPrefix := strings.Join(expected, "") expected = append(expected, "protocol=https\n", "host=example.com\n") buf, err = creds.buffer(true) assert.NoError(t, err) assert.True(t, strings.HasPrefix(buf.String(), expectedPrefix)) assertCredsLinesMatch(t, expected, buf) creds["wwwauth[]"] = []string{"Basic realm=test", "Negotiate"} expected = append(expected, "wwwauth[]=Basic realm=test\n") expected = append(expected, "wwwauth[]=Negotiate\n") buf, err = creds.buffer(true) assert.NoError(t, err) assert.True(t, strings.HasPrefix(buf.String(), expectedPrefix)) assertCredsLinesMatch(t, expected, buf) } func TestCredsBufferProtect(t *testing.T) { creds := make(Creds) // Always disallow LF characters creds["protocol"] = []string{"https"} creds["host"] = []string{"one.example.com\nhost=two.example.com"} buf, err := creds.buffer(false) assert.Error(t, err) assert.Nil(t, buf) buf, err = creds.buffer(true) assert.Error(t, err) assert.Nil(t, buf) // Disallow CR characters unless protocol protection disabled creds["host"] = []string{"one.example.com\rhost=two.example.com"} expected := []string{ "capability[]=authtype\n", "capability[]=state\n", "protocol=https\n", "host=one.example.com\rhost=two.example.com\n", } buf, err = creds.buffer(false) assert.NoError(t, err) assertCredsLinesMatch(t, expected, buf) buf, err = creds.buffer(true) assert.Error(t, err) assert.Nil(t, buf) // Always disallow null bytes creds["host"] = []string{"one.example.com\x00host=two.example.com"} buf, err = creds.buffer(false) assert.Error(t, err) assert.Nil(t, buf) buf, err = creds.buffer(true) assert.Error(t, err) assert.Nil(t, buf) } type testCredHelper struct { fillErr error approveErr error rejectErr error fill []Creds approve []Creds reject []Creds } func newTestCredHelper() *testCredHelper { return &testCredHelper{ fill: make([]Creds, 0), approve: make([]Creds, 0), reject: make([]Creds, 0), } } func (h *testCredHelper) Fill(input Creds) (Creds, error) { h.fill = append(h.fill, input) return input, h.fillErr } func (h *testCredHelper) Approve(creds Creds) error { h.approve = append(h.approve, creds) return h.approveErr } func (h *testCredHelper) Reject(creds Creds) error { h.reject = append(h.reject, creds) return h.rejectErr } func TestCredHelperSetNoErrors(t *testing.T) { cache := NewCredentialCacher() helper1 := newTestCredHelper() helper2 := newTestCredHelper() helpers := NewCredentialHelpers([]CredentialHelper{cache, helper1, helper2}) creds 
:= Creds{"protocol": []string{"https"}, "host": []string{"example.com"}} out, err := helpers.Fill(creds) assert.Nil(t, err) assert.Equal(t, creds, out) assert.Equal(t, 1, len(helper1.fill)) assert.Equal(t, 0, len(helper2.fill)) // calling Fill() with empty cache out, err = helpers.Fill(creds) assert.Nil(t, err) assert.Equal(t, creds, out) assert.Equal(t, 2, len(helper1.fill)) assert.Equal(t, 0, len(helper2.fill)) credsWithPass := Creds{ "protocol": []string{"https"}, "host": []string{"example.com"}, "username": []string{"foo"}, "password": []string{"bar"}, } assert.Nil(t, helpers.Approve(credsWithPass)) assert.Equal(t, 1, len(helper1.approve)) assert.Equal(t, 0, len(helper2.approve)) // calling Approve() again is cached assert.Nil(t, helpers.Approve(credsWithPass)) assert.Equal(t, 1, len(helper1.approve)) assert.Equal(t, 0, len(helper2.approve)) // access cache for i := 0; i < 3; i++ { out, err = helpers.Fill(creds) assert.Nil(t, err) assert.Equal(t, credsWithPass, out) assert.Equal(t, 2, len(helper1.fill)) assert.Equal(t, 0, len(helper2.fill)) } assert.Nil(t, helpers.Reject(creds)) assert.Equal(t, 1, len(helper1.reject)) assert.Equal(t, 0, len(helper2.reject)) // Reject() is never cached assert.Nil(t, helpers.Reject(creds)) assert.Equal(t, 2, len(helper1.reject)) assert.Equal(t, 0, len(helper2.reject)) // calling Fill() with empty cache out, err = helpers.Fill(creds) assert.Nil(t, err) assert.Equal(t, creds, out) assert.Equal(t, 3, len(helper1.fill)) assert.Equal(t, 0, len(helper2.fill)) } func TestCredHelperSetFillError(t *testing.T) { cache := NewCredentialCacher() helper1 := newTestCredHelper() helper2 := newTestCredHelper() helpers := NewCredentialHelpers([]CredentialHelper{cache, helper1, helper2}) creds := Creds{"protocol": []string{"https"}, "host": []string{"example.com"}} helper1.fillErr = errors.New("boom") out, err := helpers.Fill(creds) assert.Nil(t, err) assert.Equal(t, creds, out) assert.Equal(t, 1, len(helper1.fill)) assert.Equal(t, 1, len(helper2.fill)) assert.Nil(t, helpers.Approve(creds)) assert.Equal(t, 0, len(helper1.approve)) assert.Equal(t, 1, len(helper2.approve)) // Fill() with cache for i := 0; i < 3; i++ { out, err = helpers.Fill(creds) assert.Nil(t, err) assert.Equal(t, creds, out) assert.Equal(t, 1, len(helper1.fill)) assert.Equal(t, 1, len(helper2.fill)) } assert.Nil(t, helpers.Reject(creds)) assert.Equal(t, 0, len(helper1.reject)) assert.Equal(t, 1, len(helper2.reject)) // Fill() with empty cache out, err = helpers.Fill(creds) assert.Nil(t, err) assert.Equal(t, creds, out) assert.Equal(t, 1, len(helper1.fill)) // still skipped assert.Equal(t, 2, len(helper2.fill)) } func TestCredHelperSetApproveError(t *testing.T) { cache := NewCredentialCacher() helper1 := newTestCredHelper() helper2 := newTestCredHelper() helpers := NewCredentialHelpers([]CredentialHelper{cache, helper1, helper2}) creds := Creds{"protocol": []string{"https"}, "host": []string{"example.com"}} approveErr := errors.New("boom") helper1.approveErr = approveErr out, err := helpers.Fill(creds) assert.Nil(t, err) assert.Equal(t, creds, out) assert.Equal(t, 1, len(helper1.fill)) assert.Equal(t, 0, len(helper2.fill)) assert.Equal(t, approveErr, helpers.Approve(creds)) assert.Equal(t, 1, len(helper1.approve)) assert.Equal(t, 0, len(helper2.approve)) // cache is never set out, err = helpers.Fill(creds) assert.Nil(t, err) assert.Equal(t, creds, out) assert.Equal(t, 2, len(helper1.fill)) assert.Equal(t, 0, len(helper2.fill)) assert.Nil(t, helpers.Reject(creds)) assert.Equal(t, 1, len(helper1.reject)) 
assert.Equal(t, 0, len(helper2.reject)) } func TestCredHelperSetFillAndApproveError(t *testing.T) { cache := NewCredentialCacher() helper1 := newTestCredHelper() helper2 := newTestCredHelper() helpers := NewCredentialHelpers([]CredentialHelper{cache, helper1, helper2}) creds := Creds{"protocol": []string{"https"}, "host": []string{"example.com"}} credErr := errors.New("boom") helper1.fillErr = credErr helper2.approveErr = credErr out, err := helpers.Fill(creds) assert.Nil(t, err) assert.Equal(t, creds, out) assert.Equal(t, 1, len(helper1.fill)) assert.Equal(t, 1, len(helper2.fill)) assert.Equal(t, credErr, helpers.Approve(creds)) assert.Equal(t, 0, len(helper1.approve)) // skipped assert.Equal(t, 0, len(helper1.reject)) // skipped assert.Equal(t, 1, len(helper2.approve)) // never approved, so cache is empty out, err = helpers.Fill(creds) assert.Nil(t, err) assert.Equal(t, creds, out) assert.Equal(t, 1, len(helper1.fill)) // still skipped assert.Equal(t, 2, len(helper2.fill)) } func TestCredHelperSetRejectError(t *testing.T) { cache := NewCredentialCacher() helper1 := newTestCredHelper() helper2 := newTestCredHelper() helpers := NewCredentialHelpers([]CredentialHelper{cache, helper1, helper2}) creds := Creds{"protocol": []string{"https"}, "host": []string{"example.com"}} rejectErr := errors.New("boom") helper1.rejectErr = rejectErr out, err := helpers.Fill(creds) assert.Nil(t, err) assert.Equal(t, creds, out) assert.Equal(t, 1, len(helper1.fill)) assert.Equal(t, 0, len(helper2.fill)) assert.Nil(t, helpers.Approve(creds)) assert.Equal(t, 1, len(helper1.approve)) assert.Equal(t, 0, len(helper2.approve)) // Fill() with cache out, err = helpers.Fill(creds) assert.Nil(t, err) assert.Equal(t, creds, out) assert.Equal(t, 1, len(helper1.fill)) assert.Equal(t, 0, len(helper2.fill)) assert.Equal(t, rejectErr, helpers.Reject(creds)) assert.Equal(t, 1, len(helper1.reject)) assert.Equal(t, 0, len(helper2.reject)) // failed Reject() still clears cache out, err = helpers.Fill(creds) assert.Nil(t, err) assert.Equal(t, creds, out) assert.Equal(t, 2, len(helper1.fill)) assert.Equal(t, 0, len(helper2.fill)) } func TestCredHelperSetAllFillErrors(t *testing.T) { cache := NewCredentialCacher() helper1 := newTestCredHelper() helper2 := newTestCredHelper() helpers := NewCredentialHelpers([]CredentialHelper{cache, helper1, helper2}) creds := Creds{"protocol": []string{"https"}, "host": []string{"example.com"}} helper1.fillErr = errors.New("boom 1") helper2.fillErr = errors.New("boom 2") out, err := helpers.Fill(creds) if assert.NotNil(t, err) { assert.Equal(t, "credential fill errors:\nboom 1\nboom 2", err.Error()) } assert.Nil(t, out) assert.Equal(t, 1, len(helper1.fill)) assert.Equal(t, 1, len(helper2.fill)) err = helpers.Approve(creds) if assert.NotNil(t, err) { assert.Equal(t, "no valid credential helpers to approve", err.Error()) } assert.Equal(t, 0, len(helper1.approve)) assert.Equal(t, 0, len(helper2.approve)) err = helpers.Reject(creds) if assert.NotNil(t, err) { assert.Equal(t, "no valid credential helpers to reject", err.Error()) } assert.Equal(t, 0, len(helper1.reject)) assert.Equal(t, 0, len(helper2.reject)) } git-lfs-3.6.1/creds/creds_windows.go000066400000000000000000000001221472372047300173730ustar00rootroot00000000000000//go:build windows // +build windows package creds var netrcBasename = "_netrc" git-lfs-3.6.1/creds/netrc.go000066400000000000000000000067461472372047300156560ustar00rootroot00000000000000package creds import ( "net" "os" "path/filepath" "strings" "sync" 
"github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/go-netrc/netrc" "github.com/rubyist/tracerx" ) type NetrcFinder interface { FindMachine(string, string) *netrc.Machine } func ParseNetrc(osEnv config.Environment) (NetrcFinder, string, error) { home, _ := osEnv.Get("HOME") if len(home) == 0 { return &noFinder{}, "", nil } nrcfilename := filepath.Join(home, netrcBasename) if _, err := os.Stat(nrcfilename); err != nil { return &noFinder{}, nrcfilename, nil } f, err := netrc.ParseFile(nrcfilename) return f, nrcfilename, err } type noFinder struct{} func (f *noFinder) FindMachine(host string, loginName string) *netrc.Machine { return nil } // NetrcCredentialHelper retrieves credentials from a .netrc file type netrcCredentialHelper struct { netrcFinder NetrcFinder mu sync.Mutex skip map[string]bool } var defaultNetrcFinder = &noFinder{} // NewNetrcCredentialHelper creates a new netrc credential helper using a // .netrc file gleaned from the OS environment func newNetrcCredentialHelper(osEnv config.Environment) *netrcCredentialHelper { netrcFinder, netrcfile, err := ParseNetrc(osEnv) if err != nil { tracerx.Printf("bad netrc file %s: %s", netrcfile, err) return nil } if netrcFinder == nil { netrcFinder = defaultNetrcFinder } return &netrcCredentialHelper{netrcFinder: netrcFinder, skip: make(map[string]bool)} } func (c *netrcCredentialHelper) Fill(what Creds) (Creds, error) { host, err := getNetrcHostname(FirstEntryForKey(what, "host")) if err != nil { return nil, credHelperNoOp } c.mu.Lock() defer c.mu.Unlock() if c.skip[host] { return nil, credHelperNoOp } if machine := c.netrcFinder.FindMachine(host, FirstEntryForKey(what, "username")); machine != nil { creds := make(Creds) creds["username"] = []string{machine.Login} creds["password"] = []string{machine.Password} creds["protocol"] = what["protocol"] creds["host"] = what["host"] creds["scheme"] = what["scheme"] creds["path"] = what["path"] creds["source"] = []string{"netrc"} tracerx.Printf("netrc: git credential fill (%q, %q, %q, %q)", FirstEntryForKey(what, "protocol"), FirstEntryForKey(what, "host"), machine.Login, FirstEntryForKey(what, "path")) return creds, nil } return nil, credHelperNoOp } func getNetrcHostname(hostname string) (string, error) { if strings.Contains(hostname, ":") { host, _, err := net.SplitHostPort(hostname) if err != nil { tracerx.Printf("netrc: error parsing %q: %s", hostname, err) return "", err } return host, nil } return hostname, nil } func (c *netrcCredentialHelper) Approve(what Creds) error { if FirstEntryForKey(what, "source") == "netrc" { host, err := getNetrcHostname(FirstEntryForKey(what, "host")) if err != nil { return credHelperNoOp } tracerx.Printf("netrc: git credential approve (%q, %q, %q)", FirstEntryForKey(what, "protocol"), FirstEntryForKey(what, "host"), FirstEntryForKey(what, "path")) c.mu.Lock() c.skip[host] = false c.mu.Unlock() return nil } return credHelperNoOp } func (c *netrcCredentialHelper) Reject(what Creds) error { if FirstEntryForKey(what, "source") == "netrc" { host, err := getNetrcHostname(what["host"][0]) if err != nil { return credHelperNoOp } tracerx.Printf("netrc: git credential reject (%q, %q, %q)", what["protocol"], what["host"], what["path"]) c.mu.Lock() c.skip[host] = true c.mu.Unlock() return nil } return credHelperNoOp } git-lfs-3.6.1/creds/netrc_test.go000066400000000000000000000034751472372047300167110ustar00rootroot00000000000000package creds import ( "strings" "testing" "github.com/git-lfs/go-netrc/netrc" ) func TestNetrcWithHostAndPort(t *testing.T) { var 
netrcHelper netrcCredentialHelper netrcHelper.netrcFinder = &fakeNetrc{} what := make(Creds) what["protocol"] = []string{"http"} what["host"] = []string{"netrc-host:123"} what["path"] = []string{"/foo/bar"} creds, err := netrcHelper.Fill(what) if err != nil { t.Fatalf("error retrieving netrc credentials: %s", err) } username := creds["username"][0] if username != "abc" { t.Fatalf("bad username: %s", username) } password := creds["password"][0] if password != "def" { t.Fatalf("bad password: %s", password) } } func TestNetrcWithHost(t *testing.T) { var netrcHelper netrcCredentialHelper netrcHelper.netrcFinder = &fakeNetrc{} what := make(Creds) what["protocol"] = []string{"http"} what["host"] = []string{"netrc-host"} what["path"] = []string{"/foo/bar"} creds, err := netrcHelper.Fill(what) if err != nil { t.Fatalf("error retrieving netrc credentials: %s", err) } username := creds["username"][0] if username != "abc" { t.Fatalf("bad username: %s", username) } password := creds["password"][0] if password != "def" { t.Fatalf("bad password: %s", password) } } func TestNetrcWithBadHost(t *testing.T) { var netrcHelper netrcCredentialHelper netrcHelper.netrcFinder = &fakeNetrc{} what := make(Creds) what["protocol"] = []string{"http"} what["host"] = []string{"other-host"} what["path"] = []string{"/foo/bar"} _, err := netrcHelper.Fill(what) if err != credHelperNoOp { t.Fatalf("expected no-op for unknown host other-host") } } type fakeNetrc struct{} func (n *fakeNetrc) FindMachine(host string, loginName string) *netrc.Machine { if strings.Contains(host, "netrc") { return &netrc.Machine{Login: "abc", Password: "def"} } return nil } git-lfs-3.6.1/debian/000077500000000000000000000000001472372047300143215ustar00rootroot00000000000000git-lfs-3.6.1/debian/changelog000066400000000000000000000201261472372047300161740ustar00rootroot00000000000000git-lfs (3.6.1) stable; urgency=low * New upstream version -- Chris Darroch Tue, 03 Dec 2024 14:29:00 -0000 git-lfs (3.6.0) stable; urgency=low * New upstream version -- Chris Darroch Wed, 20 Nov 2024 14:29:00 -0000 git-lfs (3.5.0) stable; urgency=low * New upstream version -- brian m. carlson Wed, 28 Feb 2024 14:29:00 -0000 git-lfs (3.4.0) stable; urgency=low * New upstream version -- Chris Darroch Wed, 26 Jul 2023 14:29:00 -0000 git-lfs (3.3.0) stable; urgency=low * New upstream version -- brian m. carlson Wed, 30 Nov 2022 14:29:00 -0000 git-lfs (3.2.0) stable; urgency=low * New upstream version -- brian m. carlson Wed, 25 May 2022 14:29:00 -0000 git-lfs (3.1.0) stable; urgency=low * New upstream version -- brian m. carlson Mon, 14 Feb 2022 14:29:00 -0000 git-lfs (3.0.0) stable; urgency=low * New upstream version -- brian m. carlson Fri, 24 Sep 2021 14:29:00 -0000 git-lfs (2.13.0) stable; urgency=low * New upstream version -- brian m. carlson Thu, 10 Dec 2020 14:29:00 -0000 git-lfs (2.12.0) stable; urgency=low * New upstream version -- brian m. carlson Tue, 1 Sep 2020 14:29:00 -0000 git-lfs (2.11.0) stable; urgency=low * New upstream version -- brian m. carlson Fri, 08 May 2020 14:29:00 -0000 git-lfs (2.10.0) stable; urgency=low * New upstream version -- brian m. carlson Tue, 21 Jan 2020 14:29:00 -0000 git-lfs (2.9.0) stable; urgency=low * New upstream version -- brian m. carlson Thu, 17 Oct 2019 14:29:00 -0000 git-lfs (2.8.0) stable; urgency=low * New upstream version -- brian m. carlson Tue, 23 Jul 2019 14:29:00 -0000 git-lfs (2.7.0) stable; urgency=low * New upstream version -- brian m. 
carlson Fri, 15 Feb 2019 14:29:00 -0000 git-lfs (2.6.0) stable; urgency=low * New upstream version -- Taylor Blau Thu, 1 Nov 2018 14:29:00 +0000 git-lfs (2.5.2) stable; urgency=low * New upstream version -- Taylor Blau Mon, 17 Sep 2018 14:29:00 +0000 git-lfs (2.5.1) stable; urgency=low * New upstream version -- Taylor Blau Thu, 2 Aug 2018 14:29:00 +0000 git-lfs (2.5.0) stable; urgency=low * New upstream version -- Taylor Blau Thu, 26 Jul 2018 14:29:00 +0000 git-lfs (2.4.2) stable; urgency=low * New upstream version -- Taylor Blau Mon, 28 May 2018 14:29:00 +0000 git-lfs (2.4.1) stable; urgency=low * New upstream version -- Taylor Blau Mon, 21 May 2018 14:29:00 +0000 git-lfs (2.4.0) stable; urgency=low * New upstream version -- Taylor Blau Thu, 1 Mar 2018 14:29:00 +0000 git-lfs (2.3.4) stable; urgency=low * New upstream version -- Rick Olson Wed, 18 Oct 2017 14:29:00 +0000 git-lfs (2.3.3) stable; urgency=low * New upstream version -- Rick Olson Mon, 9 Oct 2017 14:29:00 +0000 git-lfs (2.3.2) stable; urgency=low * New upstream version -- Rick Olson Tue, 3 Oct 2017 14:29:00 +0000 git-lfs (2.3.1) stable; urgency=low * New upstream version -- Rick Olson Wed, 26 Sep 2017 14:29:00 +0000 git-lfs (2.3.0) stable; urgency=low * New upstream version -- Taylor Blau Thu, 14 Sep 2017 14:29:00 +0000 git-lfs (2.2.1) stable; urgency=low * New upstream version -- Taylor Blau Mon, 10 Jul 2017 14:29:00 +0000 git-lfs (2.2.0) stable; urgency=low * New upstream version -- Rick Olson Tue, 27 Jun 2017 14:29:00 +0000 git-lfs (2.1.1) stable; urgency=low * New upstream version -- Taylor Blau Fri, 19 May 2017 14:29:00 +0000 git-lfs (2.1.0) stable; urgency=low * New upstream version -- Taylor Blau Fri, 28 Apr 2017 14:29:00 +0000 git-lfs (2.0.2) stable; urgency=low * New upstream version -- Taylor Blau Wed, 29 Mar 2017 14:29:00 +0000 git-lfs (2.0.1) stable; urgency=low * New upstream version -- Taylor Blau Mon, 6 Mar 2017 14:29:00 +0000 git-lfs (2.0.0) stable; urgency=low * New upstream version -- Rick Olson Tue, 1 Mar 2017 14:29:00 +0000 git-lfs (1.5.6) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 16 Feb 2017 14:29:00 +0000 git-lfs (1.5.5) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 12 Jan 2017 14:29:00 +0000 git-lfs (1.5.4) stable; urgency=low * New upstream version -- Stephen Gelman Tue, 27 Dec 2016 14:29:00 +0000 git-lfs (1.5.3) stable; urgency=low * New upstream version -- Stephen Gelman Mon, 5 Dec 2016 14:29:00 +0000 git-lfs (1.5.2) stable; urgency=low * New upstream version -- Stephen Gelman Tue, 22 Nov 2016 14:29:00 +0000 git-lfs (1.5.1) stable; urgency=low * New upstream version -- Stephen Gelman Fri, 18 Nov 2016 14:29:00 +0000 git-lfs (1.5.0) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 17 Nov 2016 14:29:00 +0000 git-lfs (1.4.4) stable; urgency=low * New upstream version -- Stephen Gelman Mon, 24 Oct 2016 14:29:00 +0000 git-lfs (1.4.3) stable; urgency=low * New upstream version -- Stephen Gelman Mon, 17 Oct 2016 14:29:00 +0000 git-lfs (1.4.2) stable; urgency=low * New upstream version -- Stephen Gelman Mon, 10 Oct 2016 14:29:00 +0000 git-lfs (1.4.1) stable; urgency=low * New upstream version -- Stephen Gelman Fri, 26 Aug 2016 14:29:00 +0000 git-lfs (1.4.0) stable; urgency=low * New upstream version -- Stephen Gelman Fri, 19 Aug 2016 14:29:00 +0000 git-lfs (1.3.1) stable; urgency=low * New upstream version -- Stephen Gelman Tue, 2 Aug 2016 14:29:00 +0000 git-lfs (1.3.0) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 21 Jul 2016 
14:29:00 +0000 git-lfs (1.2.1) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 2 Jun 2016 14:29:00 +0000 git-lfs (1.2.0) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 14 Apr 2016 14:29:00 +0000 git-lfs (1.1.2) stable; urgency=low * New upstream version -- Stephen Gelman Tue, 1 Mar 2016 14:29:00 +0000 git-lfs (1.1.1) stable; urgency=low * New upstream version -- Stephen Gelman Wed, 4 Feb 2016 14:29:00 +0000 git-lfs (1.1.0) stable; urgency=low * New upstream version -- Stephen Gelman Wed, 12 Nov 2015 14:29:00 +0000 git-lfs (1.0.2) stable; urgency=low * New upstream version -- Stephen Gelman Wed, 28 Oct 2015 14:29:00 +0000 git-lfs (1.0.1) stable; urgency=low * New upstream version -- Stephen Gelman Fri, 23 Oct 2015 14:29:00 +0000 git-lfs (1.0.0) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 1 Oct 2015 14:29:00 +0000 git-lfs (0.6.0) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 10 Sep 2015 14:29:00 +0000 git-lfs (0.5.3) stable; urgency=low * New upstream version -- Stephen Gelman Fri, 24 Jul 2015 20:43:00 +0000 git-lfs (0.5.2) stable; urgency=low * New upstream version -- Stephen Gelman Fri, 12 Jun 2015 02:54:01 +0000 git-lfs (0.5.1) stable; urgency=medium * Initial release. -- Stephen Gelman Fri, 08 May 2015 22:55:45 +0000 git-lfs-3.6.1/debian/compat000066400000000000000000000000021472372047300155170ustar00rootroot000000000000009 git-lfs-3.6.1/debian/control000066400000000000000000000007411472372047300157260ustar00rootroot00000000000000Source: git-lfs Section: vcs Priority: optional Maintainer: Stephen Gelman Build-Depends: debhelper (>= 9), dh-golang, golang-go:native (>= 1.12.0), git (>= 1.8.2), asciidoctor Standards-Version: 3.9.6 Package: git-lfs Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, git (>= 1.8.2) Built-Using: ${misc:Built-Using} Description: Git Large File Support An open source Git extension for versioning large files Homepage: https://git-lfs.com/ git-lfs-3.6.1/debian/copyright000066400000000000000000000024211472372047300162530ustar00rootroot00000000000000Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: git-lfs Source: https://github.com/git-lfs/git-lfs Files: * Copyright: 2013-2015 Github, Inc. License: Expat Copyright (c) GitHub, Inc. and Git LFS contributors . Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: . The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. . THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
git-lfs-3.6.1/debian/git-lfs.lintian-overrides000066400000000000000000000004241472372047300212460ustar00rootroot00000000000000# Go only produces static binaries so read-only relocations aren't possible hardening-no-relro usr/bin/git-lfs # strip disabled as golang upstream doesn't support it and it makes go # crash. See https://launchpad.net/bugs/1200255. unstripped-binary-or-object usr/bin/git-lfs git-lfs-3.6.1/debian/git-lfs.manpages000066400000000000000000000000471472372047300174040ustar00rootroot00000000000000man/man1/*.1 man/man5/*.5 man/man7/*.7 git-lfs-3.6.1/debian/postinst000066400000000000000000000002261472372047300161270ustar00rootroot00000000000000#!/bin/sh # The --skip-repo option prevents failure if / is a Git repository with existing # non-git-lfs hooks. git lfs install --skip-repo --system git-lfs-3.6.1/debian/prerm000066400000000000000000000003101472372047300153630ustar00rootroot00000000000000#!/bin/sh # The --skip-repo option avoids mutating / if it is a Git repository. (Maybe the # user wants to replace this package with a different installation.) git lfs uninstall --skip-repo --system git-lfs-3.6.1/debian/rules000077500000000000000000000046371472372047300154130ustar00rootroot00000000000000#!/usr/bin/make -f export DH_OPTIONS #dh_golang doesn't do this for you ifeq ($(DEB_HOST_ARCH), i386) export GOARCH := 386 else ifeq ($(DEB_HOST_ARCH), amd64) export GOARCH := amd64 else ifeq ($(DEB_HOST_ARCH), armhf) export GOARCH := arm else ifeq ($(DEB_HOST_ARCH), arm64) export GOARCH := arm64 endif BUILD_DIR := obj-$(DEB_HOST_GNU_TYPE) export DH_GOPKG := github.com/git-lfs/git-lfs/v3 # DH_GOLANG_EXCLUDES typically incorporates vendor exclusions export DH_GOLANG_EXCLUDES := test github.com/olekukonko/ts/* github.com/xeipuuv/* github.com/spf13/cobra/* github.com/kr/* github.com/pkg/errors github.com/alexbrainman/sspi/* export DH_GOLANG_GO_GENERATE := 1 export PATH := $(CURDIR)/$(BUILD_DIR)/bin:$(PATH) # by-default, dh_golang only copies *.go and other source - this upsets a bunch of vendor test routines export DH_GOLANG_INSTALL_ALL := 1 export FORCE_LOCALIZE=true %: mkdir -p /tmp/gocache GO111MODULE=on GOFLAGS=-mod=vendor GOCACHE=/tmp/gocache dh $@ --buildsystem=golang --with=golang override_dh_clean: rm -f debian/debhelper.log rm -rf man dh_clean override_dh_auto_build: FORCE_LOCALIZE=true make trgen dh_auto_build #dh_golang doesn't do anything here in deb 8, and it's needed in both if [ "$(DEB_HOST_GNU_TYPE)" != "$(DEB_BUILD_GNU_TYPE)" ]; then\ cp -rf $(BUILD_DIR)/bin/*/* $(BUILD_DIR)/bin/; \ fi rm $(BUILD_DIR)/bin/man make man override_dh_strip: # strip disabled as golang upstream doesn't support it and it makes go # crash. See https://launchpad.net/bugs/1200255. override_dh_golang: # The dh_golang is used to add the Built-using field to the deb. This is only for reference. # As of https://anonscm.debian.org/cgit/collab-maint/dh-golang.git/commit/script/dh_golang?id=7c3fbec6ea92294477fa8910264fe9bd823f21c3 # dh_golang errors out because the go compiler used was not installed via a package. Therefore the step is skipped override_dh_auto_install: mkdir -p debian/git-lfs/usr/bin cp $(BUILD_DIR)/bin/git-lfs debian/git-lfs/usr/bin/ override_dh_auto_test: ln -s ../../../../../../../commands/repos $(BUILD_DIR)/src/github.com/git-lfs/git-lfs/v3/commands/repos ln -s ../../../../../bin $(BUILD_DIR)/src/github.com/git-lfs/git-lfs/v3/bin #dh_golang uses the wrong dir to test on. 
This tricks everything into being happy DEB_BUILD_GNU_TYPE=$(DEB_HOST_GNU_TYPE) dh_auto_test rm $(BUILD_DIR)/src/github.com/git-lfs/git-lfs/v3/commands/repos $(BUILD_DIR)/src/github.com/git-lfs/git-lfs/v3/bin git-lfs-3.6.1/debian/source/000077500000000000000000000000001472372047300156215ustar00rootroot00000000000000git-lfs-3.6.1/debian/source/format000066400000000000000000000000151472372047300170300ustar00rootroot000000000000003.0 (native) git-lfs-3.6.1/docker/000077500000000000000000000000001472372047300143465ustar00rootroot00000000000000git-lfs-3.6.1/docker/README.md000066400000000000000000000102241472372047300156240ustar00rootroot00000000000000# Git LFS Linux Docker Builds In order to build Linux distribution packages of Git LFS in both the Debian and RPM package formats and across multiple architectures, the GitHub Actions workflows for this repository run package build scripts in a set of Docker containers. The containers are based on Docker images created from the Dockerfiles in the `git-lfs/build-dockers` [repository](https://github.com/git-lfs/build-dockers). Each Docker image contains either the `debian_script.bsh` script or `centos_script.bsh` script from the `git-lfs/build-dockers` repository, as appropriate for the expected output package format. The relevant script is executed by default when the image is loaded in a container and builds Git LFS distribution packages for one or more architectures. The packages are written into the `/repo` directory, which is typically a mounted volume in the Docker container. ## Building Linux Packages The `docker/run_dockers.bsh` script in this repository provides a convenient way to run some or all of the Git LFS Docker builds, assuming the Docker images already exist. (The `build_dockers.bsh` script in the `git-lfs/build-dockers` repository may be used to create the Docker images.) When run without arguments, the `run_dockers.bsh` script will run builds for all of the OS versions listed by the `script/distro-tool` utility when it is passed the `--image-names` option. This list should match the available Dockerfiles in the `git-lfs/build-dockers` repository and therefore also the corresponding Docker images created by that repository's `build_dockers.bsh` script. The `run_dockers.bsh` script may also be used to run builds for only a subset of the available OS versions, e.g.: ``` $ docker/run_dockers.bsh debian_12 $ docker/run_dockers.bsh debian_12 rocky_9 ``` The resultant packages of Git LFS will be generated in directories named `./repos/{OS NAME}/{OS VERSION}`. Debian packages are written into those directories, while RPM packages are stored in further levels of subdirectories, e.g., `./repos/rocky/9/RPMS/x86_64`. The Docker containers created from each image are removed after use, unless the `AUTO_REMOVE` environment variable is set to a value other than `1`. The Docker images will be removed as well if the `--prune` command-line argument is supplied. By default, packages are built for the `amd64` architecture. Cross-platform builds may be performed using the `--arch={ARCH}` command-line option if the requested architecture is supported by the build script in the Docker image for the given OS and version. At present, only the `debian_script.bsh` build script in the Debian images supports cross-platform builds. 
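For example, assuming the `debian_12` image supports the requested architecture, an arm64 build could be run as:

```
$ docker/run_dockers.bsh --arch=arm64 debian_12
```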
If the current user has write permission on the `/var/run/docker.sock` file descriptor or belongs to the `docker` group, the `run_dockers.bsh` script will run the `docker` command directly; otherwise it will attempt to run the command using `sudo`. ### Environment Variables There are several environment variables which adjust the behavior of the `run_dockers.bsh` script: - `AUTO_REMOVE` - Default `1`. Docker containers are automatically deleted upon exit unless this variable is set to a value other than `1`. Retaining a container may be useful for debugging purposes. Note that the container must be removed manually in this case. - `DOCKER_OTHER_OPTIONS` - Any additional arguments to be passed to the Docker `run` command. ### Docker Image Development When developing or debugging the build scripts included in the Docker images, it may be valuable to start an interactive shell in a container instead of letting the build script run by default. To start the Bash shell, for example, use: ``` $ docker/run_dockers.bsh {OS NAME}_{OS VERSION} -- bash ``` Any command available in the Docker image may be executed instead of Bash by using the `--` separator, followed by the command to be run. ## Adding Docker Images To add another Docker image, a new Dockerfile needs to be committed to the `git-lfs/build-dockers` repository, and then a corresponding new entry should be added to the `script/lib/distro.rb` file in this repository with an `image` key whose value matches the OS name and version used in the name of the new Dockerfile. git-lfs-3.6.1/docker/run_dockers.bsh000077500000000000000000000062041472372047300173670ustar00rootroot00000000000000#!/usr/bin/env bash # Usage: # ./run_dockers.bsh # Run all the Docker images # ./run_dockers.bsh centos_6 centos_7 # Run only the CentOS 6 & 7 Docker images # ./run_dockers.bsh centos_6 -- bash # Run Bash in the CentOS 6 Docker image # ./run_dockers.bsh --prune # Remove each Docker image after running # ./run_dockers.bsh --arch= # Build for a specific architecture, # # e.g., arm64 # Special Environment Variables # AUTO_REMOVE - Default 1. If set to 0, the Docker container will not be # automatically deleted when done. This can be useful for a # post-mortem analysis. Just make sure you clean up the Docker # containers manually. set -eu #Mingw32 auto converts /drive/dir/blah to drive:\dir\blah ... Can't have that. if [[ `uname` == MINGW* ]]; then MINGW_PATCH='/' else MINGW_PATCH='' fi CUR_DIR=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd) REPO_DIR=$(cd ${CUR_DIR}/..; pwd) PACKAGE_DIR=${REPO_DIR}/repos mkdir -p ${PACKAGE_DIR}/centos || : mkdir -p ${PACKAGE_DIR}/debian || : #If you are not in docker group and you have sudo, default value is sudo : ${SUDO=`if ( [ ! 
-w /var/run/docker.sock ] && id -nG | grep -qwv docker && [ "${DOCKER_HOST:+dh}" != "dh" ] ) && command -v sudo > /dev/null 2>&1; then echo sudo; fi`} function split_image_name() { #$1 - image dockerfile #sets IMAGE_NAME to the basename of the dir containing the docker file #sets IMAGE_INFO to be the array name following my pattern local IFS=_ IMAGE_INFO=($1) } # Parse Arguments IMAGES= PRUNE= ARCH=amd64 while [[ $# -gt 0 ]]; do if [ "$1" = "--prune" ]; then PRUNE=t elif [[ "$1" == --arch=* ]]; then ARCH="${1#--arch=}" elif [ "$1" == "--" ]; then shift DOCKER_CMD="${@}" break else IMAGES="$IMAGES $1" fi shift done if [[ -z "$IMAGES" ]]; then IMAGES="$(script/distro-tool --image-names)" fi mkdir -p "${PACKAGE_DIR}" #Run docker to build packages for IMAGE_NAME in $IMAGES; do split_image_name "${IMAGE_NAME}" #set IMAGE_NAME and IMAGE_INFO #It CAN'T be empty () with set -u... So I put some defaults in here OTHER_OPTIONS=("-t") if tty >/dev/null; then OTHER_OPTIONS+=("-i") fi if [ "${AUTO_REMOVE-1}" == "1" ]; then OTHER_OPTIONS+=("--rm") fi FINAL_UID=$(id -u) FINAL_GID=$(id -g) if [[ $FINAL_UID == 0 ]]; then FINAL_UID=${SUDO_UID-} fi if [[ $FINAL_GID == 0 ]]; then FINAL_GID=${SUDO_GID-} fi echo Compiling LFS in docker image ${IMAGE_NAME} IMAGE_REPO_DIR="${PACKAGE_DIR}"/"${IMAGE_INFO[0]}"/"${IMAGE_INFO[1]}" $SUDO docker run "${OTHER_OPTIONS[@]}" ${DOCKER_OTHER_OPTIONS-} \ -e USER=root \ -e FINAL_UID=${FINAL_UID} \ -e FINAL_GID=${FINAL_GID} \ -v "${MINGW_PATCH}${REPO_DIR}:/src" \ -v "${MINGW_PATCH}${IMAGE_REPO_DIR}:/repo" \ --platform "$ARCH" \ gitlfs/build-dockers:${IMAGE_NAME} ${DOCKER_CMD-} if [ -n "$PRUNE" ] then $SUDO docker rmi -f "gitlfs/build-dockers:${IMAGE_NAME}" fi done echo "Docker run completed successfully!" git-lfs-3.6.1/docs/000077500000000000000000000000001472372047300140275ustar00rootroot00000000000000git-lfs-3.6.1/docs/README.md000066400000000000000000000006521472372047300153110ustar00rootroot00000000000000# Git LFS Documentation ## Reference Manual Each Git LFS subcommand is documented in the [official man pages](man). Any of these can also be viewed from the command line: ```bash $ git lfs help $ git lfs -h ``` ## Developer Docs Details of how the Git LFS **client** works are in the [official specification](spec.md). Details of how the Git LFS **server** works are in the [API specification](api). git-lfs-3.6.1/docs/api/000077500000000000000000000000001472372047300146005ustar00rootroot00000000000000git-lfs-3.6.1/docs/api/README.md000066400000000000000000000016061472372047300160620ustar00rootroot00000000000000# Git LFS API The Git LFS client uses an HTTPS server to coordinate fetching and storing large binary objects separately from a Git server. The basic process the client goes through looks like this: 1. [Discover the LFS Server to use](./server-discovery.md). 2. [Apply Authentication](./authentication.md). 3. Make the request. See the Batch and File Locking API sections. ## Batch API The Batch API is used to request the ability to transfer LFS objects with the LFS server. API Specification: * [Batch API](./batch.md) Current transfer adapters include: * [Basic](./basic-transfers.md) Experimental transfer adapters include: * Tus.io (upload only) * [Custom](../custom-transfers.md) ## File Locking API The File Locking API is used to create, list, and delete locks, as well as verify that locks are respected in Git pushes.
API Specification: * [File Locking API](./locking.md) git-lfs-3.6.1/docs/api/authentication.md000066400000000000000000000046101472372047300201420ustar00rootroot00000000000000# Authentication The Git LFS API uses HTTP Basic Authentication to authorize requests. Therefore, HTTPS is strongly encouraged for all production Git LFS servers. The credentials can come from the following places: ## SSH Git LFS will add any HTTP headers returned from the `git-lfs-authenticate` command to any Batch API requests. If servers are returning expiring tokens, they can add an `expires_in` (or `expires_at`) property to hint when the token will expire. ```bash # Called for remotes like: # * git@git-server.com:foo/bar.git # * ssh://git@git-server.com/foo/bar.git $ ssh git@git-server.com git-lfs-authenticate foo/bar.git download { "header": { "Authorization": "RemoteAuth some-token" }, # optional, for expiring tokens, preferred over expires_at "expires_in": 86400, # optional, for expiring tokens "expires_at": "2016-11-10T15:29:07Z" } ``` See the SSH section in the [Server Discovery doc](./server-discovery.md) for more info about `git-lfs-authenticate`. ## Git Credentials Git provides a [`credentials` command](https://git-scm.com/docs/gitcredentials) for storing and retrieving credentials through a customizable credential helper. By default, it associates the credentials with a domain. You can enable `credential.useHttpPath` so different repository paths have different credentials. Git ships with a really basic credential cacher that stores passwords in memory, so you don't have to enter your password frequently. However, you are encouraged to set up a [custom git credential cacher](https://help.github.com/articles/caching-your-github-password-in-git/), if a better one exists for your platform. As of version 3.0, Git LFS no longer supports NTLM. Users are encouraged to set up Kerberos; for example, Azure DevOps Server recommends Kerberos over NTLM in [this blog post](https://devblogs.microsoft.com/devops/reconfigure-azure-devops-server-to-use-kerberos-instead-of-ntlm/). For pre-3.0 LFS versions, if your Git LFS server authenticates with NTLM then you must provide your credentials to `git-credential` in the form `username:DOMAIN\user password:password`. ## Specified in URL You can hardcode credentials into your Git remote or LFS URL properties in your git config. This is not recommended for security reasons because it relies on the credentials living in your local git config. ```bash $ git remote add origin https://user:password@git-server.com/foo/bar.git ``` git-lfs-3.6.1/docs/api/basic-transfers.md000066400000000000000000000063111472372047300202110ustar00rootroot00000000000000# Basic Transfer API The Basic transfer API is a simple, generic API for directly uploading and downloading LFS objects. Git LFS servers can offload object storage to cloud services like S3, or implement this API natively. This is the original transfer adapter. All Git LFS clients and servers SHOULD support it, and default to it if the [Batch API](./batch.md) request or response does not specify a `transfer` property. ## Downloads Downloading an object requires a download `action` object in the Batch API response that looks like this: ```json { "transfer": "basic", "objects": [ { "oid": "1111111", "size": 123, "authenticated": true, "actions": { "download": { "href": "https://some-download.com/1111111", "header": { "Authorization": "Basic ..." 
}, "expires_in": 86400 } } } ] } ``` The Basic transfer adapter will make a GET request on the `href`, expecting the raw bytes returned in the HTTP response. ``` > GET https://some-download.com/1111111 > Authorization: Basic ... < < HTTP/1.1 200 OK < Content-Type: application/octet-stream < Content-Length: 123 < < {contents} ``` ## Uploads The client uploads objects through individual PUT requests. The URL and headers are provided by an upload `action` object. ```json { "transfer": "basic", "objects": [ { "oid": "1111111", "size": 123, "authenticated": true, "actions": { "upload": { "href": "https://some-upload.com/1111111", "header": { "Authorization": "Basic ..." }, "expires_in": 86400 } } } ] } ``` The Basic transfer adapter will make a PUT request on the `href`, sending the raw object bytes in the HTTP request body. ``` > PUT https://some-upload.com/1111111 > Authorization: Basic ... > Content-Type: application/octet-stream > Content-Length: 123 > > {contents} > < HTTP/1.1 200 OK ``` ## Verification The Batch API can optionally return a verify `action` object in addition to an upload `action` object. If given, the Batch API expects a POST to the href after a successful upload. ```json { "transfer": "basic", "objects": [ { "oid": "1111111", "size": 123, "authenticated": true, "actions": { "upload": { "href": "https://some-upload.com/1111111", "header": { "Authorization": "Basic ..." }, "expires_in": 86400 }, "verify": { "href": "https://some-verify-callback.com", "header": { "Authorization": "Basic ..." }, "expires_in": 86400 } } } ] } ``` Git LFS clients send: * `oid` - The String OID of the Git LFS object. * `size` - The integer size of the Git LFS object, in bytes. ``` > POST https://some-verify-callback.com > Accept: application/vnd.git-lfs+json > Content-Type: application/vnd.git-lfs+json > Content-Length: 123 > > {"oid": "{oid}", "size": 10000} > < HTTP/1.1 200 OK ``` The client may also include a `charset=utf-8` parameter in the `Content-Type` header, which servers should be prepared to accept. A 200 response means that the object exists on the server. git-lfs-3.6.1/docs/api/batch.md000066400000000000000000000230651472372047300162110ustar00rootroot00000000000000# Git LFS Batch API Added: v0.6 The Batch API is used to request the ability to transfer LFS objects with the LFS server. The Batch URL is built by adding `/objects/batch` to the LFS server URL. Git remote: https://git-server.com/foo/bar
LFS server: https://git-server.com/foo/bar.git/info/lfs
Batch API: https://git-server.com/foo/bar.git/info/lfs/objects/batch See the [Server Discovery doc](./server-discovery.md) for more info on how LFS builds the LFS server URL. All Batch API requests use the POST verb, and require the following HTTP headers. The request and response bodies are JSON. Accept: application/vnd.git-lfs+json Content-Type: application/vnd.git-lfs+json The client may also include a `charset=utf-8` parameter in the `Content-Type` header, which servers should be prepared to accept. See the [Authentication doc](./authentication.md) for more info on how Batch API requests are authorized. ## Requests The client sends the following information to the Batch endpoint to transfer some objects: * `operation` - Should be `download` or `upload`. * `transfers` - An optional Array of String identifiers for transfer adapters that the client has configured. If omitted, the `basic` transfer adapter MUST be assumed by the server. * `ref` - Optional object describing the server ref that the objects belong to. Note: Added in v2.4. * `name` - Fully-qualified server refspec. * `objects` - An Array of objects to transfer. * `oid` - String OID of the LFS object. * `size` - Integer byte size of the LFS object. Must be at least zero. * `hash_algo` - The hash algorithm used to name Git LFS objects. Optional; defaults to `sha256` if not specified. Note: Git LFS currently only supports the `basic` transfer adapter. The `transfers` property was added for future compatibility with some experimental transfer adapters. See the [API README](./README.md) for a list of the documented transfer adapters. ```js // POST https://lfs-server.com/objects/batch // Accept: application/vnd.git-lfs+json // Content-Type: application/vnd.git-lfs+json // Authorization: Basic ... (if needed) { "operation": "download", "transfers": [ "basic" ], "ref": { "name": "refs/heads/main" }, "objects": [ { "oid": "12345678", "size": 123 } ], "hash_algo": "sha256" } ``` #### Ref Property The Batch API added the `ref` property in LFS v2.4 to support Git server authentication schemes that take the refspec into account. Since this is a new addition to the API, servers should be able to operate with a missing or null `ref` property. Some examples will illustrate how the `ref` property can be used. * User `owner` has full access to the repository. * User `contrib` has readonly access to the repository, and write access to `refs/heads/contrib`. ```js { "operation": "download", "transfers": [ "basic" ], "objects": [ { "oid": "12345678", "size": 123 } ] } ``` With this payload, both `owner` and `contrib` can download the requested object, since they both have read access. ```js { "operation": "upload", "transfers": [ "basic" ], "objects": [ { "oid": "12345678", "size": 123 } ] } ``` With this payload, only `owner` can upload the requested object. ```js { "operation": "upload", "transfers": [ "basic" ], "ref": { "name": "refs/heads/contrib" }, "objects": [ { "oid": "12345678", "size": 123 } ] } ``` Both `owner` and `contrib` can upload the requested object. ### Successful Responses The Batch API should always return with a 200 status, unless there are some issues with the request (bad authorization, bad JSON, etc.). See below for examples of response errors. Check out the documented transfer adapters in the [API README](./README.md) to see how Git LFS handles successful Batch responses. Successful responses include the following properties: * `transfer` - String identifier of the transfer adapter that the server prefers. 
This MUST be one of the given `transfer` identifiers from the request. Servers can assume the `basic` transfer adapter if none were given. The Git LFS client will use the `basic` transfer adapter if the `transfer` property is omitted. * `objects` - An Array of objects to transfer. * `oid` - String OID of the LFS object. * `size` - Integer byte size of the LFS object. Must be at least zero. * `authenticated` - Optional boolean specifying whether the request for this specific object is authenticated. If omitted or false, Git LFS will attempt to [find credentials for this URL](./authentication.md). * `actions` - Object containing the next actions for this object. Applicable actions depend on which `operation` is specified in the request. How these properties are interpreted depends on which transfer adapter the client will be using. * `href` - String URL to download the object. * `header` - Optional hash of String HTTP header key/value pairs to apply to the request. * `expires_in` - Whole number of seconds after local client time when transfer will expire. Preferred over `expires_at` if both are provided. Maximum of 2147483647, minimum of -2147483647. * `expires_at` - String uppercase RFC 3339-formatted timestamp with second precision for when the given action expires (usually due to a temporary token). * `hash_algo` - The hash algorithm used to name Git LFS objects for this repository. Optional; defaults to `sha256` if not specified. Download operations MUST specify a `download` action, or an object error if the object cannot be downloaded for some reason. See "Response Errors" below. Upload operations can specify an `upload` and a `verify` action. The `upload` action describes how to upload the object. If the object has a `verify` action, the LFS client will hit this URL after a successful upload. Servers can use this for extra verification, if needed. If a client requests to upload an object that the server already has, the server should omit the `actions` property completely. The client will then assume the server already has it. ```js // HTTP/1.1 200 Ok // Content-Type: application/vnd.git-lfs+json { "transfer": "basic", "objects": [ { "oid": "1111111", "size": 123, "authenticated": true, "actions": { "download": { "href": "https://some-download.com", "header": { "Key": "value" }, "expires_at": "2016-11-10T15:29:07Z" } } } ], "hash_algo": "sha256" } ``` If there are problems accessing individual objects, servers should continue to return a 200 status code, and provide per-object errors. Here is an example: ```js // HTTP/1.1 200 Ok // Content-Type: application/vnd.git-lfs+json { "transfer": "basic", "objects": [ { "oid": "1111111", "size": 123, "error": { "code": 404, "message": "Object does not exist" } } ], "hash_algo": "sha256" } ``` LFS object error codes should match HTTP status codes where possible: * 404 - The object does not exist on the server. * 409 - The specified hash algorithm disagrees with the server's acceptable options. * 410 - The object was removed by the owner. * 422 - Validation error. ### Response Errors LFS servers can respond with these other HTTP status codes: * 401 - The authentication credentials are needed, but were not sent. Git LFS will attempt to [get the authentication](./authentication.md) for the request and retry immediately. * 403 - The user has **read**, but not **write** access. Only applicable when the `operation` in the request is "upload." * 404 - The Repository does not exist for the user. 
* 422 - Validation error with one or more of the objects in the request. This means that _none_ of the requested objects to upload are valid. Error responses will not have an `objects` property. They will only have: * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. ```js // HTTP/1.1 404 Not Found // Content-Type: application/vnd.git-lfs+json { "message": "Not found", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` HTTP 401 responses should include an `LFS-Authenticate` header to tell the client what form of authentication it requires. If omitted, Git LFS will assume Basic Authentication. This mirrors the standard `WWW-Authenticate` header with a custom header key so it does not trigger password prompts in browsers. ```js // HTTP/1.1 401 Unauthorized // Content-Type: application/vnd.git-lfs+json // LFS-Authenticate: Basic realm="Git LFS" { "message": "Credentials needed", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` The following status codes can optionally be returned from the API, depending on the server implementation. * 406 - The Accept header needs to be `application/vnd.git-lfs+json`. * 413 - The Batch API request contained too many objects or the request was otherwise too large. * 429 - The user has hit a rate limit with the server. Though the API does not specify any rate limits, implementors are encouraged to set some for availability reasons. * 501 - The server has not implemented the current method. Reserved for future use. * 507 - The server has insufficient storage capacity to complete the request. * 509 - The bandwidth limit for the user or repository has been exceeded. The API does not specify any bandwidth limit, but implementors may track usage. Some server errors may trigger the client to retry requests, such as 500, 502, 503, and 504. git-lfs-3.6.1/docs/api/locking.md000066400000000000000000000336571472372047300165540ustar00rootroot00000000000000# Git LFS File Locking API Added: v2.0 The File Locking API is used to create, list, and delete locks, as well as verify that locks are respected in Git pushes. The locking URLs are built by adding a suffix to the LFS server URL. Git remote: https://git-server.com/foo/bar
LFS server: https://git-server.com/foo/bar.git/info/lfs
Locks API: https://git-server.com/foo/bar.git/info/lfs/locks
See the [Server Discovery doc](./server-discovery.md) for more info on how LFS builds the LFS server URL. All File Locking requests require the following HTTP headers: Accept: application/vnd.git-lfs+json Content-Type: application/vnd.git-lfs+json The client may also include a `charset=utf-8` parameter in the `Content-Type` header, which servers should be prepared to accept. See the [Authentication doc](./authentication.md) for more info on how File Locking API requests are authorized. Note: This is the first version of the File Locking API, supporting only the simplest use case: single branch locking. The API is designed to be extensible as we experiment with more advanced locking scenarios, as defined in the [original proposal](/docs/proposals/locking.md). The [Batch API's `ref` property docs](./batch.md#ref-property) describe how the `ref` property can be used to support auth schemes that include the server ref. Locking API implementations should also only use it for authentication, until advanced locking scenarios have been developed. ## Create Lock The client creates a lock by sending the following properties in a `POST` to `/locks` (appended to the LFS server URL, as described above). Servers should ensure that users have push access to the repository, and that files are locked exclusively to one user. * `path` - String path name of the file that is locked. This should be relative to the root of the repository working directory. * `ref` - Optional object describing the server ref that the locks belong to. Note: Added in v2.4. * `name` - Fully-qualified server refspec.
* `documentation_url` - Optional String to give the user a place to report errors. ```js // HTTP/1.1 403 Forbidden // Content-Type: application/vnd.git-lfs+json { "message": "You must have push access to create a lock", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ### Error Response * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. ```js // HTTP/1.1 500 Internal server error // Content-Type: application/vnd.git-lfs+json { "message": "internal server error", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ## List Locks The client can request the current active locks for a repository by sending a `GET` to `/locks` (appended to the LFS server url, as described above). LFS Servers should ensure that users have at least pull access to the repository. The properties are sent as URI query values, instead of through a JSON body: * `path` - Optional string path to match against locks on the server. * `id` - Optional string ID to match against a lock on the server. * `cursor` - The optional string value to continue listing locks. This value should be the `next_cursor` from a previous request. * `limit` - The integer limit of the number of locks to return. The server should have its own upper and lower bounds on the supported limits. * `refspec` - Optional fully qualified server refspec from which to search for locks. ```js // GET https://lfs-server.com/locks?path=&id=&cursor=&limit=&refspec= // Accept: application/vnd.git-lfs+json // Authorization: Basic ... (if needed) ``` ### Successful Response A successful response will list the matching locks: * `locks` - Array of matching Lock objects. See the "Create Lock" successful response section to see what Lock properties are possible. * `next_cursor` - Optional string cursor that the server can return if there are more locks matching the given filters. The client will re-do the request, setting the `?cursor` query value with this `next_cursor` value. Note: If the server has no locks, it must return an empty `locks` array. ```js // HTTP/1.1 200 Ok // Content-Type: application/vnd.git-lfs+json { "locks": [ { "id": "some-uuid", "path": "/path/to/file", "locked_at": "2016-05-17T15:49:06+00:00", "owner": { "name": "Jane Doe" } } ], "next_cursor": "optional next ID" } ``` ### Unauthorized Response Lock servers should require that users have pull access to the repository before they can list locks. * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. ```js // HTTP/1.1 403 Forbidden // Content-Type: application/vnd.git-lfs+json { "message": "You must have pull access to list locks", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ### Error Response * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. 
```js // HTTP/1.1 500 Internal server error // Content-Type: application/vnd.git-lfs+json { "message": "unable to list locks", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ## List Locks for Verification The client can use the Lock Verification endpoint to check for active locks that can affect a Git push. For a caller, this endpoint is very similar to the "List Locks" endpoint above, except: * Verification requires a `POST` request. * The `cursor`, `ref` and `limit` values are sent as properties in the JSON request body. * The response includes locks partitioned into `ours` and `theirs` properties. LFS Servers should ensure that users have push access to the repository. Clients list locks for verification by sending the following properties in a `POST` to `/locks/verify` (appended to the LFS server URL, as described above): * `ref` - Optional object describing the server ref that the locks belong to. Note: Added in v2.4. * `name` - Fully-qualified server refspec. * `cursor` - Optional cursor to allow pagination. Servers can determine how cursors are formatted based on how they are stored internally. * `limit` - Optional limit to how many locks to return. ```js // POST https://lfs-server.com/locks/verify // Accept: application/vnd.git-lfs+json // Content-Type: application/vnd.git-lfs+json // Authorization: Basic ... { "cursor": "optional cursor", "limit": 100, // also optional "ref": { "name": "refs/heads/my-feature" } } ``` Note: As more advanced locking workflows are implemented, more details will likely be added to this request body in future iterations. ### Successful Response A successful response will list the relevant locks: * `ours` - Array of Lock objects currently owned by the authenticated user. * `theirs` - Array of Lock objects currently owned by other users. * `next_cursor` - Optional string cursor that the server can return if there are more locks matching the given filters. The client will re-do the request, setting the `cursor` property with this `next_cursor` value. If a Git push updates any files matching any of "our" locks, Git LFS will list them in the push output, in case the user wants to unlock them after the push. However, any updated files matching one of "their" locks will halt the push. At this point, it is up to the user to resolve the lock conflict with their team. Note: If the server has no locks, it must return an empty array in the `ours` or `theirs` properties. ```js // HTTP/1.1 200 Ok // Content-Type: application/vnd.git-lfs+json { "ours": [ { "id": "some-uuid", "path": "/path/to/file", "locked_at": "2016-05-17T15:49:06+00:00", "owner": { "name": "Jane Doe" } } ], "theirs": [], "next_cursor": "optional next ID" } ``` ### Not Found Response By default, an LFS server that doesn't implement any locking endpoints should return 404. This response will not halt any Git pushes. Any 404 will do, but Git LFS will show a better error message with a JSON response. * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. ```js // HTTP/1.1 404 Not found // Content-Type: application/vnd.git-lfs+json { "message": "Not found", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ### Unauthorized Response Lock servers should require that users have push access to the repository before they can get a list of locks to verify a Git push. 
* `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. ```js // HTTP/1.1 403 Forbidden // Content-Type: application/vnd.git-lfs+json { "message": "You must have push access to verify locks", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ### Error Response * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. ```js // HTTP/1.1 500 Internal server error // Content-Type: application/vnd.git-lfs+json { "message": "unable to list locks", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ## Delete Lock The client can delete a lock, given its ID, by sending a `POST` to `/locks/:id/unlock` (appended to the LFS server URL, as described above). LFS servers should ensure that callers have push access to the repository. They should also prevent a user from deleting another user's lock, unless the `force` property is given. Properties: * `force` - Optional boolean specifying that the user is deleting another user's lock. * `ref` - Optional object describing the server ref that the locks belong to. Note: Added in v2.4. * `name` - Fully-qualified server refspec. ```js // POST https://lfs-server.com/locks/:id/unlock // Accept: application/vnd.git-lfs+json // Content-Type: application/vnd.git-lfs+json // Authorization: Basic ... { "force": true, "ref": { "name": "refs/heads/my-feature" } } ``` ### Successful Response Successful deletions return the deleted lock. See the "Create Lock" successful response section to see what Lock properties are possible. ```js // HTTP/1.1 200 Ok // Content-Type: application/vnd.git-lfs+json { "lock": { "id": "some-uuid", "path": "/path/to/file", "locked_at": "2016-05-17T15:49:06+00:00", "owner": { "name": "Jane Doe" } } } ``` ### Unauthorized Response Lock servers should require that users have push access to the repository before they can delete locks. Also, if the `force` parameter is omitted, or false, the user should only be allowed to delete locks that they created. * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. ```js // HTTP/1.1 403 Forbidden // Content-Type: application/vnd.git-lfs+json { "message": "You must have push access to delete locks", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ### Error Response * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. 
```js // HTTP/1.1 500 Internal server error // Content-Type: application/vnd.git-lfs+json { "message": "unable to delete lock", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` git-lfs-3.6.1/docs/api/schemas/000077500000000000000000000000001472372047300162235ustar00rootroot00000000000000git-lfs-3.6.1/docs/api/schemas/http-batch-request-schema.json000077700000000000000000000000001472372047300346602../../../tq/schemas/http-batch-request-schema.jsonustar00rootroot00000000000000git-lfs-3.6.1/docs/api/schemas/http-batch-response-schema.json000077700000000000000000000000001472372047300351742../../../tq/schemas/http-batch-response-schema.jsonustar00rootroot00000000000000git-lfs-3.6.1/docs/api/schemas/http-lock-create-request-schema.json000077700000000000000000000000001472372047300401022../../../locking/schemas/http-lock-create-request-schema.jsonustar00rootroot00000000000000git-lfs-3.6.1/docs/api/schemas/http-lock-create-response-schema.json000077700000000000000000000000001472372047300404162../../../locking/schemas/http-lock-create-response-schema.jsonustar00rootroot00000000000000git-lfs-3.6.1/docs/api/schemas/http-lock-delete-request-schema.json000077700000000000000000000000001472372047300401002../../../locking/schemas/http-lock-delete-request-schema.jsonustar00rootroot00000000000000git-lfs-3.6.1/docs/api/schemas/http-lock-list-response-schema.json000077700000000000000000000000001472372047300376362../../../locking/schemas/http-lock-list-response-schema.jsonustar00rootroot00000000000000git-lfs-3.6.1/docs/api/schemas/http-lock-verify-response-schema.json000077700000000000000000000000001472372047300405202../../../locking/schemas/http-lock-verify-response-schema.jsonustar00rootroot00000000000000git-lfs-3.6.1/docs/api/server-discovery.md000066400000000000000000000063471472372047300204470ustar00rootroot00000000000000# Server Discovery One of the goals of Git LFS is to work with supporting Git remotes while requiring as few configuration properties as possible. Git LFS will attempt to use your Git remote to determine the LFS server. You can also configure a custom LFS server if your Git remote doesn't support one, or you just want to use a separate one. Look for the `Endpoint` properties in `git lfs env` to see your current LFS servers. ## Guessing the Server By default, Git LFS will append `.git/info/lfs` to the end of a Git remote URL to build the LFS server URL it will use: Git Remote: `https://git-server.com/foo/bar`
LFS Server: `https://git-server.com/foo/bar.git/info/lfs` Git Remote: `https://git-server.com/foo/bar.git`
LFS Server: `https://git-server.com/foo/bar.git/info/lfs` Git Remote: `git@git-server.com:foo/bar.git`
LFS Server: `https://git-server.com/foo/bar.git/info/lfs` Git Remote: `ssh://git-server.com/foo/bar.git`
LFS Server: `https://git-server.com/foo/bar.git/info/lfs` ## SSH If Git LFS detects an SSH remote, it will run the `git-lfs-authenticate` command. This allows supporting Git servers to give the Git LFS client alternative authentication so the user does not have to set up a git credential helper. Git LFS runs the following command: $ ssh [{user}@]{server} git-lfs-authenticate {path} {operation} The `user`, `server`, and `path` properties are taken from the SSH remote. The `operation` can either be "download" or "upload". The SSH command can be tweaked with the `GIT_SSH` or `GIT_SSH_COMMAND` environment variables. The output for successful commands is JSON, and matches the schema as an `action` in a Batch API response. Git LFS will dump the STDERR from the `ssh` command if it returns a non-zero exit code. Examples: The `git-lfs-authenticate` command can even suggest an LFS endpoint that does not match the Git remote by specifying an `href` property. ```bash # Called for remotes like: # * git@git-server.com:foo/bar.git # * ssh://git@git-server.com/foo/bar.git $ ssh git@git-server.com git-lfs-authenticate foo/bar.git download { "href": "https://lfs-server.com/foo/bar", "header": { "Authorization": "RemoteAuth some-token" }, "expires_in": 86400 } ``` Git LFS will output the STDERR if `git-lfs-authenticate` returns a non-zero exit code: ```bash $ ssh git@git-server.com git-lfs-authenticate foo/bar.git wat Invalid LFS operation: "wat" ``` ## Custom Configuration If Git LFS can't guess your LFS server, or you aren't using the `git-lfs-authenticate` command, you can specify the LFS server using Git config. Set `lfs.url` to set the LFS server, regardless of Git remote. ```bash $ git config lfs.url https://lfs-server.com/foo/bar ``` You can set `remote.{name}.lfsurl` to set the LFS server for that specific remote only: ```bash $ git config remote.dev.lfsurl http://lfs-server.dev/foo/bar $ git lfs env ... Endpoint=https://git-server.com/foo/bar.git/info/lfs (auth=none) Endpoint (dev)=http://lfs-server.dev/foo/bar (auth=none) ``` Git LFS will also read these settings from a `.lfsconfig` file in the root of your repository. This lets you commit it to the repository so that all users can use it, if you wish. ```bash $ git config --file=.lfsconfig lfs.url https://lfs-server.com/foo/bar ``` git-lfs-3.6.1/docs/custom-transfers.md000066400000000000000000000312041472372047300176700ustar00rootroot00000000000000# Adding Custom Transfer Agents to LFS ## Introduction Git LFS supports multiple ways to transfer (upload and download) files. In the core client, the basic way to do this is via a one-off HTTP request to the URL returned from the LFS API for a given object. The core client also supports extensions to allow resuming of downloads (via `Range` headers) and uploads (via the [tus.io](http://tus.io) protocol). Some people might want to be able to transfer content in other ways, however. To enable this, git-lfs allows configuring Custom Transfers, which are simply processes which must adhere to the protocol defined later in this document. git-lfs will invoke the process at the start of all transfers, and will communicate with the process via stdin/stdout for each transfer. ## Custom Transfer Type Selection In the LFS API request, the client includes a list of transfer types it supports. When replying, the API server will pick one of these and make any necessary adjustments to the returned object actions, in case the picked transfer type needs custom details about how to do each transfer.
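For illustration, the client's half of that negotiation is the `transfers` array in the batch API request. A request advertising a hypothetical custom transfer type named `nfs` (the name is an assumption for this example; see the Batch API documentation for the full request format) might look like this:

```json
{
  "operation": "download",
  "transfers": ["basic", "nfs"],
  "objects": [
    {
      "oid": "22ab5f63670800cc7be06dbed816012b0dc411e774754c7579467d2536a9cf3e",
      "size": 21245
    }
  ]
}
```

If the server picks `nfs`, the object actions it returns can then carry `href` values meaningful to that mechanism rather than standard HTTP URLs.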
## Using a Custom Transfer Type without the API server In some cases the transfer agent can figure out by itself how and where the transfers should be made, without having to query the API server. In this case it's possible to use the custom transfer agent directly, without querying the server, by using the following config option: * `lfs.standalonetransferagent`, `lfs.<url>.standalonetransferagent` Specifies a custom transfer agent to be used if the API server URL matches as in `git config --get-urlmatch lfs.standalonetransferagent <apiurl>`. `git-lfs` will not contact the API server. It instead sets stage 2 transfer actions to `null`. `lfs.<url>.standalonetransferagent` can be used to configure a custom transfer agent for individual remotes. `lfs.standalonetransferagent` unconditionally configures a custom transfer agent for all remotes. The custom transfer agent must be specified in a `lfs.customtransfer.<name>` settings group. ## Defining a Custom Transfer Type A custom transfer process is defined under a settings group called `lfs.customtransfer.<name>`, where `<name>` is an identifier (see [Naming](#naming) below). * `lfs.customtransfer.<name>.path` `path` should point to the process you wish to invoke. This will be invoked at the start of all transfers (possibly many times, see the `concurrent` option below) and the protocol over stdin/stdout is defined below in the [Protocol](#protocol) section. * `lfs.customtransfer.<name>.args` If the custom transfer process requires any arguments, these can be provided here. Typically you would only need this if your process was multi-purpose or particularly flexible; most of the time you won't need it. Note that this string will be expanded by the shell. * `lfs.customtransfer.<name>.concurrent` If true (the default), git-lfs will invoke the custom transfer process multiple times in parallel, according to `lfs.concurrenttransfers`, splitting the transfer workload between the processes. If you would prefer that only one instance of the transfer process is invoked, maybe because you want to do your own parallelism internally (e.g. slicing files into parts), set this to false. * `lfs.customtransfer.<name>.direction` Specifies which direction the custom transfer process supports, either `download`, `upload`, or `both`. The default if unspecified is `both`. ## Naming Each custom transfer must have a name which is unique to the underlying mechanism, and the client and the server must agree on that name. The client will advertise this name to the server as a supported transfer approach, and if the server supports it, it will return relevant object action links. Because these may be very different from standard HTTP URLs it's important that the client and server agree on the name. For example, let's say I've implemented a custom transfer process which uses NFS. I could call this transfer type `nfs` - although it's not specific to my configuration exactly, it is specific to the way NFS works, and the server will need to give me different URLs. Assuming I define my transfer like this, and the server supports it, I might start getting object action links back like `nfs://<host>/path/to/object` ## Protocol The git-lfs client communicates with the custom transfer process via the stdin and stdout streams. No file content is communicated on these streams, only request / response metadata. The metadata exchanged is always in JSON format. External files will be referenced when actual content is exchanged.
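As a sketch of how the settings described above fit together, a hypothetical agent named `nfs` (the binary path and arguments here are assumptions for the example) might be registered in the Git config like so:

```
[lfs "customtransfer.nfs"]
	path = /usr/local/bin/git-lfs-nfs-agent
	args = --verbose
	concurrent = true
	direction = both
```

To use the same agent in standalone mode, point `lfs.standalonetransferagent` at `nfs` as described earlier.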
### Line Delimited JSON Because multiple JSON messages will be exchanged on the same stream it's useful to delimit them explicitly rather than have the parser find the closing `}` in an arbitrary stream; therefore, each JSON structure will be sent and received on a **single line** as per [Line Delimited JSON](https://en.wikipedia.org/wiki/JSON_Streaming#Line_delimited_JSON_2). In other words, when git-lfs sends a JSON message to the custom transfer it will be on a single line, with a line feed at the end. The transfer process must respond the same way by writing a JSON structure back to stdout with a single line feed at the end (and flush the output). ### Protocol Stages The protocol consists of 3 stages: #### Stage 1: Initiation Immediately after invoking a custom transfer process, git-lfs sends initiation data to the process over stdin. This tells the process useful information about the configuration. The message will look like this: ```json { "event": "init", "operation": "download", "remote": "origin", "concurrent": true, "concurrenttransfers": 3 } ``` * `event`: Always `init` to identify this message * `operation`: will be `upload` or `download` depending on transfer direction * `remote`: The Git remote. It can be a remote name like `origin` or a URL like `ssh://git.example.com//path/to/repo`. A standalone transfer agent can use it to determine the location of remote files. * `concurrent`: reflects the value of `lfs.customtransfer.<name>.concurrent`, in case the process needs to know * `concurrenttransfers`: reflects the value of `lfs.concurrenttransfers`, in case the transfer process wants to implement its own concurrency and respect this setting. The transfer process should use the information it needs from the initiation structure, and also perform any one-off setup tasks it needs to do. It should then respond on stdout with a simple empty confirmation structure, as follows: ```json { } ``` Or if there was an error: ```json { "error": { "code": 32, "message": "Some init failure message" } } ``` #### Stage 2: 0..N Transfers After the initiation exchange, git-lfs will send any number of transfer requests to the stdin of the transfer process, in a serial sequence. Once a transfer request is sent to the process, it awaits a completion response before sending the next request. ##### Uploads For uploads the request sent from git-lfs to the transfer process will look like this: ```json { "event": "upload", "oid": "bf3e3e2af9366a3b704ae0c31de5afa64193ebabffde2091936ad2e7510bc03a", "size": 346232, "path": "/path/to/file.png", "action": { "href": "nfs://server/path", "header": { "key": "value" } } } ``` * `event`: Always `upload` to identify this message * `oid`: the identifier of the LFS object * `size`: the size of the LFS object * `path`: the file which the transfer process should read the upload data from * `action`: the `upload` action copied from the response from the batch API. This contains `href` and `header` contents, which are named per HTTP conventions, but can be interpreted however the custom transfer agent wishes (this is an NFS example, but it doesn't even have to be a URL). Generally, `href` will give the primary connection details, with `header` containing any miscellaneous information needed. `action` is `null` for standalone transfer agents.
The transfer process should post one or more [progress messages](#progress) and then a final completion message as follows: ```json { "event": "complete", "oid": "bf3e3e2af9366a3b704ae0c31de5afa64193ebabffde2091936ad2e7510bc03a" } ``` * `event`: Always `complete` to identify this message * `oid`: the identifier of the LFS object Or if there was an error in the transfer: ```json { "event": "complete", "oid": "bf3e3e2af9366a3b704ae0c31de5afa64193ebabffde2091936ad2e7510bc03a", "error": { "code": 2, "message": "Explain what happened to this transfer" } } ``` * `event`: Always `complete` to identify this message * `oid`: the identifier of the LFS object * `error`: Should contain a `code` and `message` explaining the error ##### Downloads For downloads the request sent from git-lfs to the transfer process will look like this: ```json { "event": "download", "oid": "22ab5f63670800cc7be06dbed816012b0dc411e774754c7579467d2536a9cf3e", "size": 21245, "action": { "href": "nfs://server/path", "header": { "key": "value" } } } ``` * `event`: Always `download` to identify this message * `oid`: the identifier of the LFS object * `size`: the size of the LFS object * `action`: the `download` action copied from the response from the batch API. This contains `href` and `header` contents, which are named per HTTP conventions, but can be interpreted however the custom transfer agent wishes (this is an NFS example, but it doesn't even have to be a URL). Generally, `href` will give the primary connection details, with `header` containing any miscellaneous information needed. `action` is `null` for standalone transfer agents. Note that there is no file path included in the download request; the transfer process should create a file itself and return the path in the final response after completion (see below). The transfer process should post one or more [progress messages](#progress) and then a final completion message as follows: ```json { "event": "complete", "oid": "22ab5f63670800cc7be06dbed816012b0dc411e774754c7579467d2536a9cf3e", "path": "/path/to/file.png" } ``` * `event`: Always `complete` to identify this message * `oid`: the identifier of the LFS object * `path`: the path to a file containing the downloaded data, which the transfer process relinquishes control of to git-lfs. git-lfs will move the file into LFS storage. Or, if there was a failure transferring this item: ```json { "event": "complete", "oid": "22ab5f63670800cc7be06dbed816012b0dc411e774754c7579467d2536a9cf3e", "error": { "code": 2, "message": "Explain what happened to this transfer" } } ``` * `event`: Always `complete` to identify this message * `oid`: the identifier of the LFS object * `error`: Should contain a `code` and `message` explaining the error Errors for a single transfer request should not terminate the process. The error should be returned in the response structure instead. The custom transfer adapter does not need to check the SHA of the file content it has downloaded; git-lfs will do that before moving the final content into the LFS store.
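To make the message flow concrete, here is a minimal sketch of a custom transfer agent's main loop, written in Go. It accepts every request and elides the actual data movement, progress reporting, and most error handling, so it illustrates only the line-delimited framing described above rather than a usable agent:

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
)

// message holds the subset of protocol fields this sketch needs.
type message struct {
	Event string `json:"event"`
	Oid   string `json:"oid,omitempty"`
	Size  int64  `json:"size,omitempty"`
	Path  string `json:"path,omitempty"`
}

func main() {
	scanner := bufio.NewScanner(os.Stdin)
	// json.Encoder writes one value per line, which matches the
	// line-delimited framing git-lfs expects on stdout.
	out := json.NewEncoder(os.Stdout)

	for scanner.Scan() {
		var msg message
		if err := json.Unmarshal(scanner.Bytes(), &msg); err != nil {
			fmt.Fprintln(os.Stderr, "invalid message:", err)
			os.Exit(1)
		}

		switch msg.Event {
		case "init":
			// Perform any one-off setup, then confirm with an
			// empty JSON object.
			out.Encode(struct{}{})
		case "upload", "download":
			// A real agent would move the data here, emit
			// "progress" events along the way, and include a
			// "path" field when completing a download. This
			// sketch just reports success.
			out.Encode(message{Event: "complete", Oid: msg.Oid})
		case "terminate":
			// Clean up and exit; no response is expected.
			return
		}
	}
}
```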
##### Progress In order to support progress reporting while data is uploading / downloading, the transfer process should post messages to stdout as follows before sending the final completion message: ```json { "event": "progress", "oid": "22ab5f63670800cc7be06dbed816012b0dc411e774754c7579467d2536a9cf3e", "bytesSoFar": 1234, "bytesSinceLast": 64 } ``` * `event`: Always `progress` to identify this message * `oid`: the identifier of the LFS object * `bytesSoFar`: the total number of bytes transferred so far * `bytesSinceLast`: the number of bytes transferred since the last progress message The transfer process should post these messages such that the last one sent has `bytesSoFar` equal to the file size on success. #### Stage 3: Finish & Cleanup When all transfers have been processed, git-lfs will send the following message to the stdin of the transfer process: ```json { "event": "terminate" } ``` On receiving this message the transfer process should clean up and terminate. No response is expected. ## Error handling Any unexpected fatal errors in the transfer process (not errors specific to a transfer request) should set the exit code to non-zero and print information to stderr. Otherwise the exit code should be 0 even if some transfers failed. ## A Note On Verify Actions You may have noticed that only the `upload` and `download` actions are passed to the custom transfer agent for processing. What about the `verify` action, if the API returns one? Custom transfer agents do not handle the verification process, only the upload and download of content. The verify link is typically used to notify a system *other* than the actual content store after an upload was completed; therefore, it makes more sense for that to be handled via the normal API process. git-lfs-3.6.1/docs/extensions.md000066400000000000000000000215621472372047300165560ustar00rootroot00000000000000# Extending LFS Teams who use Git LFS often have custom requirements for how the pointer files and blobs should be handled. Some examples of extensions that could be built: * Compress large files on clean, uncompress them on smudge/fetch * Encrypt files on clean, decrypt on smudge/fetch * Scan files on clean to make sure they don't contain sensitive information The basic extensibility model is that LFS extensions must be registered explicitly, and they will be invoked on clean and smudge to manipulate the contents of the files as needed. On clean, LFS itself ensures that the pointer file is updated with all the information needed to be able to smudge correctly, and the extensions never modify the pointer file directly. NOTE: This feature is considered experimental, and included so developers can work on extensions. Exact details of how extensions work are subject to change based on feedback. It is possible for buggy extensions to leave your repository in a bad state, so don't rely on them with a production git repository without extensive testing. ## Registration To register an LFS extension, it must be added to the Git config. Each extension needs to define: * Its unique name. This will be used as part of the key in the pointer file. * The command to run on clean (when files are added to git). * The command to run on smudge (when files are downloaded and checked out). * The priority of the extension, which must be a unique, non-negative integer. The sequence `%f` in the clean and smudge commands will be replaced by the filename being processed.
Here's an example extension registration in the Git config: ``` [lfs "extension.foo"] clean = foo clean %f smudge = foo smudge %f priority = 0 [lfs "extension.bar"] clean = bar clean %f smudge = bar smudge %f priority = 1 ``` ## Clean When staging a file, Git invokes the LFS clean filter, as described earlier. If no extensions are installed, the LFS clean filter reads bytes from STDIN, calculates the SHA-256 signature, and writes the bytes to a temp file. It then moves the temp file into the appropriate place in .git/lfs/objects and writes a valid pointer file to STDOUT. When an extension is installed, LFS will invoke the extension to do additional processing on the bytes before writing them into the temp file. If multiple extensions are installed, they are invoked in the order defined by their priority. LFS will also insert a key in the pointer file for each extension that was invoked, indicating both the order that the extension was invoked and the oid of the file before that extension was invoked. All of that information is required to be able to reliably smudge the file later. Each new line in the pointer file will be of the form: `ext-{order}-{name} {hash-method}:{hash-of-input-to-extension}` This naming ensures that all extensions are written in both alphabetical and priority order, and also shows the progression of changes to the oid as it is processed by the extensions. Here's an example sequence, assuming extensions foo and bar are installed, as shown in the previous section. * Git passes the original contents of the file to LFS clean over STDIN. * LFS reads those bytes and calculates the original SHA-256 signature. * LFS streams the bytes to STDIN of `foo clean`, which is expected to write those bytes, modified or not, to its STDOUT. * LFS reads the bytes from STDOUT of `foo clean`, calculates the SHA-256 signature, and writes them to STDIN of `bar clean`, which then writes those bytes, modified or not, to its STDOUT. * LFS reads the bytes from STDOUT of `bar clean`, calculates the SHA-256 signature, and writes the bytes to a temp file. * When finished, LFS atomically moves the temp file into `.git/lfs/objects`. * LFS generates the pointer file, with some changes: * The oid and size keys are calculated from the final bytes written to LFS local storage. * LFS also writes keys named `ext-0-foo` and `ext-1-bar` into the pointer, along with their respective input oids. Here's an example pointer file, for a file processed by extensions foo and bar: ``` version https://git-lfs.github.com/spec/v1 ext-0-foo sha256:{original hash} ext-1-bar sha256:{hash after foo} oid sha256:{hash after bar} size 123 (ending \n) ``` Note: as an optimization, if an extension just does a pass-through, its key can be omitted from the pointer file. This will make smudging the file a bit more efficient since that extension can be skipped. LFS can detect a pass-through extension because the input and output oids will be the same. This implies that extensions must have no side effects other than writing to their STDOUT. Otherwise LFS has no way to know what extensions modified a file. ## Smudge When a file is checked out, Git invokes the LFS smudge filter, as described earlier. If no extensions are installed, the LFS smudge filter inspects the first 100 bytes of the bytes off STDIN, and if it is a pointer file, uses the oid to find the correct object in the LFS storage, and writes those bytes to STDOUT so that Git can write them to the working directory. 
If the pointer file indicates that extensions were invoked on that file, then those extensions must be installed in order to smudge. If they are not installed, not found, or unusable for any reason, LFS will fail to smudge the file and output an error indicating which extension is missing. Each of the extensions indicated in the pointer file must be invoked in reverse order to undo the changes they made to the contents of the file. After each extension is invoked, LFS will compare the SHA-256 signature of the bytes output by the extension with the oid stored in the pointer file as the original input to that same extension. Those signatures must match; otherwise, the extension did not undo its changes correctly. In that case, LFS fails to smudge the file, and outputs an error indicating which extension is failing. Here's an example sequence, indicating how LFS will smudge the pointer file shown in the previous section: * Git passes the bytes of the pointer file to LFS smudge over STDIN. Note that when using `git lfs checkout`, LFS reads the files directly from disk rather than off STDIN. The rest of the steps are unaffected either way. * LFS reads those bytes and inspects them to see if this is a pointer file. If it was not, the bytes would just be passed through to STDOUT. * Since it is a pointer file, LFS reads the whole file off STDIN, parses it, and determines that extensions foo and bar both processed the file, in that order. * LFS uses the value of the oid key to find the blob in the `.git/lfs/objects` folder, or downloads it from the server as needed. * LFS writes the contents of the blob to STDIN of `bar smudge`, which modifies them as needed and writes them to its STDOUT. * LFS reads the bytes from STDOUT of `bar smudge`, calculates the SHA-256 signature, and writes the bytes to STDIN of `foo smudge`, which modifies them as needed and writes them to its STDOUT. * LFS reads the bytes from STDOUT of `foo smudge`, calculates the SHA-256 signature, and writes the bytes to its own STDOUT. * At the end, ensure that the hashes calculated on the outputs of foo and bar match their corresponding input hashes from the pointer file. If not, write a descriptive error message indicating which extension failed to undo its changes. * Question: On error, should we overwrite the file in the working directory with the original pointer file? Can this be done reliably? ## Handling errors If there are errors in the configuration of LFS extensions, such as invalid extension names, duplicate priorities, etc., then any LFS commands that rely on them will abort with a descriptive error message. If an extension is unable to perform its task, it can indicate this error by returning a non-zero error code and writing a descriptive error message to its STDERR. The behavior on an error depends on whether we are cleaning or smudging. ### Clean If an extension fails to clean a file, it will return a non-zero error code and write an error message to its STDERR. Because the file was not cleaned correctly, it can't be added to the index. LFS will ensure that no pointer file is added or updated for failed files. In addition, it will display the error messages for any files that could not be cleaned (and keep those errors in a log), so that the user can diagnose the failure, and then rerun "git add" on those files. ### Smudge If an extension fails to smudge a file, it will return a non-zero error code and write an error message to its STDERR.
Because the file was not smudged correctly, LFS cannot update that file in the working directory. LFS will ensure that the pointer file is written to both the index and working directory. In addition, it will display the error messages for any files that could not be smudged (and keep those errors in a log), so that the user can diagnose the failure and then rerun `git-lfs checkout` to fix up any remaining pointer files. git-lfs-3.6.1/docs/howto/000077500000000000000000000000001472372047300151675ustar00rootroot00000000000000git-lfs-3.6.1/docs/howto/release-git-lfs.md000066400000000000000000000304661472372047300205050ustar00rootroot00000000000000# Releasing Git LFS The core team of Git LFS maintainers publishes releases on a cadence of their determining. ## Release Naming We follow Semantic Versioning standards as follows: * `MAJOR` releases are done on a scale of 2-4 years. These encompass breaking, incompatible API changes, or command-line interface changes that would cause existing programs or use-cases scripted against Git LFS to break. * `MINOR` releases are done on a scale of 2-6 months. These encompass new features, bug fixes, and other "medium"-sized changes into a semi-regular release schedule. * `PATCH` releases are done on the scale of weeks to months. These encompass critical bug fixes, but lack new features. They are amended to a `MINOR` release "series", or, if serious enough (e.g., security vulnerabilities, etc.) are backported to previous versions. ## Release Artifacts We package several artifacts for each tagged release. They are: 1. `git-lfs-@{os}-@{arch}-v@{version}.tar.gz` for the following values: | | operating system | architecture | | --- | ---------------- | ------------ | | git-lfs-darwin-amd64-v@{version}.tar.gz | darwin | amd64 | | git-lfs-darwin-arm64-v@{version}.tar.gz | darwin | arm64 | | git-lfs-freebsd-386-v@{version}.tar.gz | freebsd | 386 | | git-lfs-freebsd-amd64-v@{version}.tar.gz | freebsd | amd64 | | git-lfs-linux-386-v@{version}.tar.gz | linux (generic) | 386 | | git-lfs-linux-amd64-v@{version}.tar.gz | linux (generic) | amd64 | | git-lfs-linux-arm-v@{version}.tar.gz | linux (generic) | arm | | git-lfs-linux-arm64-v@{version}.tar.gz | linux (generic) | arm64 | | git-lfs-linux-ppc64le-v@{version}.tar.gz | linux (generic) | ppc64le | | git-lfs-linux-s390x-v@{version}.tar.gz | linux (generic) | s390x | | git-lfs-linux-loong64-v@{version}.tar.gz | linux (generic) | loong64 | 2. `git-lfs-windows-@{arch}-v@{version}.zip` for the following values: | | operating system | architecture | | --- | ---------------- | ------------ | | git-lfs-windows-386-v@{version}.zip | windows | 386 | | git-lfs-windows-amd64-v@{version}.zip | windows | amd64 | | git-lfs-windows-arm64-v@{version}.zip | windows | arm64 | 3. `git-lfs-windows-v@{version}.exe`, a signed Windows installer that contains both `-x86` and `-x64` copies of Git LFS. 4. `*.deb` and `*.rpm` packages for all of the distributions named in `script/packagegcloud.rb`. ## Development Philosophy We do all major development on the `main` branch, and assume it to be passing tests at all times. New features are added via the feature-branch workflow, or (optionally) from a contributor's fork. This is done so that `main` can progress and grow new features, while historical releases such as `vM.N.0` can receive bug fixes as they are applied to `main`, eventually culminating in a `vM.N.1` (and so on) release. ## Building a release Let release `vM.N.P` denote the version that we are _releasing_.
When `P` is equal to zero, we say that we are releasing a MINOR version of Git LFS in the `vM.N`-series, unless `N` is also equal to zero, in which case we are releasing a MAJOR version. Conversely, if `P` is not equal to zero, we are releasing a PATCH version. 1. First, we write the release notes and do the housekeeping required to indicate a new version. For a MAJOR or MINOR version, we start with a `main` branch which is up to date with the latest changes from the remote and then check out a new `release-next` branch from that base. If we are releasing a PATCH version, we create a `release-M.N` branch with cherry-picked merges from the `main` branch, as described in the [instructions](#building-patch-versions) below, and then check out the `release-next` branch from that base. We next perform the following steps to prepare the `release-next` branch: * Run `script/changelog` and categorize each merge commit as a feature, bug fix, miscellaneous change, or a change to be skipped and ignored. Ensure that your `~/.netrc` credentials are up-to-date in order to make requests to the GitHub API, or use a `GITHUB_TOKEN` environment variable. The `changelog` script will write a portion of the new CHANGELOG to stdout, which you should copy and paste into `CHANGELOG.md`, along with an H2-level heading containing the new version and the expected release date. This heading should be consistent with the existing style in the document. For a MAJOR release, use `script/changelog v(M-1).L.0...HEAD`, where `(M-1)` is the previous MAJOR release number and `L` is the final MINOR release number in that series. For a MINOR release, use `script/changelog vM.(N-1).0...HEAD`, where `(N-1)` is the previous MINOR release number, and for a PATCH release, use `script/changelog --patch vM.N.(P-1)...HEAD`, where `(P-1)` is the previous PATCH release number. * Optionally write 1-2 paragraphs summarizing the release and calling out community contributions. * If we are releasing a MAJOR or MINOR version and not a PATCH, and if the most recent non-PATCH release was followed by a series of one or more PATCH releases, include any changes listed in the CHANGELOG of that series' release branch in the new release's CHANGELOG. (For a new MAJOR version, the prior release branch would be named `release-(M-1).L`, following the terminology defined above, while for a new MINOR version the prior release branch would be named `release-M.(N-1)`.) * Run `script/update-version vM.N.P` to update the version number in all of the relevant files. Note that this script requires a version of `sed(1)` compatible with the GNU implementation. * Adjust the date in the `debian/changelog` entry to reflect the expected release date rather than the current date. * Commit all the files changed in the steps above in a single new commit: ```ShellSession $ git commit -m 'release: vM.N.P' ``` 2. Then, push the `release-next` branch and create a pull request with your changes from the branch. If you're building a MAJOR or MINOR release, set the base to the `main` branch. Otherwise, set the base to the `release-M.N` branch. * Add the `release` label to the PR. * In the PR description, consider uploading builds for implementers to use in testing. These can be generated from a local tag, which does not need to be signed (but must be annotated). Check that the local version of Go is equivalent to the most recent one used by the GitHub Actions workflows for the release branch, which may be different from that used on the `main` branch.
For a patch release in particular you may need to downgrade your local Go version. The build artifacts will be placed in the `bin/releases` directory and may be uploaded into the PR from there: ```ShellSession $ git tag -m vM.N.P-pre vM.N.P-pre $ make release $ ls bin/releases ``` * Notify the `@git-lfs/releases` and `@git-lfs/implementers` teams, collections of humans who are interested in Git LFS releases. * Ensure that the normal Continuous Integration workflow for PRs that runs automatically in GitHub Actions succeeds fully. * As the GitHub Actions release workflow will not run for PRs, consider creating an annotated tag with the `-pre` suffix and pushing the tag, which will trigger a run of the release workflow that does not upload artifacts to Packagecloud. Alternatively, in a private clone of the repository, create such a tag from the `release-next` branch plus one commit to change the repository name in `script/upload`, and push the tag so Actions will run the release workflow. Ensure that the workflow succeeds (excepting the Packagecloud upload step, which will be skipped). 3. Once approved and verified, merge the pull request you created in the previous step. Locally, create a GPG-signed tag on the merge commit called `vM.N.P`: ```ShellSession $ git show -q --pretty=%s%n%b HEAD Merge pull request #xxxx from git-lfs/release-next release: vM.N.P $ git tag -s vM.N.P -m vM.N.P $ git describe HEAD vM.N.P $ git show -q --pretty=%s%d%n%b vM.N.P tag vM.N.P Tagger: ... vM.N.P -----BEGIN PGP SIGNATURE----- ... -----END PGP SIGNATURE----- Merge pull request #xxxx from git-lfs/release-next (tag: vM.N.P) release: vM.N.P ``` 4. Push the tag, via: ```ShellSession $ git push origin vM.N.P ``` This will kick off the process of building the release artifacts. This process will take somewhere between 45 minutes and an hour. When it's done, you'll end up with a draft release in the repository for the version in question. 5. From the command line, finalize the release process by signing the release: ```ShellSession $ script/upload --finalize vM.N.P ``` Note that this script requires GnuPG as well as Ruby (with the OpenSSL gem) and several other tools. You will need to provide your GitHub credentials in your `~/.netrc` file or via a `GITHUB_TOKEN` environment variable. If you want to inspect the data before approving it, pass the `--inspect` option, which will drop you to a shell and let you look at things. If the shell exits successfully, the build will be signed; otherwise, the process will be aborted. 6. Publish the release on GitHub, assuming it looks correct. 7. Move any remaining items out of the milestone for the current release to a future release and close the milestone. 8. Update the `_config.yml` file in [`git-lfs/git-lfs.github.com`](https://github.com/git-lfs/git-lfs.github.com), similar to the following: ```diff --- _config.yml +++ _config.yml @@ -1,7 +1,7 @@ # Site settings title: "Git Large File Storage" description: "Git Large File Storage (LFS) replaces large files such as audio samples, videos, datasets, and graphics with text pointers inside Git, while storing the file contents on a remote server like GitHub.com or GitHub Enterprise." -git-lfs-release: M.(N-1).0 +git-lfs-release: M.N.0 url: "https://git-lfs.com" ``` 9. Create a GitHub PR to update the Homebrew formula for Git LFS with the `brew bump-formula-pr` command on a macOS system. 
The SHA-256 value should correspond with the packaged artifact containing the new release's source files, which is available at the given URL: ``` $ brew tap homebrew/core $ brew bump-formula-pr \ --url https://github.com/git-lfs/git-lfs/releases/download/vM.N.P/git-lfs-vM.N.P.tar.gz \ --sha256 <sha256> \ git-lfs ``` ### Building PATCH versions When building a PATCH release, we cherry-pick merges from `main` to the `vM.N` release branch, creating the branch first if it does not exist, and then use that branch as the base for the PATCH release. 1. If the `release-M.N` branch does not already exist, create it from the corresponding MINOR release tag (or MAJOR release tag, if no MINOR releases have been made since the last MAJOR release): ```ShellSession $ git checkout -b release-M.N vM.N.0 ``` If the release branch already exists because this is not the first patch release for the given MINOR (or MAJOR) release, simply check out the `release-M.N` branch, and ensure that you have the latest changes from the remote. 2. Gather a set of potential candidates to backport to the `release-M.N` branch with: ```ShellSession $ git log --merges --first-parent vM.N.(P-1)...main ``` 3. For each merge that you want to backport, run: ```ShellSession $ git cherry-pick -m1 <sha> ``` This will cherry-pick the merge onto your release branch, using the `-m1` option to specify that the first parent of the merge corresponds to the mainline. 4. Then follow the [guidelines](#building-a-release) above, using the `release-M.N` branch as the base for the new PATCH release. git-lfs-3.6.1/docs/l10n.md000066400000000000000000000110651472372047300151260ustar00rootroot00000000000000# Localization of Git LFS Git LFS now has support for a localization framework using [Gotext](https://github.com/leonelquinteros/gotext), a Go library based around the popular gettext format. Localization is important since the majority of people on the planet don't speak English and people should be able to use software in the language they're most comfortable with. In addition, having access to localized software is a great way for people learning a language to improve their technical vocabulary. ## Choosing What to Translate Here are some things that should be translated: * Status messages * Error messages * Help output * Generally, anything that the user sees in the normal course of operation Here are some things that should not be translated: * Trace output (e.g., calls to `tracerx.Printf`) * Strings which have a functional or protocol use (e.g., `Basic` and other authentication schemes, HTTP verbs) * Names of programs, commands, command-line options, or subcommands (although you _should_ translate their help output) * Personal names, names of businesses, email addresses, and other proper nouns (most of which we should not typically have in our codebase) * The names of Git LFS and Git themselves ## Making Text Translatable The easiest way to make a string translatable is to wrap the string and any formatting arguments it takes in a call to `tr.Tr.Get`.
For example, you might write this: ``` Print(tr.Tr.Get("fetch: Fetching reference %s", ref.Name)) ``` If you have a string which varies based on a number, use `tr.Tr.GetN`, provide the singular string, the plural string, the number upon which it varies, and then the arguments: ``` Print(tr.Tr.GetN( "fetch: Fetching changes within %v day of %v", "fetch: Fetching changes within %v days of %v", fetchconf.FetchRecentCommitsDays, fetchconf.FetchRecentCommitsDays, refName, )) ``` Here are some tips for making your code as easy to translate as possible: * Avoid creating strings out of several independent words. For example, rather than taking a string like "Upload" or "Download" and appending "ing objects" to it, write the strings out in full as "Uploading objects" and "Downloading objects". Not all languages compose words and phrases in the same way, and using full sentences or phrases makes it easier to translate. * If you have a string which varies based on number, use `tr.Tr.GetN`, which handles pluralization correctly. In some languages, zero is singular instead of plural. Also, unlike English, some languages have more than just two forms of a word (e.g., singular, dual, and plural), or words may not vary in number at all. This also lets users in English see correctly pluralized strings in all cases. * Only mark literal strings for translation, and mark them wherever they appear in the codebase. The `xgotext` binary rips out only literal strings and doesn't handle variables, so strings that are not marked for translation at the same place they appear in the text won't end up in the translation files to be translated. * Try to use text in strings that is simple and direct, avoiding the use of slang, idioms, or regional varieties of English. Slang, idioms, and regional vocabulary are hard to translate and they aren't as accessible to people who are learning English as a second language. ## Guidelines for Translators If you're interested in submitting a translation, please open an issue, and we'll work with you to get what you need. We'll let you know a little in advance of our next planned release to give you time to update the translations. When choosing to create a translation, we ask that you write a generic translation when possible (e.g., `es` rather than `es_MX`). We realize that is not always achievable (such as with `pt_BR` and `pt_PT`), but when possible, it makes the translations more accessible. For vocabulary, we recommend using the same rules as Git whenever possible. If there's a decision to be made, picking the more universally intelligible option (e.g., _quatre-vingts_ instead of _huitante_ in French or _ustedes_ instead of _vosotros_ in Spanish) is preferable. We'll generally defer to your experience on these matters. To extract the strings for translation, run the following commands: ``` $ go install -v github.com/leonelquinteros/gotext/cli/xgotext $ xgotext -in . -out po -v ``` Be aware that at the moment, `xgotext` is extremely slow. You may wish to get a coffee while you wait. ## Help If you're unclear on the best way to do things or you find a problem, feel free to open an issue or discussion, as appropriate, and we'll be happy to work with you to find a solution. 
git-lfs-3.6.1/docs/man/000077500000000000000000000000001472372047300146025ustar00rootroot00000000000000git-lfs-3.6.1/docs/man/git-lfs-checkout.adoc000066400000000000000000000052561472372047300206120ustar00rootroot00000000000000= git-lfs-checkout(1) == NAME git-lfs-checkout - Update working copy with file content if available == SYNOPSIS `git lfs checkout` [<glob-pattern>...] + `git lfs checkout` --to <path> {--base|--ours|--theirs} <file> == DESCRIPTION Try to ensure that the working copy contains file content for Git LFS objects for the current ref, if the object data is available. Does not download any content; see git-lfs-fetch(1) for that. Checkout scans the current ref for all LFS objects that would be required, then where a file is either missing in the working copy, or contains placeholder pointer content with the same SHA, the real file content is written, provided we have it in the local store. Modified files are never overwritten. One or more <glob-pattern>s may be provided as arguments to restrict the set of files that are updated. Glob patterns are matched as per the format described in gitignore(5). When used with `--to` and the working tree is in a conflicted state due to a merge, this option checks out one of the three stages of a conflicting Git LFS object into a separate file (which can be outside of the work tree). This can make using diff tools to inspect and resolve merges easier. A single Git LFS object's file path must be provided in `<file>`. == OPTIONS `--base`:: Check out the merge base of the specified file. `--ours`:: Check out our side (that of the current branch) of the conflict for the specified file. `--theirs`:: Check out their side (that of the other branch) of the conflict for the specified file. `--to <path>`:: If the working tree is in a conflicted state, check out the portion of the conflict specified by `--base`, `--ours`, or `--theirs` to the given path. == EXAMPLES * Check out all files that are missing or placeholders: .... $ git lfs checkout .... * Check out a specific couple of files: .... $ git lfs checkout path/to/file1.png path/to/file2.png .... * Check out a path with a merge conflict into separate files: .... # Attempt merge with a branch that has a merge conflict $ git merge conflicting-branch CONFLICT (content): Merge conflict in path/to/conflicting/file.dat # Check out versions of the conflicting file into temp files $ git lfs checkout ours.dat --ours path/to/conflicting/file.dat $ git lfs checkout theirs.dat --theirs path/to/conflicting/file.dat # Compare conflicting versions in ours.dat and theirs.dat, # then resolve conflict (e.g., by choosing one version over # the other, or creating a new version) # Clean up and continue with merge $ rm ours.dat theirs.dat $ git add path/to/conflicting/file.dat $ git merge --continue .... == SEE ALSO git-lfs-fetch(1), git-lfs-pull(1), gitignore(5). Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-clean.adoc000066400000000000000000000012141472372047300200570ustar00rootroot00000000000000= git-lfs-clean(1) == NAME git-lfs-clean - Git clean filter that converts large files to pointers == SYNOPSIS `git lfs clean` <path> == DESCRIPTION Read the contents of a large file from standard input, and write a Git LFS pointer file for that file to standard output. Clean is typically run by Git's clean filter, configured by the repository's Git attributes. Clean is not part of the user-facing Git plumbing commands. To preview the pointer of a large file as it would be generated, see the git-lfs-pointer(1) command.
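For example, to preview the pointer Git LFS would generate for a file (the path here is only illustrative), the related git-lfs-pointer(1) command can be used:

....
$ git lfs pointer --file=path/to/file.png
....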
== SEE ALSO git-lfs-install(1), git-lfs-push(1), git-lfs-pointer(1), gitattributes(5). Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-clone.adoc000066400000000000000000000037401472372047300201010ustar00rootroot00000000000000= git-lfs-clone(1) == NAME git-lfs-clone - Efficiently clone an LFS-enabled repository == SYNOPSIS `git lfs clone` [git clone options] <repository> [<directory>] == DESCRIPTION Clone an LFS-enabled Git repository more efficiently by disabling LFS during the git clone, then performing a 'git lfs pull' directly afterwards. 'git lfs clone' also installs all of the repo-level hooks (.git/hooks) that LFS requires to operate. If `--separate-git-dir` is given, the hooks will be installed there. This is faster than a regular 'git clone' because that will download LFS content using the smudge filter, which is executed individually per file in the working copy. This is relatively inefficient compared to the batch mode and parallel downloads performed by 'git lfs pull'. == OPTIONS All options supported by 'git clone' `-I <paths>`:: `--include=<paths>`:: See <<_include_and_exclude>>. `-X <paths>`:: `--exclude=<paths>`:: See <<_include_and_exclude>>. `--skip-repo`:: Skip installing repo-level hooks (.git/hooks) that LFS requires. Disabled by default. == INCLUDE AND EXCLUDE You can configure Git LFS to only fetch objects to satisfy references in certain paths of the repo, and/or to exclude certain paths of the repo, to reduce the time you spend downloading things you do not use. In your Git configuration or in a `.lfsconfig` file, you may set either or both of `lfs.fetchinclude` and `lfs.fetchexclude` to comma-separated lists of paths. If `lfs.fetchinclude` is defined, Git LFS objects will only be fetched if their path matches one in that list, and if `lfs.fetchexclude` is defined, Git LFS objects will only be fetched if their path does not match one in that list. Paths are matched using wildcard matching as per gitignore(5). Note that using the command-line options `-I` and `-X` override the respective configuration settings. Setting either option to an empty string clears the value. == SEE ALSO git-clone(1), git-lfs-pull(1), gitignore(5). Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-completion.adoc000066400000000000000000000302631472372047300211520ustar00rootroot00000000000000= git-lfs-completion(1) == NAME git-lfs-completion - Shell tab-completion script generation for Git LFS == SYNOPSIS `git lfs completion bash` + `git lfs completion fish` + `git lfs completion zsh` == DESCRIPTION Outputs a script which, when executed in a session of the given shell, will implement command-line tab-completion of Git LFS commands. Each shell requires a different set of commands to load the completion script, either for an individual session or automatically whenever a new session is started. See the <<_examples>> section for details. The script for each shell provides tab-completion of Git LFS command names and flags, but does not offer completion of Git terms such as the names of Git remotes, branches, or tags. (This may change in a future release of Git LFS.) By default, each shell's script supports Git LFS command completion when prompted with a tab character entered following the program name `git-lfs`. For instance, `git-lfs [Tab]` will list the available Git LFS commands such as `fetch`, `migrate`, and `pull`, and `git-lfs pull --[Tab]` will list the possible flags for the git-lfs-pull(1) command.
However, most users will be accustomed to using Git LFS as a program invoked by Git, e.g., `git lfs checkout` or `git lfs pull`. To enable tab-completion of Git LFS commands in this case, tab-completion for regular Git commands must be active as well. Assuming this is true, the scripts generated by the `git lfs completion` command should support completion of Git LFS commands when a tab character is entered following `git lfs`, such as `git lfs [Tab]` to list all available Git LFS commands or `git lfs pull --[Tab]` to list that command's possible flags. See the <<_shells>> section for details regarding Git tab-completion in the supported shells. As is common for shell tab-completion, a space must be entered after the `git-lfs` or `git lfs` command names before the tab character will cause the Git LFS completion script to be executed. Without the space character, any active shell tab-completion will instead search for programs whose names have a matching prefix. The completion scripts make use of "hidden" Git LFS commands to request completion results from the Git LFS client, specifically the `git lfs \__complete` and `git lfs __completeNoDesc` commands. These commands may be removed in the future, or their action may be altered without notice, and so users should not call them directly or consider them to be part of the officially documented Git LFS command-line interface. == SHELLS The `git lfs completion` command supports three Unix shells, GNU Bash (Bourne Again SHell), fish, and Zsh (Z shell). Tab-completion is configured differently in each, both in general and specifically for Git and therefore also for Git LFS. On Windows, users who have Git LFS installed as part of the Git for Windows project have access to an emulation of the Bash shell which is packaged with Git for Windows. * Bash: + While Bash does not offer tab-completion for Git by default, a completion script is available from the Git project, as described in the documentation: + https://git-scm.com/book/en/v2/Appendix-A%3A-Git-in-Other-Environments-Git-in-Bash + The script returned by the `git lfs completion bash` command should be compatible with this Git completion script and allow for tab-completion of Git LFS commands entered using either the `git lfs` or `git-lfs` command formats. + After retrieving a copy of the Git completion script appropriate to your version of Git, run the following command to load the script in your current session: + .... $ source git-completion.bash .... + To load the script in all future sessions, add this command to your Bash startup files, or place the file in one of the locations searched by the utilities from the bash-completion package. + The bash-completion project, which is separate from the GNU Bash shell itself, includes a large number of command completion scripts and utilities to load them automatically when starting a new session: + https://github.com/scop/bash-completion + The Git LFS completion script for Bash depends on several functions provided by the bash-completion package, and so that package must be installed in order to use tab-completion with Git LFS commands. (It is not required by the Git completion script for Bash, however.) 
+ Assuming the bash-completion package is installed, to load the Git completion script in all future sessions started by the current user (but not all users), place the `git-completion.bash` script in the `bash-completion/completions` directory within the location defined by the `XDG_DATA_HOME` environment variable, or, if that variable is not defined, the `~/.local/share` directory. For example: + .... $ cp git-completion.bash ~/.local/share/bash-completion/completions/git .... + For the bash-completion utilities to load the Git completion script for all users, place the Git completion script in the appropriate system directory. On Linux this may be `/usr/share/bash-completion/completions`, and on macOS, when bash-completion is installed using Homebrew, it may be the `share/bash-completion/completions` directory within the location returned by the `brew --prefix` command. However, these locations will vary depending on how the bash-completion package was installed. * fish: + The fish shell provides its own implementation of Git command tab-completion, defined in a `git.fish` file which is likely present by default in the list of locations the shell searches for completion scripts. + The script returned by the `git lfs completion fish` command should be compatible with this implementation and allow for tab-completion of Git LFS commands entered using either the `git lfs` or `git-lfs` command formats. * Zsh: + To enable tab-completion in Zsh for any commands, the `compinit` function must be loaded first with a command similar to the following: + .... % autoload -Uz compinit && compinit .... + This may be done individually for each session, or added to a startup file such as `~/.zshrc` or `/etc/zshrc` so it will apply to all future sessions, either for the current user or for all users. + Zsh reads completion functions from the locations specified in the `FPATH` environment variable, with the paths listed first taking precedence. This list is also available as an array in the `fpath` variable; the shell automatically synchronizes `FPATH` and `fpath`. + The Z shell provides its own implementation of Git command tab-completion, defined in a `_git` file which is likely present in one of the locations specified in `fpath`. + The script returned by the `git lfs completion zsh` command should be compatible with this implementation and allow for tab-completion of Git LFS commands entered using either the `git lfs` or `git-lfs` command formats. + The Git project also offers completion scripts for Zsh, in the form of the same `git-completion.bash` script used for the Bash shell, plus a `git-completion.zsh` script which is intended to be renamed to `_git` and placed in a location listed in `fpath`. These scripts are not compatible with the one returned by the `git lfs completion zsh` command, and if they are used in conjunction with that script, tab-completion of Git LFS commands will not function properly when initiated using the `git lfs` command format. + On macOS, if Git is installed using Homebrew, the Git project's Zsh completion scripts may be installed in a location where they take precedence over the implementation provided by Zsh. In this case, to make full use of the script returned by `git lfs completion zsh`, the `_git` completion script file installed by Homebrew for Git must be moved or removed so it does not precede the Z shell's own Git completion script in the path order specified by `fpath`.
== EXAMPLES === Loading completions for the current shell session To load Git LFS command completions for the current shell session only, execute the script generated by `git lfs completion` directly. * Bash: + .... $ source <(git lfs completion bash) .... + Note that with Bash 3.2, the `source` builtin command will not properly execute the output of a process substitution, and so it will be necessary to use a temporary file instead: + .... $ git lfs completion bash >git-lfs-completion.bash $ source git-lfs-completion.bash .... * fish: + .... > git lfs completion fish | source .... * zsh: + Note that the `compinit` function must also be executed to enable tab-completion, as described in the <<_shells>> section. + .... % source <(git lfs completion zsh) .... === Automatically loading completions for future shell sessions To load Git LFS command completions in all future shell sessions, store the script generated by `git lfs completion` in a location where it will be read by your shell during session startup. * Bash: + As mentioned in the <<_shells>> section, the bash-completion package is required by the Git LFS completion scripts for Bash, and it also provides convenient utilities which search for completion scripts in a set of defined locations and execute them during session startup. + To load the Git LFS completion script in all future sessions started by the current user (but not other users), store the script in the `bash-completion/completions` directory within the location defined by the `XDG_DATA_HOME` environment variable, or, if that variable is not defined, the `~/.local/share` directory. For example: + .... $ mkdir -p ~/.local/share/bash-completion/completions $ git lfs completion bash >~/.local/share/bash-completion/completions/git-lfs .... + To load the completion script in all users' future sessions, store the script instead in an appropriate system directory, which on Linux may be `/usr/share/bash-completion/completions`, or on macOS, if bash-completion was installed using Homebrew, may be the `share/bash-completion/completions` directory within the location returned by the `brew --prefix` command. These locations will vary depending on how the bash-completion package was installed and configured, however. For details, check the documentation relevant to your system's bash-completion package. * fish: + The fish shell searches for completion scripts in a number of locations, as described in the documentation: + https://fishshell.com/docs/current/completions.html#where-to-put-completions + To load the Git LFS completion script in all sessions started by the current user (but not other users), store the script in the `fish/completions` directory within the location defined by the `XDG_CONFIG_HOME` environment variable, or, if that variable is not defined, the `~/.config` directory. For example: + .... > mkdir -p ~/.config/fish/completions > git lfs completion fish >~/.config/fish/completions/git-lfs.fish .... + To load the completion script in all users' future sessions, store the script in one of the other locations searched by the shell, such as under `fish/completions` within the shell's system configuration directory. On Linux this is typically `/etc/fish/completions`. On macOS, when the fish shell is installed using Homebrew, this would normally be the `etc/fish/completions` directory within the location returned by the `brew --prefix` command. * zsh: + Note that the `compinit` function must also be executed to enable tab-completion, as described in the <<_shells>> section.
+ To load the Git LFS completion script in all sessions, store the script as a file named `_git-lfs` in one of the locations listed in the `fpath` variable. The specific location selected may affect whether the completion script is loaded only for sessions started by the current user or for all users' sessions, depending on how the set of paths in the `fpath` array is constructed. + The following command will store the script in the first location defined in `fpath`: + .... % git lfs completion zsh >"${fpath[1]}/_git-lfs" .... + You may also prefer to store the file in another location defined in `fpath`, for instance, the last location, in which case `${fpath[-1]}` should be used instead. + It is also possible to add a custom location to the list in `fpath` and store the Git LFS completion script there. To do this, add the commands that update the `fpath` variable to a startup file such as `~/.zshrc` or `/etc/zshrc` so they will apply to all future sessions, either for the current user or for all users. == SEE ALSO Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-config.adoc000066400000000000000000000442701472372047300202510ustar00rootroot00000000000000= git-lfs-config(5) == NAME git-lfs-config - Configuration options for git-lfs == CONFIGURATION FILES git-lfs reads its configuration from any file supported by `git config -l`, including all per-repository, per-user, and per-system Git configuration files. Additionally, a small number of settings can be specified in a file called `.lfsconfig` at the root of the repository; see the "LFSCONFIG" section for more details. This configuration file is useful for setting options such as the LFS URL or access type for all users of a repository, especially when these differ from the default. The `.lfsconfig` file uses the same format as `.gitconfig`. If the `.lfsconfig` file is missing, the index is checked for a version of the file, and that is used instead. If both are missing, `HEAD` is checked for the file. If the repository is bare, only `HEAD` is checked. This order may change for checkouts in the future to better match Git's behavior. Settings from Git configuration files override the `.lfsconfig` file. This allows you to override settings like `lfs.url` in your local environment without having to modify the `.lfsconfig` file. Most options regarding git-lfs are contained in the `[lfs]` section, meaning they are all named `lfs.foo` or similar, although occasionally an lfs option can be scoped inside the configuration for a remote. == LIST OF OPTIONS === General settings * `lfs.url` / `remote.<remote>.lfsurl` + The url used to call the Git LFS remote API. Default blank (derive from clone URL). * `lfs.pushurl` / `remote.<remote>.lfspushurl` + The url used to call the Git LFS remote API when pushing. Default blank (derive from either LFS non-push urls or clone url). * `remote.lfsdefault` + The remote used to find the Git LFS remote API. `lfs.url` and `branch.*.remote` for the current branch override this setting. If this setting is not specified and there is exactly one remote, that remote is picked; otherwise, the default is `origin`. * `remote.lfspushdefault` + The remote used to find the Git LFS remote API when pushing. `lfs.url` and `branch.*.pushremote` for the current branch override this setting. If this setting is not set, `remote.pushdefault` is used, or if that is not set, the order of selection is used as specified in the `remote.lfsdefault` above.
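+
As a brief, hedged illustration of the two remote-selection settings above, the following configures a repository with several remotes so that Git LFS uses a hypothetical `upstream` remote for both fetching and pushing (the remote name is an example only):
+
....
$ git config remote.lfsdefault upstream
$ git config remote.lfspushdefault upstream
....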
* `lfs.remote.autodetect` + This boolean option enables the remote autodetect feature within Git LFS. LFS tries to derive the corresponding remote from the commit information and, in case of success, ignores the settings defined by `remote.lfsdefault` and `remote.<remote>.lfsurl`. * `lfs.remote.searchall` + This boolean option enables Git LFS to search all registered remotes to find LFS data. This is a fallback mechanism executed only if the LFS data could not be found via the ordinary heuristics as described in `remote.lfsdefault`, `remote.<remote>.lfsurl` and, if enabled, `lfs.remote.autodetect`. * `lfs.dialtimeout` + Sets the maximum time, in seconds, that the HTTP client will wait to initiate a connection. This does not include the time to send a request and wait for a response. Default: 30 seconds * `lfs.tlstimeout` + Sets the maximum time, in seconds, that the HTTP client will wait for a TLS handshake. Default: 30 seconds. * `lfs.activitytimeout` / `lfs.https://<host>.activitytimeout` + Sets the maximum time, in seconds, that the HTTP client will wait for the next TCP read or write. If < 1, no activity timeout is used at all. Default: 30 seconds * `lfs.keepalive` + Sets the maximum time, in seconds, for the HTTP client to maintain keepalive connections. Default: 30 minutes. * `lfs.ssh.automultiplex` + When using the pure SSH-based protocol, whether to multiplex requests over a single connection when possible. This option requires the use of OpenSSH or a compatible SSH client. Default: false on Windows, otherwise true. * `lfs.ssh.retries` + Specifies the number of times Git LFS will attempt to obtain authorization via SSH before aborting. Default: 5. * `core.askpass`, GIT_ASKPASS + Given as a program and its arguments, this is invoked when authentication is needed against the LFS API. The contents of stdout are interpreted as the password. * `lfs.cachecredentials` + Enables in-memory SSH and Git Credential caching for a single 'git lfs' command. Default: enabled. * `lfs.storage` + Allows overriding the LFS storage directory. A non-absolute path is interpreted as relative to the Git repository directory (usually `.git`). + Note: you should not run `git lfs prune` if you have different repositories sharing the same storage directory. + Default: `lfs` in the Git repository directory (usually `.git/lfs`). * `lfs.largefilewarning` + Warn when a file is 4 GiB or larger. Such files will be corrupted when using Windows (unless smudging is disabled) with a Git for Windows version less than 2.34.0 due to a limitation in Git. Default: true if the version is less than 2.34.0, false otherwise. === Upload and download transfer settings These settings control how the upload and download of LFS content occurs. * `lfs.concurrenttransfers` + The number of concurrent uploads/downloads. Default 8. * `lfs.basictransfersonly` + If set to true, only basic HTTP upload/download transfers will be used, ignoring any more advanced transfers that the client/server may support. This is primarily to work around bugs or incompatibilities. + The git-lfs client supports basic HTTP downloads, resumable HTTP downloads (using `Range` headers), and resumable uploads via the tus.io protocol. Custom transfer methods can be added via `lfs.customtransfer` (see next section). However, setting this value to true limits the client to simple HTTP. * `lfs.tustransfers` + If set to true, this enables resumable uploads of LFS objects through the tus.io API. Once this feature is finalized, this setting will be removed, and tus.io uploads will be available for all clients.
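+
As a brief, hedged illustration of the transfer settings above, the following limits a repository to plain HTTP transfers and lowers the concurrency (the values shown are arbitrary examples):
+
....
$ git config lfs.basictransfersonly true
$ git config lfs.concurrenttransfers 4
....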
* `lfs.standalonetransferagent` + Allows the specified custom transfer agent to be used directly for transferring files, without asking the server how the transfers should be made. The custom transfer agent has to be defined in an `lfs.customtransfer.<name>` settings group. * `lfs.customtransfer.<name>.path` + `lfs.customtransfer.<name>` is a settings group which defines a custom transfer hook which allows you to upload/download via an intermediate process, using any mechanism you like (rather than just HTTP). `path` should point to the process you wish to invoke. The protocol between the git-lfs client and the custom transfer process is documented at https://github.com/git-lfs/git-lfs/blob/main/docs/custom-transfers.md + <name> must be a unique identifier that the LFS server understands. When calling the LFS API the client will include a list of supported transfer types. If the server also supports this named transfer type, it will select it and actions returned from the API will be in relation to that transfer type (may not be traditional URLs for example). Only if the server accepts <name> as a transfer it supports will this custom transfer process be invoked. * `lfs.customtransfer.<name>.args` + If the custom transfer process requires any arguments, these can be provided here. This string will be expanded by the shell. * `lfs.customtransfer.<name>.concurrent` + If true (the default), git-lfs will invoke the custom transfer process multiple times in parallel, according to `lfs.concurrenttransfers`, splitting the transfer workload between the processes. * `lfs.customtransfer.<name>.direction` + Specifies which direction the custom transfer process supports, either "download", "upload", or "both". The default if unspecified is "both". * `lfs.transfer.maxretries` + Specifies how many retries LFS will attempt per OID before marking the transfer as failed. Must be an integer which is at least one. If the value is not an integer, is less than one, or is not given, a value of eight will be used instead. * `lfs.transfer.maxretrydelay` + Specifies the maximum time in seconds LFS will wait between each retry attempt. LFS uses exponential backoff for retries, doubling the time between each retry until reaching this limit. If a server requests a delay using the `Retry-After` header, the header value overrides the exponential delay for that attempt and is not limited by this option. + Must be an integer which is not negative. Use zero to disable delays between retries unless requested by a server. If the value is not an integer, is negative, or is not given, a value of ten will be used instead. * `lfs.transfer.maxverifies` + Specifies how many verification requests LFS will attempt per OID before marking the transfer as failed, if the object has a verification action associated with it. Must be an integer which is at least one. If the value is not an integer, is less than one, or is not given, a default value of three will be used instead. * `lfs.transfer.enablehrefrewrite` + If set to true, this enables rewriting the href of LFS objects using `url.*.insteadof/pushinsteadof` config. `pushinsteadof` is used only for uploading, and `insteadof` is used for downloading and for uploading when `pushinsteadof` is not set. * `lfs.transfer.batchSize` + The number of objects to download/upload sent in a single batch request to the LFS server. Default is 100.
+ This value should be changed with caution, as it can have a significant impact on the performance of the LFS server, and the server is free to return an HTTP 413 status code if this value is too high, as the Batch API specification states. === Push settings * `lfs.allowincompletepush` + When pushing, allow objects to be missing from the local cache without halting a Git push. Default: false. === Fetch settings * `lfs.fetchinclude` + When fetching, only download objects which match any entry on this comma-separated list of paths/filenames. Wildcard matching is as per gitignore(5). See git-lfs-fetch(1) for examples. * `lfs.fetchexclude` + When fetching, do not download objects which match any item on this comma-separated list of paths/filenames. Wildcard matching is as per gitignore(5). See git-lfs-fetch(1) for examples. * `lfs.fetchrecentrefsdays` + If non-zero, fetches refs which have commits within N days of the current date. Only local refs are included unless lfs.fetchrecentremoterefs is true. Also used as a basis for pruning old files. The default is 7 days. * `lfs.fetchrecentremoterefs` + If true, fetches remote refs (for the remote you're fetching) as well as local refs in the recent window. This is useful to fetch objects for remote branches you might want to check out later. The default is true; if you set this to false, fetching for those branches will only occur when you either check them out (losing the advantage of fetch --recent), or create a tracking local branch separately then fetch again. * `lfs.fetchrecentcommitsdays` + In addition to fetching at refs, also fetches previous changes made within N days of the latest commit on the ref. This is useful if you're often reviewing recent changes. Also used as a basis for pruning old files. The default is 0 (no previous changes). * `lfs.fetchrecentalways` + Always operate as if --recent was included in a `git lfs fetch` call. Default false. === Prune settings * `lfs.pruneoffsetdays` + The number of days added to the `lfs.fetchrecent*` settings to determine what can be pruned. Default is 3 days, i.e. that anything fetched at the very oldest edge of the 'recent window' is eligible for pruning 3 days later. * `lfs.pruneremotetocheck` + Set the remote that LFS files must have been pushed to in order for them to be considered eligible for local pruning. Also the remote which is called if --verify-remote is enabled. * `lfs.pruneverifyremotealways` + Always run `git lfs prune` as if `--verify-remote` was provided. * `lfs.pruneverifyunreachablealways` + Always run `git lfs prune` as if `--verify-unreachable` was provided. === Extensions * `lfs.extension.<name>.<setting>` + Git LFS extensions enable the manipulation of file streams during smudge and clean. `name` groups the settings for a single extension, and the settings are: ** `clean` The command which runs when files are added to the index ** `smudge` The command which runs when files are written to the working copy ** `priority` The order of this extension compared to others === Other settings * `lfs.<url>.access` + Note: this setting is normally set by LFS itself on receiving a 401 response (authentication required); you don't normally need to set it manually. + If set to "basic" then credentials will be requested before making batch requests to this url, otherwise a public request will initially be attempted. * `lfs.<url>.locksverify` + Determines whether locks are checked before Git pushes. This prevents you from pushing changes to files that other users have locked.
The Git LFS pre-push hook varies its behavior based on the value of this config key. ** `null` - In the absence of a value, Git LFS will attempt the call, and warn if it returns an error. If the response is valid, Git LFS will set the value to `true`, and will halt the push if the user attempts to update a file locked by another user. If the server returns a `501 Not Implemented` response, Git LFS will set the value to `false`. ** `true` - Git LFS will attempt to verify locks, halting the Git push if there are any server issues, or if the user attempts to update a file locked by another user. ** `false` - Git LFS will completely skip the lock check in the pre-push hook. You should set this if you're not using File Locking, or your Git server verifies locked files on pushes automatically. + Supports URL config lookup as described in: https://git-scm.com/docs/git-config#Documentation/git-config.txt-httplturlgt. To set this value per-host: `git config --global lfs.https://github.com/.locksverify [true|false]`. * `lfs.sshtransfer` / `lfs.<url>.sshtransfer` + Configures whether SSH transfers (the pure SSH protocol) are used. By default (or if the value is set to `negotiate`), the pure SSH protocol is tried first, and then the older hybrid protocol. If `always` is used, then only the pure SSH protocol is tried. Similarly, if `never` is used, then only the hybrid protocol is attempted. * `lfs.<url>.contenttype` + Determines whether Git LFS should attempt to detect an appropriate HTTP `Content-Type` header when uploading using the 'basic' upload adapter. If set to false, the default header of `Content-Type: application/octet-stream` is chosen instead. Default: 'true'. * `lfs.skipdownloaderrors` + Causes Git LFS not to abort the smudge filter when a download error is encountered, which allows actions such as checkout to work when you are unable to download the LFS content. LFS files which could not download will contain pointer content instead. + Note that this will result in git commands which call the smudge filter to report success even in cases when LFS downloads fail, which may affect scripts. + You can also set the environment variable GIT_LFS_SKIP_DOWNLOAD_ERRORS=1 to get the same effect. * `GIT_LFS_PROGRESS` + This environment variable causes Git LFS to emit progress updates to an absolute file-path on disk when cleaning, smudging, or fetching. + Progress is reported periodically in the form of a new line being appended to the end of the file. Each new line will take the following format: + `<direction> <current>/<total files> <downloaded>/<total> <name>` + Each field is described below: ** `direction`: The direction of transfer, either "checkout", "download", or "upload". ** `current` The index of the currently transferring file. ** `total files` The estimated count of all files to be transferred. ** `downloaded` The number of bytes already downloaded. ** `total` The entire size of the file, in bytes. ** `name` The name of the file. * `GIT_LFS_FORCE_PROGRESS` `lfs.forceprogress` + Controls whether Git LFS will suppress progress status when the standard output stream is not attached to a terminal. The default is `false`, which makes Git LFS detect whether stdout is a terminal and suppress progress when it's not; you can disable this behaviour and force progress status even when the standard output stream is not a terminal by setting either variable to 1, 'yes' or 'true'. * `GIT_LFS_SKIP_SMUDGE` + Sets whether or not Git LFS will skip attempting to convert pointers of files tracked into their corresponding objects when checked out into a working copy.
If 'true', '1', 'on', or similar, Git LFS will skip the smudge process in both `git lfs smudge` and `git lfs filter-process`. If unset, or set to 'false', '0', 'off', or similar, Git LFS will smudge files as normal. * `GIT_LFS_SKIP_PUSH` + Sets whether or not Git LFS will attempt to upload new Git LFS objects in a pre-push hook. If 'true', '1', 'on', or similar, Git LFS will skip the pre-push hook, so no new Git LFS objects will be uploaded. If unset, or set to 'false', '0', 'off', or similar, Git LFS will proceed as normal. * `GIT_LFS_SET_LOCKABLE_READONLY` `lfs.setlockablereadonly` + These settings, the first an environment variable and the second a gitconfig setting, control whether files marked as 'lockable' in `git lfs track` are made read-only in the working copy when not locked by the current user. The default is `true`; you can disable this behaviour and have all files writeable by setting either variable to 0, 'no' or 'false'. * `lfs.lockignoredfiles` + This setting controls whether Git LFS will set ignored files that match the lockable pattern read-only as well as tracked files. The default is `false`; you can enable this behavior by setting the variable to 1, 'yes', or 'true'. * `lfs.defaulttokenttl` + This setting sets a default token TTL when git-lfs-authenticate does not include the TTL in the JSON response but still enforces it. + Note that this is only necessary for larger repositories hosted on LFS servers that don't include the TTL. == LFSCONFIG The .lfsconfig file in a repository is read and interpreted in the same format as the file stored in .git/config. It allows a subset of keys to be used, including and limited to: * lfs.allowincompletepush * lfs.fetchexclude * lfs.fetchinclude * lfs.gitprotocol * lfs.locksverify * lfs.pushurl * lfs.skipdownloaderrors * lfs.url * lfs.\{*}.access * remote.\{name}.lfsurl The set of keys allowed in this file is restricted for security reasons. == EXAMPLES * Configure a custom LFS endpoint for your repository: `git config -f .lfsconfig lfs.url https://lfs.example.com/foo/bar/info/lfs` == SEE ALSO git-config(1), git-lfs-install(1), gitattributes(5), gitignore(5). Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-dedup.adoc000066400000000000000000000013741472372047300201030ustar00rootroot00000000000000= git-lfs-dedup(1) == NAME git-lfs-dedup - Deduplicate Git LFS files == SYNOPSIS `git lfs dedup` == DESCRIPTION Deduplicates storage by re-creating working tree files as clones of the files in the Git LFS storage directory using the operating system's copy-on-write file creation functionality. If the operating system or file system don't support copy-on-write file creation, this command exits unsuccessfully. This command will also exit without success if any Git LFS extensions are configured, as these will typically be used to alter the file contents before they are written to the Git LFS storage directory, and therefore the working tree files should not be copy-on-write clones of the LFS object files. == SEE ALSO Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-env.adoc000066400000000000000000000003131472372047300175620ustar00rootroot00000000000000= git-lfs-env(1) == NAME git-lfs-env - Display the Git LFS environment == SYNOPSIS `git lfs env` == DESCRIPTION Display the current Git LFS environment. == SEE ALSO Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-ext.adoc000066400000000000000000000006631472372047300176020ustar00rootroot00000000000000= git-lfs-ext(1) == NAME git-lfs-ext - View extension details == SYNOPSIS `git lfs ext list` [<name>...] == DESCRIPTION Git LFS extensions enable the manipulation of file streams during smudge and clean. == EXAMPLES * List details for all extensions + .... $ git lfs ext $ git lfs ext list .... * List details for the specified extensions + .... $ git lfs ext list 'foo' 'bar' .... == SEE ALSO Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-faq.adoc000066400000000000000000000313411472372047300175460ustar00rootroot00000000000000= git-lfs-faq(7) == NAME git-lfs-faq - FAQ for Git LFS == ENTRIES [[cat-file-size]] === File Size [[faq-track-by-size]] Does Git LFS provide a way to track files by size?:: No, it doesn't. Unfortunately, Git itself doesn't specify a way to make `.gitattributes` patterns apply to files of a certain size and we rely on the `.gitattributes` file to specify which files are tracked by Git LFS. + You _can_ use the `--above` option to `git lfs migrate import` to migrate all files that at the specified time are larger than a certain size. However, if your files change to be smaller or larger in the future, or you add more files in the future that are larger than the limit you specified, you will have to track them manually. + For these reasons, we recommend using patterns rather than `--above`. [[faq-4gb-windows]] Why doesn't Git LFS handle files larger than 4 GiB on Windows?:: Git LFS itself handles these files just fine. However, Git LFS is usually invoked by Git, and until Git 2.34, Git itself on Windows didn't handle files using smudge and clean filters (like Git LFS) that are larger than 4 GiB. So you can update Git for Windows to 2.34 to natively support these file sizes. + On older versions, set `GIT_LFS_SKIP_SMUDGE` to `1` and run `git lfs pull` to pull down the LFS files. This bypasses Git's smudging functionality and therefore avoids its limitations. [[cat-working-tree-contents]] === Working Tree Contents [[faq-only-pointer-files]] Why do I end up with small text files in my working tree instead of my files?:: Git LFS stores small text files called pointer files in the repository instead of your large files, which it stores elsewhere. These pointer files usually start with the line `version https://git-lfs.github.com/spec/v1`. + Normally, if you've run `git lfs install` at least once for your user account on the system, then Git LFS will be automatically invoked by Git when you check out files or clone a repository and this won't happen. However, if you haven't, or you've explicitly chosen to skip that behaviour by using the `--skip-smudge` option of `git lfs install`, then you may need to use `git lfs pull` to replace the pointer files in your working tree with large files. [[faq-working-tree-modified]] Why do I end up with some of my working tree files constantly showing as modified?:: This can happen if someone made a commit to a file that's tracked by Git LFS but didn't have Git LFS properly set up on their system. The objects that were checked into the repository are Git objects, not the pointers to Git LFS objects, and when Git checks these files out, it shows them as modified. + There are also several other possible ways to encounter this problem, such as an incomplete migration of your repository.
For example, you should not use `git lfs track` to track patterns that are already in your repository without running `git add --renormalize .`, since that can lead to this problem. + Users frequently find that this cannot be changed by doing `git reset --hard` or other techniques because Git then checks the files out and marks them as modified again. The best way to solve this problem is by fixing the files and then committing the change, which you can do with the following on an otherwise clean tree: + [source,shell-session] ---- $ git add --renormalize . $ git commit -m "Fix broken LFS files" ---- + This requires that every branch you want to fix have this done to it. + To prevent this from reoccurring in the future, make sure that everyone working with large files on a project has run `git lfs install` at least once. The command `git lfs fsck --pointers BASE..HEAD` (with suitable values of `BASE` and `HEAD`) may be used in your CI system to verify that nobody is introducing such problems. [[cat-tracking-and-migration]] === Tracking and Migration [[faq-track-existing-files]] How do I track files that are already in a repository?:: If you want to track files that already exist in a repository, you need to do two things. First, you need to use `git lfs track` (or a manual modification of `.gitattributes`) to mark the files as LFS files. Then, you need to run `git add --renormalize .` and commit the changes to the repository. + If you skip this second step, then you'll end up with files that are marked as LFS files but are stored as Git files, which can lead to files which are always modified, as outlined in the FAQ entry above. Note also that this doesn't change large files in your history. To do that, use `git lfs migrate import --everything` instead, as specified in one of the entries below. [[faq-export-to-git]] How do I convert from using Git LFS to a plain Git repository?:: If you'd like to stop using Git LFS and switch back to storing your large files in the plain Git format, you can do so with `git lfs migrate export --everything`. Note that you will need to provide an appropriate `--include` option to match all the patterns that you currently have tracked in any ref. + This also rewrites history, so the Git object IDs of many, if not all, of your objects will change. [[faq-github-large-files]] I'm using Git LFS, but I still see GitHub's large file error. How do I fix this?:: GitHub rejects large files anywhere in the history of your repository, not just in the latest commit. If you're still seeing this message, then you have some large files somewhere in your history, even if in the latest commits you've moved them to Git LFS. + To fix this, you can use `git lfs migrate import --everything` with an appropriate `--include` argument. For example, if you wanted to move your `.jpg` and `.png` files into Git LFS, you can do that with `git lfs migrate import --everything --include="\*.jpg,*.png"`. More complicated patterns are possible: run `git help gitattributes` for more information on valid patterns. Note that if you're specifying directories, using slashes is mandatory: backslashes are not allowed as path separators. [[cat-configuration]] === Configuration [[faq-proxy]] Can I use a proxy with Git LFS?:: Yes, Git LFS supports proxies using the same mechanisms that Git supports, namely the `http_proxy` environment variable and the configuration of `http.proxy` (or `http.*.proxy` for per-URL usage).
However, Git LFS only supports proxies which use Basic or no authentication, and it doesn't currently support Digest or Kerberos authentication. + If you're using a proxy, we recommend that you set the full URL in the proxy value, including a scheme. Thus, `http://example.com:3128` is a better choice than `example.com:3128`. If you need a username and password, they must be percent-encoded in the URL, so a username of `foo\bar` with a password of `abc@123+` using the above proxy would be `http://foo%5cbar:abc%40123%2b@example.com:3128`. + Note that, just like with Git, proxies must not modify, buffer, tamper with, or change the response to the data in any way, upstream or downstream, and any proxy which does so will break things and is not supported. The Git LFS developers don't recommend any sort of proxy, including any sort of antivirus, firewall, or monitoring software, which performs TLS interception because these are known to cause breakage and in general have been shown to worsen security. [[faq-tls-ca]] Can I use a custom set of SSL/TLS certificate authorities with Git LFS?:: Yes, Git LFS supports configuring trusted certificate authorities and client certificates for HTTPS, using the same configuration as for Git. + To configure a set of trusted certificate authorities, you can use `http.sslCAPath` to specify a directory of files, each one containing a PKCS#1 certificate of a trusted certificate authority. If you'd prefer to use a single file with all trusted certificates, you can use `http.sslCAInfo` to refer to a single file containing PKCS#1 certificates, one following the other. + For example, on Debian, to set this option to provide the default behavior, you could run `git config http.sslCAPath /etc/ssl/certs`. Note that it's also possible to set these configuration options on a per-URL basis, like so: `git config http.https://example.com/.sslCAPath /etc/ssl/certs`. + Note that PKCS#12 files are a Git for Windows extension to Git and are not supported by Git LFS. Additionally, take into account the information about TLS-intercepting proxies above if this configuration is because of a TLS-intercepting proxy. [[faq-tls-client-config]] Can I use an SSL/TLS client certificate with Git LFS?:: Yes, Git LFS supports configuring client certificates and keys using the same configuration as for Git. + To configure a client certificate, use the `http.sslCert` configuration option pointing to a file containing a PKCS#1 certificate. To configure the corresponding key, use `http.sslKey` with a file containing a PEM-encoded key. If it is encrypted, you will be prompted using the credential helper for the passphrase. + Usually you will want to specify these values on a per-URL basis, such as the following: `git config http.https://example.com/.sslKey /home/user/.certs/mine.key`. + Note that PKCS#8 and PKCS#12 files are not supported by Git LFS. [[cat-git-features]] === Working with Git Features [[faq-git-diff-on-lfs]] How do I enable git diff to work on LFS files?:: You can run `git config diff.lfs.textconv cat`, which will produce normal diffs if your files are text files. 
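+
For example (the file and revision here are purely illustrative), after setting the configuration described above, an ordinary `git diff` will show a text diff of the LFS object contents rather than of the pointer files:
+
[source,shell-session]
----
$ git config diff.lfs.textconv cat
$ git diff HEAD~1 -- notes.txt
----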
[[faq-git-diff-on-lfs-by-path]] How do I enable git diff to work on LFS files based on extension or path?:: If the above solution is too broad, each entry in the `.gitattributes` file can be customized by creating a custom global converter: + [source,shell-session] ---- $ git config --global diff.lfstext.textconv cat ---- + Any given `.gitattributes` entry for large text files can be customized to use this global text converter (e.g., patch files), whereas binary formats can continue to use the conventional lfs diff tool, like so: + [source,shell-session] ---- $ cat .gitattributes .... *.bin filter=lfs diff=lfs merge=lfs -text *.patch filter=lfs diff=lfstext merge=lfs -text .... ---- + Be advised that all developers sharing this repo with such a modified `.gitattributes` file must have similarly configured the `lfstext` text converter, whether globally or on a per repository basis. [[faq-archive-subdirectory]] Why are LFS files not included when I archive a subdirectory?:: When you run `git archive` with only a subdirectory, such as `git archive HEAD:src`, Git resolves the revision (in this case, `HEAD:src`) to a tree, and only processes items in that tree. Because the `.gitattributes` file is typically only in the root of the repository, Git doesn't even see that file, which controls whether files are considered LFS files, and hence doesn't consider any of the files in the directory as LFS files, and thus doesn't invoke Git LFS at all. + Since Git LFS doesn't even get invoked in this case, there's no way to change how this works. If you just want to include the single subdirectory without stripping the prefix, you can do this: `git archive -o archive.tar.gz --prefix=archive/ HEAD src`. If you do want to strip the subdirectory name (`src`) in this case, one option if you have the libarchive tar (available on Windows and macOS as `tar`, and usually on Linux as `bsdtar`) is to do something like this script: + [source,shell] ---- #!/bin/sh # With trailing slash. ARCHIVE_PREFIX="archive/" # Without trailing slash. SOURCE_PREFIX="src" # Without directory or file components. REVISION="HEAD" temp=$(mktemp -d) git archive --prefix="$ARCHIVE_PREFIX" "$REVISION" "$SOURCE_PREFIX" | bsdtar -C "$temp" -xf - bsdtar -s "!^\./!$ARCHIVE_PREFIX!" --format=pax -czf archive.tar.gz -C "$temp/$ARCHIVE_PREFIX$SOURCE_PREFIX" . rm -fr "$temp" ---- [[cat-non-git-tools]] === Working with Non-Git Tools [[faq-jenkins-hooks]] I'm using Jenkins and `git lfs install` fails due to an invalid hook path. What do I do?:: Recent versions of Jenkins have set `core.hooksPath` to various values, notably `NUL:` on Windows, with the goal of disabling hooks. This is not a valid path on Windows, nor a valid value for this configuration option, so when `git lfs install` runs and Git LFS attempts to install hooks, the operation fails. + The easiest way to solve this problem is by using the `--skip-repo` option to `git lfs install`, which skips the installation of the hooks. Despite the name, it can be successfully combined with `--local` if you need that option. + Note that this prevents things like `git push` from pushing LFS objects and locked files from being read only, since those are implemented by hooks. If you need that functionality, you should review the Jenkins documentation about how to properly configure the environment in such a situation so that hooks can be used. == SEE ALSO git-config(1), git-lfs-install(1), gitattributes(5), gitignore(5). Part of the git-lfs(1) suite. 
git-lfs-3.6.1/docs/man/git-lfs-fetch.adoc000066400000000000000000000125721472372047300200750ustar00rootroot00000000000000= git-lfs-fetch(1) == NAME git-lfs-fetch - Download all Git LFS files for a given ref == SYNOPSIS `git lfs fetch` [options] [<remote> [<ref>...]] == DESCRIPTION Download Git LFS objects at the given refs from the specified remote. See <<_default_remote>> and <<_default_refs>> for what happens if you don't specify. This does not update the working copy. == OPTIONS `-I <paths>`:: `--include=<paths>`:: Specify lfs.fetchinclude just for this invocation; see <<_include_and_exclude>>. `-X <paths>`:: `--exclude=<paths>`:: Specify lfs.fetchexclude just for this invocation; see <<_include_and_exclude>>. `--recent`:: Download objects referenced by recent branches & commits in addition to those that would otherwise be downloaded. See <<_recent_changes>>. `--all`:: Download all objects that are referenced by any commit reachable from the refs provided as arguments. If no refs are provided, then all refs are fetched. This is primarily for backup and migration purposes. Cannot be combined with --recent or --include/--exclude. Ignores any globally configured include and exclude paths to ensure that all objects are downloaded. `--prune`:: `-p`:: Prune old and unreferenced objects after fetching, equivalent to running `git lfs prune` afterwards. See git-lfs-prune(1) for more details. == INCLUDE AND EXCLUDE You can configure Git LFS to only fetch objects to satisfy references in certain paths of the repo, and/or to exclude certain paths of the repo, to reduce the time you spend downloading things you do not use. In your Git configuration or in a `.lfsconfig` file, you may set either or both of `lfs.fetchinclude` and `lfs.fetchexclude` to comma-separated lists of paths. If `lfs.fetchinclude` is defined, Git LFS objects will only be fetched if their path matches one in that list, and if `lfs.fetchexclude` is defined, Git LFS objects will only be fetched if their path does not match one in that list. Paths are matched using wildcard matching as per gitignore(5). Note that using the command-line options `-I` and `-X` overrides the respective configuration settings. Setting either option to an empty string clears the value. === Examples * `git config lfs.fetchinclude "textures,images/foo*"` + This will only fetch objects referenced in paths in the textures folder, and files called foo* in the images folder * `git config lfs.fetchinclude "*.jpg,*.png,*.tga"` + Only fetch JPG/PNG/TGA files, wherever they are in the repository * `git config lfs.fetchexclude "media/reallybigfiles"` + Don't fetch any LFS objects referenced in the folder media/reallybigfiles, but fetch everything else * `git config lfs.fetchinclude "media"` `git config lfs.fetchexclude "media/excessive"` + Only fetch LFS objects in the 'media' folder, but exclude those in one of its subfolders. == DEFAULT REMOTE Without arguments, fetch downloads from the default remote. The default remote is the same as for `git fetch`, i.e. based on the remote branch you're tracking first, or origin otherwise. == DEFAULT REFS If no refs are given as arguments, the currently checked out ref is used. In addition, if enabled, recently changed refs and commits are also included. See <<_recent_changes>> for details.
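For example, if the current branch is `main` and it tracks `origin/main`, then running `git lfs fetch` with no arguments behaves like:

....
$ git lfs fetch origin main
....

plus any recently changed refs and commits, if that behaviour is enabled.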
== RECENT CHANGES If the `--recent` option is specified, or if the gitconfig option `lfs.fetchrecentalways` is true, then after the current ref (or those in the arguments) is fetched, we also search for 'recent' changes to fetch objects for, so that it's more convenient to checkout or diff those commits without incurring further downloads. What changes are considered 'recent' is based on a number of gitconfig options: `lfs.fetchrecentrefsdays`:: If non-zero, includes branches which have commits within N days of the current date. Only local refs are included unless lfs.fetchrecentremoterefs is true. The default is 7 days. `lfs.fetchrecentremoterefs`:: If true, fetches remote refs (for the remote you're fetching) as well as local refs in the recent window. This is useful to fetch objects for remote branches you might want to check out later. The default is true; if you set this to false, fetching for those branches will only occur when you either check them out (losing the advantage of fetch --recent), or create a tracking local branch separately then fetch again. `lfs.fetchrecentcommitsdays`:: In addition to fetching at branches, also fetches changes made within N days of the latest commit on the branch. This is useful if you're often reviewing recent changes. The default is 0 (no previous changes). `lfs.fetchrecentalways`:: Always operate as if --recent was provided on the command line. == EXAMPLES * Fetch the LFS objects for the current ref from default remote + `git lfs fetch` * Fetch the LFS objects for the current ref AND recent changes from default remote + `git lfs fetch --recent` * Fetch the LFS objects for the current ref from a secondary remote 'upstream' + `git lfs fetch upstream` * Fetch all the LFS objects from the default remote that are referenced by any commit in the `main` and `develop` branches + `git lfs fetch --all origin main develop` * Fetch the LFS objects for a branch from origin + `git lfs fetch origin mybranch` * Fetch the LFS objects for 2 branches and a commit from origin + `git lfs fetch origin main mybranch e445b45c1c9c6282614f201b62778e4c0688b5c8` == SEE ALSO git-lfs-checkout(1), git-lfs-pull(1), git-lfs-prune(1), gitconfig(5). Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-filter-process.adoc000066400000000000000000000032671472372047300217460ustar00rootroot00000000000000= git-lfs-filter-process(1) == NAME git-lfs-filter-process - Git filter process that converts between pointer and actual content == SYNOPSIS `git lfs filter-process` + `git lfs filter-process --skip` == DESCRIPTION Implement the Git process filter API, exchanging handshake messages and then accepting and responding to requests to either clean or smudge a file. filter-process is always run by Git's filter process, and is configured by the repository's Git attributes. In your Git configuration or in a `.lfsconfig` file, you may set either or both of `lfs.fetchinclude` and `lfs.fetchexclude` to comma-separated lists of paths. If `lfs.fetchinclude` is defined, Git LFS pointer files will only be replaced with the contents of the corresponding Git LFS object file if their path matches one in that list, and if `lfs.fetchexclude` is defined, Git LFS pointer files will only be replaced with the contents of the corresponding Git LFS object file if their path does not match one in that list. Paths are matched using wildcard matching as per gitignore(5). 
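For example (the paths here are illustrative only), with the following configuration the filter would replace pointer files under `assets/` with object contents during checkout, except those under `assets/raw`:

....
$ git config lfs.fetchinclude "assets"
$ git config lfs.fetchexclude "assets/raw"
....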
Git LFS pointer files that are not replaced with the contents of their corresponding object files are simply copied to standard output without change. The filter process uses Git's pkt-line protocol to communicate, and is documented in detail in gitattributes(5). == OPTIONS Without any options, filter-process accepts and responds to requests normally. `--skip`:: Skip automatic downloading of objects on clone or pull. `GIT_LFS_SKIP_SMUDGE`:: Disables the smudging process. For more, see: git-lfs-config(5). == SEE ALSO git-lfs-clean(1), git-lfs-install(1), git-lfs-smudge(1), gitattributes(5), gitignore(5). Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-fsck.adoc000066400000000000000000000023471472372047300177310ustar00rootroot00000000000000= git-lfs-fsck(1) == NAME git-lfs-fsck - Check Git LFS files for consistency == SYNOPSIS `git lfs fsck` [options] [revisions] == DESCRIPTION Checks all Git LFS files in the current HEAD for consistency. Corrupted files are moved to ".git/lfs/bad". The revisions may be specified as either a single committish, in which case only that commit is inspected; specified as a range of the form `A..B` (and only this form), in which case that range is inspected; or omitted entirely, in which case HEAD (and, for --objects, the index) is examined. The default is to perform all checks. In your Git configuration or in a `.lfsconfig` file, you may set `lfs.fetchexclude` to a comma-separated list of paths. If `lfs.fetchexclude` is defined, then any Git LFS files whose paths match one in that list will not be checked for consistency. Paths are matched using wildcard matching as per gitignore(5). == OPTIONS `--objects`:: Check that each object in HEAD matches its expected hash and that each object exists on disk. `--pointers`:: Check that each pointer is canonical and that each file which should be stored as a Git LFS file is so stored. == SEE ALSO git-lfs-ls-files(1), git-lfs-status(1), gitignore(5). Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-install.adoc000066400000000000000000000045111472372047300204440ustar00rootroot00000000000000= git-lfs-install(1) == NAME git-lfs-install - Install Git LFS configuration. == SYNOPSIS `git lfs install` [options] == DESCRIPTION Perform the following actions to ensure that Git LFS is set up properly: * Set up the clean and smudge filters under the name "lfs" in the global Git config. * Install a pre-push hook to run git-lfs-pre-push(1) for the current repository, if run from inside one. If "core.hooksPath" is configured in any Git configuration (and supported, i.e., the installed Git version is at least 2.9.0), then the pre-push hook will be installed to that directory instead. == OPTIONS Without any options, `git lfs install` will only set up the "lfs" smudge and clean filters if they are not already set. `--force`:: Sets the "lfs" smudge and clean filters, overwriting existing values. `--local`:: Sets the "lfs" smudge and clean filters in the local repository's git config, instead of the global git config (~/.gitconfig). `--worktree`:: Sets the "lfs" smudge and clean filters in the current working tree's git config, instead of the global git config (~/.gitconfig) or local repository's git config ($GIT_DIR/config). If multiple working trees are in use, the Git config extension `worktreeConfig` must be enabled to use this option. If only one working tree is in use, `--worktree` has the same effect as `--local`.
This option is only available if the installed Git version is at least 2.20.0 and therefore supports the "worktreeConfig" extension. `--manual`:: Print instructions for manually updating your hooks to include git-lfs functionality. Use this option if `git lfs install` fails because of existing hooks and you want to retain their functionality. `--system`:: Sets the "lfs" smudge and clean filters in the system git config, e.g. /etc/gitconfig instead of the global git config (~/.gitconfig). `--skip-smudge`:: Skips automatic downloading of objects on clone or pull. This requires a manual "git lfs pull" every time a new commit is checked out on your repository. `--skip-repo`:: Skips installation of hooks into the local repository; use if you want to install the LFS filters but not make changes to the hooks. It is valid to use `--local`, `--global`, or `--system` in conjunction with this option. == SEE ALSO git-lfs-uninstall(1), git-worktree(1). Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-lock.adoc000066400000000000000000000017351472372047300177320ustar00rootroot00000000000000= git-lfs-lock(1) == NAME git-lfs-lock - Set a file as "locked" on the Git LFS server == SYNOPSIS `git lfs lock` [options] <path> == DESCRIPTION Sets the given file path as "locked" against the Git LFS server, with the intention of blocking attempts by other users to update the given path. Locking a file requires the file to exist in the working copy. Once locked, LFS will verify that Git pushes do not modify files locked by other users. See the description of the `lfs.<url>.locksverify` config key in git-lfs-config(5) for details. == OPTIONS `-r <name>`:: `--remote=<name>`:: Specify the Git LFS server to use. Ignored if the `lfs.url` config key is set. `--json`:: Writes lock info as JSON to STDOUT if the command exits successfully. Intended for interoperation with external tools. If the command returns with a non-zero exit code, plain text messages will be sent to STDERR. == SEE ALSO git-lfs-unlock(1), git-lfs-locks(1). Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-locks.adoc000066400000000000000000000033771472372047300201160ustar00rootroot00000000000000= git-lfs-locks(1) == NAME git-lfs-locks - Lists currently locked files from the Git LFS server. == SYNOPSIS `git lfs locks` [options] == DESCRIPTION Lists current locks from the Git LFS server. == OPTIONS `-r <name>`:: `--remote=<name>`:: Specify the Git LFS server to use. Ignored if the `lfs.url` config key is set. `-i <id>`:: `--id=<id>`:: Specifies a lock by its ID. Returns a single result. `-p <path>`:: `--path=<path>`:: Specifies a lock by its path. Returns a single result. `--local`:: Lists only our own locks which are cached locally. Skips a remote call. `--cached`:: Lists cached locks from the last remote call. Contrary to `--local`, this will include locks of other users as well. This option is intended to display the last known locks in case you are offline. There is no guarantee that locks on the server have not changed in the meanwhile. `--verify`:: Verifies the lock owner on the server and marks our own locks by 'O'. Own locks are actually held by us and corresponding files can be updated for the next push. All other locks are held by someone else. Contrary to --local, this option will also detect locks which are held by us despite no local lock information being available (e.g. because the file had been locked from a different clone); it will also detect 'broken' locks (e.g. if someone else has forcefully unlocked our files).
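+
For example, to re-check lock ownership against the server and flag the locks actually held by us:
+
....
$ git lfs locks --verify
....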
`-l <num>`:: `--limit=<num>`:: Specifies the number of results to return. `--json`:: Writes lock info as JSON to STDOUT if the command exits successfully. Intended for interoperation with external tools. If the command returns with a non-zero exit code, plain text messages will be sent to STDERR. == SEE ALSO git-lfs-lock(1), git-lfs-unlock(1). Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-logs.adoc000066400000000000000000000011661472372047300177450ustar00rootroot00000000000000= git-lfs-logs(1) == NAME git-lfs-logs - Show errors from the git-lfs command == SYNOPSIS `git lfs logs` + `git lfs logs <file>` + `git lfs logs clear` + `git lfs logs boomtown` == DESCRIPTION Display errors from the git-lfs command. Any time it crashes, the details are saved to ".git/lfs/logs". == COMMANDS `clear`:: Clears all of the existing logged errors. `boomtown`:: Triggers a dummy exception. == OPTIONS Without any options, `git lfs logs` simply shows the list of error logs. * `<file>`: Shows the specified error log. Use "last" to show the most recent error. == SEE ALSO Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-ls-files.adoc000066400000000000000000000031231472372047300205120ustar00rootroot00000000000000= git-lfs-ls-files(1) == NAME git-lfs-ls-files - Show information about Git LFS files in the index and working tree == SYNOPSIS `git lfs ls-files` [<ref>] + `git lfs ls-files` <ref> <ref> == DESCRIPTION Display paths of Git LFS files that are found in the tree at the given reference. If no reference is given, scan the currently checked-out branch. If two references are given, the LFS files that are modified between the two references are shown; deletions are not listed. An asterisk (*) after the OID indicates a full object, a minus (-) indicates an LFS pointer. == OPTIONS `-l`:: `--long`:: Show the entire 64 character OID, instead of just the first 10. `-s`:: `--size`:: Show the size of the LFS object between parentheses at the end of a line. `-d`:: `--debug`:: Show as much information as possible about an LFS file. This is intended for manual inspection; the exact format may change at any time. `-a`:: `--all`:: Inspects the full history of the repository, not the current HEAD (or other provided reference). This will include previous versions of LFS objects that are no longer found in the current tree. `--deleted`:: Shows the full history of the given reference, including objects that have been deleted. `-I <paths>`:: `--include=<paths>`:: Include paths matching only these patterns; see <<_fetch_settings>>. `-X <paths>`:: `--exclude=<paths>`:: Exclude paths matching any of these patterns; see <<_fetch_settings>>. `-n`:: `--name-only`:: Show only the lfs tracked file names. == SEE ALSO git-lfs-status(1), git-lfs-config(5). Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-merge-driver.adoc000066400000000000000000000065121472372047300213710ustar00rootroot00000000000000= git-lfs-merge-driver(1) == NAME git-lfs-merge-driver - Merge text-based LFS files == SYNOPSIS `git lfs merge-driver` [options] == DESCRIPTION Merge text files stored in Git LFS using the default Git merge machinery, or a custom merge driver if specified. Note that this, in general, does not support partial renames or copies because Git does not support them in this case. This program is intended to be invoked automatically by Git and not by users manually. See <<_configuration>> for details on the configuration required for that. == OPTIONS `--ancestor <path>`:: Specify the file containing the ancestor revision. `--current <path>`:: Specify the file containing the current revision.
`--marker-size <num>`:: Specify the conflict marker size as an integer. `--other <path>`:: Specify the file containing the other revision. `--program <program>`:: Specify a command, which is passed to the shell after substitution, that performs the actual merge. If this is not specified, `git merge-file` is invoked with appropriate arguments to perform the merge of the file. + See <<_configuration>> for the sequences which are substituted here. == CONFIGURATION Git allows the use of a custom merge driver for files based on the `merge` attribute set in `.gitattributes`. By default, when using `git lfs track`, this value is set to `lfs`. Because Git LFS can be used to store both text and binary files and it isn't always clear which behavior should be used, Git LFS does not enable this merge driver by default. However, if you know that some or all of your files are text files, then you can set the `merge` attribute for those files to `lfs-text` and use `git config` to set the merge driver like so: [source,console] ---- $ git config merge.lfs-text.driver 'git lfs merge-driver --ancestor %O --current %A --other %B --marker-size %L --output %A' ---- This tells Git to invoke the custom Git LFS merge driver, which in turn uses Git's merge machinery, to merge files where the `merge` attribute is set to `lfs-text`. Note that `lfs-text` here is an example and any syntactically valid value can be used. If you are using a special type of file that needs rules different from Git's standard merge machinery, you can also specify the `--program` option, which is passed to `sh` after substituting its own percent-encoded escapes: * `%A`: the current version * `%B`: the other version * `%D`: the destination version * `%O`: the ancestor version * `%L`: the conflict marker size Note that the percent sign must typically be doubled to prevent Git from substituting its own values here. Therefore, specifying the default behavior explicitly looks like this: [source,console] ---- $ git config merge.lfs-text.driver \ 'git lfs merge-driver --ancestor %O --current %A --other %B --marker-size %L --output %A --program '\''git merge-file --stdout --marker-size=%%L %%A %%O %%B >%%D'\''' ---- The exit status from the custom command should be zero on success or non-zero on conflicts or other failure. Note that if no merge driver is specified for the value of the `merge` attribute (as is the case by default with `merge=lfs`), then the default Git merge strategy is used. For LFS files, this means that Git will try to merge the pointer files, which usually is not useful. == SEE ALSO git-merge(1), git-merge-file(1), gitattributes(5) Part of the git-lfs(1) suite. git-lfs-3.6.1/docs/man/git-lfs-migrate.adoc000066400000000000000000000521001472372047300204230ustar00rootroot00000000000000= git-lfs-migrate(1) == NAME git-lfs-migrate - Migrate history to or from Git LFS == SYNOPSIS `git lfs migrate` <mode> [options] [--] [branch ...] == DESCRIPTION Convert files in a Git repository to or from Git LFS pointers, or summarize Git file sizes by file type. The `import` mode converts Git files (i.e., blobs) to Git LFS, while the `export` mode does the reverse, and the `info` mode provides an informational summary which may be useful in deciding which files to import or export. In all modes, by default `git lfs migrate` operates only on the currently checked-out branch, and only on files (of any size and type) added in commits which do not exist on any remote. Multiple options are available to override these defaults.
When converting files to or from Git LFS, the `git lfs migrate` command will only make changes to your local repository and working copy, never any remotes. This is intentional as the `import` and `export` modes are generally "destructive" in the sense that they rewrite your Git history, changing commits and generating new commit SHAs. (The exception is the "no-rewrite" `import` sub-mode; see <<_import_without_rewriting_history>> for details.) You should therefore always first commit or stash any uncommitted work before using the `import` or `export` modes, and then validate the result of the migration before pushing the changes to your remotes, for instance by running the `info` mode and by examining your rewritten commit history. Once you are satisfied with the changes, you will need to force-push the new Git history of any rewritten branches to all your remotes. This is a step which should be taken with care, since you will be altering the Git history on your remotes. To examine or modify files in branches other than the currently checked-out one, branch refs may be specified directly, or provided in one or more `--include-ref` options. They may also be excluded by prefixing them with `^` or providing them in `--exclude-ref` options. Use the `--everything` option to specify that all refs should be examined, including all remote refs. See <<_include_and_exclude_references>> for details. For the `info` and `import` modes, all file types are considered by default; while useful in the `info` mode, this is often not desirable when importing, so either filename patterns (pathspecs) or the `--fixup` option should normally be specified in that case. (At least one include pathspec is required for the `export` mode.) Pathspecs may be defined using the `--include` and `--exclude` options (`-I` and `-X` for short), as described in <<_include_and_exclude>>. As typical Git LFS usage depends on tracking specific file types using filename patterns defined in `.gitattributes` files, the `git lfs migrate` command will examine, create, and modify `.gitattributes` files as necessary. The `.gitattributes` files will always be assigned the default read/write permissions mode (i.e., without execute permissions). Any symbolic links with that name will cause the migration to halt prematurely. The `import` mode (see <<_import>>) will convert Git objects of the file types specified (e.g., with `--include`) to Git LFS pointers, and will add entries for those file types to `.gitattributes` files, creating those files if they do not exist. The result should be as if `git lfs track` commands had been run at the points in your Git history corresponding to where each type of converted file first appears. The exception is if the `--fixup` option is given, in which case the `import` mode will only examine any existing `.gitattributes` files and then convert Git objects which should be tracked by Git LFS but are not yet. The `export` mode (see <<_export>>) works as the reverse operation to the `import` mode, converting any Git LFS pointers that match the file types specified with `--include`, which must be given at least once. Note that `.gitattributes` entries will not be removed, nor will the files; instead, the `export` mode inserts "do not track" entries similar to those created by the `git lfs untrack` command. 
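As a hypothetical illustration of the `import` and `export` modes described above, the following imports all PNG files on the current branch into Git LFS and then converts them back to plain Git objects (the pattern is an example only):

....
$ git lfs migrate import --include="*.png"
$ git lfs migrate export --include="*.png"
....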
The `--remote` option is available in the `export` mode to specify the remote from which Git LFS objects should be fetched if they do not exist in the local Git LFS object cache; if not provided, `origin` is used by default.

The `info` mode (see <<_info>>) summarizes by file type (i.e., by filename extension) the total number and size of files in a repository. Note that like the other two modes, by default the `info` mode operates only on the currently checked-out branch and only on commits which do not exist on any remote, so to get a summary of the entire repository across all branches, use the `--everything` option. If objects have already been converted to Git LFS pointers, then by default the size of the referenced objects is totaled and reported separately. You may also choose to ignore them by using `--pointers=ignore` or to treat the pointers as files by using `--pointers=no-follow`. (The latter option is akin to how existing Git LFS pointers were handled by the `info` mode in prior versions of Git LFS).

When using the `--everything` option, take note that it means all commits reachable from all refs (local and remote) will be considered, but not necessarily all file types. The `import` and `info` modes consider all file types by default, although the `--include` and `--exclude` options constrain this behavior.

While the `--everything` option means all commits reachable from any ref will be considered for migration, after migration only local refs will be updated even when `--everything` is specified. This ensures remote refs stay synchronized with their remote. In other words, `refs/heads/foo` will be updated with the `--everything` option, but `refs/remotes/origin/foo` will not, so it stays in sync with the remote until `git push origin foo` is performed. After checking that the results of a migration with `--everything` are satisfactory, it may be convenient to push all local branches to your remotes by using the `--all` option to `git push`.

Unless the `--skip-fetch` option is given, `git lfs migrate` always begins by fetching updated lists of refs from all the remotes returned by `git remote`, but as noted above, after making changes to your local Git history while converting objects, it will never automatically push those changes to your remotes.

== MODES

info::
  Show information about repository size. See <<_info>>.
import::
  Convert Git objects to Git LFS pointers. See <<_import>> and <<_import_without_rewriting_history>>.
export::
  Convert Git LFS pointers to Git objects. See <<_export>>.

== OPTIONS

`-I <paths>`::
`--include=<paths>`::
  See <<_include_and_exclude>>.
`-X <paths>`::
`--exclude=<paths>`::
  See <<_include_and_exclude>>.
`--include-ref=<ref>`::
  See <<_include_and_exclude_references>>.
`--exclude-ref=<ref>`::
  See <<_include_and_exclude_references>>.
`--skip-fetch`::
  Assumes that the known set of remote references is complete, and should not be refreshed when determining the set of "un-pushed" commits to migrate. Has no effect when combined with `--include-ref` or `--exclude-ref`.
`--everything`::
  See <<_include_and_exclude_references>>.
+
Note: Git refs are "case-sensitive" on all platforms in "packed form" (see `git-pack-refs(1)`). On "case-insensitive" file systems, e.g. NTFS on Windows or default APFS on macOS, `git-lfs-migrate(1)` would only migrate the first ref if two or more refs are equal except for upper/lower case letters.
`--yes`::
  Assume a yes answer to any prompts, permitting noninteractive use.
Currently, the only such prompt is the one asking whether to overwrite (destroy) any working copy changes. Thus, specifying this option may cause data loss if you are not careful.
`[branch ...]`::
  Migrate only the set of branches listed. If not given, `git-lfs-migrate(1)` will migrate the currently checked out branch.
+
References beginning with `^` will be excluded, whereas branches that do not begin with `^` will be included.
+
If any of `--include-ref` or `--exclude-ref` are given, the checked out branch will not be appended, but branches given explicitly will be appended.

=== INFO

The `info` mode summarizes the sizes of file objects present in the Git history. It supports all the core `migrate` options and these additional ones:

`--above=<size>`::
  Only count files whose individual filesize is above the given size. `size` may be specified as a number of bytes, or a number followed by a storage unit, e.g., "1b", "20 MB", "3 TiB", etc.
+
If a set of files sharing a common extension has no files in that set whose individual size is above the given `--above` threshold, no entry for that set will be shown.
`--top=<n>`::
  Only display the top `n` entries, ordered by how many total files match the given pathspec. The default is to show only the top 5 entries. When existing Git LFS objects are found, an extra, separate "LFS Objects" line is output in addition to the top `n` entries, unless the `--pointers` option is used to change this behavior.
`--unit=<unit>`::
  Format the number of bytes in each entry as a quantity of the storage unit provided. Valid units include:
* b, kib, mib, gib, tib, pib - for IEC storage units
* b, kb, mb, gb, tb, pb - for SI storage units
+
If a `--unit` is not specified, the largest unit that can fit the number of counted bytes as a whole number quantity is chosen.
`--pointers=[follow|no-follow|ignore]`::
  Treat existing Git LFS pointers in the history according to one of three alternatives. In the default `follow` case, if any pointers are found, an additional separate "LFS Objects" line item is output which summarizes the total number and size of the Git LFS objects referenced by pointers. In the `ignore` case, any pointers are simply ignored, while the `no-follow` case replicates the behavior of the `info` mode in older Git LFS versions and treats any pointers it finds as if they were regular files, so the output totals only include the contents of the pointers, not the contents of the objects to which they refer.
`--fixup`::
  Infer `--include` and `--exclude` filters on a per-commit basis based on the `.gitattributes` files in a repository. In practice, this option counts any filepaths which should be tracked by Git LFS according to the repository's `.gitattributes` file(s), but aren't already pointers. The `.gitattributes` files are not reported, in contrast to the normal output of the `info` mode. This option is incompatible with explicitly given `--include`, `--exclude` filters and with any `--pointers` setting other than `ignore`, hence `--fixup` implies `--pointers=ignore` if it is not explicitly set.

The format of the output shows the filename pattern, the total size of the file objects (excluding those below the `--above` threshold, if one was defined), and the ratio of the number of files above the threshold to the total number of files; this ratio is also shown as a percentage. For example:

....
*.gif 93 MB 9480/10504 files(s) 90%
*.png 14 MB 1732/1877 files(s) 92%
....
By default only the top five entries are shown, but `--top` allows for more or fewer to be output as desired.

=== IMPORT

The `import` mode migrates objects present in the Git history to pointer files tracked and stored with Git LFS. It supports all the core `migrate` options and these additional ones:

`--verbose`::
  Print the commit oid and filename of migrated files to STDOUT.
`--above=<size>`::
  Only migrate files whose individual filesize is above the given size. `size` may be specified as a number of bytes, or a number followed by a storage unit, e.g., "1b", "20 MB", "3 TiB", etc. This option cannot be used with the `--include`, `--exclude`, and `--fixup` options.
`--object-map=<path>`::
  Write to `path` a file with the mapping of each rewritten commit. The file format is CSV with this pattern: `OLD-SHA`,`NEW-SHA`
`--no-rewrite`::
  Migrate objects to Git LFS in a new commit without rewriting Git history. Please note that when this option is used, the `migrate import` command will expect a different argument list, specialized options will become available, and the core `migrate` options will be ignored. See <<_import_without_rewriting_history>>.
`--fixup`::
  Infer `--include` and `--exclude` filters on a per-commit basis based on the `.gitattributes` files in a repository. In practice, this option imports any filepaths which should be tracked by Git LFS according to the repository's `.gitattributes` file(s), but aren't already pointers. This option is incompatible with explicitly given `--include`, `--exclude` filters.

If `--no-rewrite` is not provided and `--include` or `--exclude` (`-I`, `-X`, respectively) are given, the `.gitattributes` will be modified to include any new filepath patterns as given by those flags.

If `--no-rewrite` is not provided and neither of those flags are given, the gitattributes will be incrementally modified to include new filepath extensions as they are rewritten in history.

=== IMPORT WITHOUT REWRITING HISTORY

The `import` mode has a special sub-mode enabled by the `--no-rewrite` flag. This sub-mode will migrate objects to pointers as in the base `import` mode, but will do so in a new commit without rewriting Git history. When using this sub-mode, the base `migrate` options, such as `--include-ref`, will be ignored, as will those for the base `import` mode. The `migrate` command will also take a different argument list. As a result of these changes, `--no-rewrite` will only operate on the current branch - any other interested branches must have the generated commit merged in.

The `--no-rewrite` sub-mode supports the following options and arguments:

`-m <message>`::
`--message=<message>`::
  Specifies a commit message for the newly created commit.
[file ...]::
  The list of files to import. These files must be tracked by patterns specified in the gitattributes.

If `--message` is given, the new commit will be created with the provided message. If no message is given, a commit message will be generated based on the file arguments.

=== EXPORT

The `export` mode migrates Git LFS pointer files present in the Git history out of Git LFS, converting them into their corresponding object files. It supports all the core `migrate` options and these additional ones:

`--verbose`::
  Print the commit oid and filename of migrated files to STDOUT.
`--object-map=<path>`::
  Write to `path` a file with the mapping of each rewritten commit. The file format is CSV with this pattern: `OLD-SHA`,`NEW-SHA`
`--remote=<git-remote>`::
  Download LFS objects from the provided `git-remote` during the export. If not provided, defaults to `origin`.
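As an illustration, an export can record the old-to-new commit mapping while it runs; the pattern and path here are hypothetical:

....
$ git lfs migrate export --include="*.bin" --object-map=/tmp/object-map.csv
....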
The `export` mode requires at minimum a pattern provided with the `--include` argument to specify which files to export. Files matching the `--include` patterns will be removed from Git LFS, while files matching the `--exclude` patterns will retain their Git LFS status. The export command will modify the `.gitattributes` to set/unset any filepath patterns as given by those flags.

== INCLUDE AND EXCLUDE

You can specify that `git lfs migrate` should only convert files whose pathspec matches the `--include` glob patterns and does not match the `--exclude` glob patterns, either to reduce total migration time or to only migrate part of your repo. Multiple patterns may be given using commas as delimiters.

Pattern matching is done so as to be functionally equivalent to the pattern matching format of `.gitattributes`. In addition to simple file extension matches (e.g., `*.gif`) patterns may also specify directory paths, in which case the `path/**` format may be used to match recursively.

Note that this form of pattern matching for the `--include` and `--exclude` options used by the `git lfs migrate` command is unique among the suite of `git lfs` commands. Other commands which also take these options, such as `git lfs ls-files`, use the gitignore(5) form of pattern matching instead.

== INCLUDE AND EXCLUDE REFERENCES

You can specify that `git lfs migrate` should only convert files added in commits reachable from certain references, namely those defined using one or more `--include-ref` options, and should ignore files in commits reachable from references defined in `--exclude-ref` options.

....
          D---E---F
         /         \
    A---B------C    refs/heads/my-feature
         \      \
          \      refs/heads/main
           \
            refs/remotes/origin/main
....

In the above configuration, the following commits are reachable by each ref:

....
refs/heads/main:          C, B, A
refs/heads/my-feature:    F, E, D, B, A
refs/remotes/origin/main: A
....

The following `git lfs migrate` options would, therefore, include commits F, E, D, C, and B, but exclude commit A:

....
  --include-ref=refs/heads/my-feature
  --include-ref=refs/heads/main
  --exclude-ref=refs/remotes/origin/main
....

The presence of the `--everything` flag indicates that all commits reachable from all local and remote references should be migrated (but note that the remote refs themselves will not be updated).

== EXAMPLES

=== Migrate unpushed commits

A common use case for the migrate command is to convert large Git objects to LFS before pushing your commits. By default, it only scans commits that don't exist on any remote, so long as the repository is non-bare.

First, run `git lfs migrate info` to list the file types taking up the most space in your repository:

....
$ git lfs migrate info
migrate: Fetching remote refs: ..., done
migrate: Sorting commits: ..., done
migrate: Examining commits: 100% (1/1), done
*.mp3   284 MB    1/1 files(s)  100%
*.pdf   42 MB     8/8 files(s)  100%
*.psd   9.8 MB  15/15 files(s)  100%
*.ipynb 6.9 MB    6/6 files(s)  100%
*.csv   5.8 MB    2/2 files(s)  100%
....

Now, you can run `git lfs migrate import` to convert some file types to LFS:

....
$ git lfs migrate import --include="*.mp3,*.psd"
migrate: Fetching remote refs: ..., done
migrate: Sorting commits: ..., done
migrate: Rewriting commits: 100% (1/1), done
  main  d2b959babd099fe70da1c1512e2475e8a24de163 -> 136e706bf1ae79643915c134e17a6c933fd53c61
migrate: Updating refs: ..., done
....
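If you want to confirm which paths are now managed by Git LFS after an import, one option is to list them with git-lfs-ls-files(1); this follow-up check is illustrative only:

....
$ git lfs ls-files
....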
If after conversion you find that some files in your working directory have been replaced with Git LFS pointers, this is normal, and the working copies of these files can be repopulated with their full expected contents by using `git lfs checkout`. === Migrate local history You can also migrate the entire history of your repository: .... # Check for large files and existing Git LFS objects in your local main branch $ git lfs migrate info --include-ref=main # Check for large files and existing Git LFS objects in every branch $ git lfs migrate info --everything # Check for large files in every branch, ignoring any existing Git LFS objects, # and listing the top 100 or fewer results $ git lfs migrate info --everything --pointers=ignore --top=100 .... The same flags will work in `import` mode: .... # Convert all zip files in your main branch $ git lfs migrate import --include-ref=main --include="*.zip" # Convert all zip files in every local branch $ git lfs migrate import --everything --include="*.zip" # Convert all files over 100K in every local branch $ git lfs migrate import --everything --above=100Kb .... Note: This will require a force-push to any existing Git remotes. Using the `--all` option when force-pushing may be convenient if many local refs were updated, e.g., after importing to Git LFS with the `--everything` option. === Migrate without rewriting local history You can also migrate files without modifying the existing history of your repository. Note that in the examples below, files in subdirectories are not included because they are not explicitly specified. Without a specified commit message: .... $ git lfs migrate import --no-rewrite test.zip *.mp3 *.psd .... With a specified commit message: .... $ git lfs migrate import --no-rewrite \ -m "Import test.zip, .mp3, .psd files in root of repo" \ test.zip *.mp3 *.psd .... === Migrate from Git LFS If you no longer wish to use Git LFS for some or all of your files, you can use the `export` mode to convert Git LFS objects into regular Git blobs again. The `export` mode requires at least one `--include` pathspec, and will download any objects not found locally from your `origin` Git remote, or from the Git remote you specify with the `--remote` option. .... # Convert all zip Git LFS objects to files in your main branch $ git lfs migrate export --include-ref=main --include="*.zip" # Convert all zip Git LFS objects to files in every local branch, # fetching any object data not cached locally from the my-remote Git remote $ git lfs migrate export --everything --include="*.zip" --remote=my-remote # Convert all Git LFS objects to files in every local branch $ git lfs migrate export --everything --include="*" .... Note: This will require a force-push to any existing Git remotes. Using the `--all` option when force-pushing may be convenient if many local refs were updated, e.g., after exporting from Git LFS with the `--everything` option. == SEE ALSO git-lfs-checkout(1), git-lfs-ls-files(1), git-lfs-track(1), git-lfs-untrack(1), gitattributes(5), gitignore(5). Part of the git-lfs(1) suite. 
git-lfs-3.6.1/docs/man/git-lfs-pointer.adoc000066400000000000000000000025261472372047300204620ustar00rootroot00000000000000= git-lfs-pointer(1)

== NAME

git-lfs-pointer - Build, compare, and check pointers

== SYNOPSIS

`git lfs pointer --file=path/to/file` +
`git lfs pointer --file=path/to/file --pointer=path/to/pointer` +
`git lfs pointer --file=path/to/file --stdin` +
`git lfs pointer --check --file=path/to/file`

== DESCRIPTION

Builds and optionally compares generated pointer files to ensure consistency between different Git LFS implementations.

== OPTIONS

`--file`::
  A local file to build the pointer from.
`--pointer`::
  A local file including the contents of a pointer generated from another implementation. This is compared to the pointer generated from `--file`.
`--stdin`::
  Reads the pointer from STDIN to compare with the pointer generated from `--file`.
`--check`::
  Reads the pointer from STDIN (if `--stdin` is given) or from the filepath (if `--file` is given). If neither or both of `--stdin` and `--file` are given, the invocation is invalid. Exits 0 if the data read is a valid Git LFS pointer. Exits 1 otherwise.
`--strict`::
`--no-strict`::
  In conjunction with `--check`, `--strict` verifies that the pointer is canonical; that is, it would be the one created by Git LFS. If it is not, exits 2. The default, for backwards compatibility, is `--no-strict`, but this may change in a future version.

== SEE ALSO

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-post-checkout.adoc000066400000000000000000000007071472372047300215710ustar00rootroot00000000000000= git-lfs-post-checkout(1)

== NAME

git-lfs-post-checkout - Git post-checkout hook implementation

== SYNOPSIS

`git lfs post-checkout`

== DESCRIPTION

Responds to Git post-checkout events. It makes sure that any files which are marked as lockable by `git lfs track` are read-only in the working copy, if not currently locked by the local user.

== SEE ALSO

git-lfs-track(1)

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-post-commit.adoc000066400000000000000000000013171472372047300212520ustar00rootroot00000000000000= git-lfs-post-commit(1)

== NAME

git-lfs-post-commit - Git post-commit hook implementation

== SYNOPSIS

`git lfs post-commit`

== DESCRIPTION

Responds to Git post-commit events. It makes sure that any files which are marked as lockable by `git lfs track` are read-only in the working copy, if not currently locked by the local user.

Where the `git lfs post-merge` command, which has a similar purpose, must examine all files in the working copy, `git lfs post-commit` can limit itself to checking only those files which have changed in `HEAD`. It primarily handles newly added lockable files which have not yet been made read-only.

== SEE ALSO

git-lfs-post-merge(1), git-lfs-track(1)

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-post-merge.adoc000066400000000000000000000006261472372047300210630ustar00rootroot00000000000000= git-lfs-post-merge(1)

== NAME

git-lfs-post-merge - Git post-merge hook implementation

== SYNOPSIS

`git lfs post-merge`

== DESCRIPTION

Responds to Git post-merge events. It makes sure that any files which are marked as lockable by `git lfs track` are read-only in the working copy, if not currently locked by the local user.

== SEE ALSO

git-lfs-track(1)

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-pre-push.adoc000066400000000000000000000015421472372047300205420ustar00rootroot00000000000000= git-lfs-pre-push(1)

== NAME

git-lfs-pre-push - Git pre-push hook implementation

== SYNOPSIS

`git lfs pre-push` <remote> [remoteurl]

== DESCRIPTION

Responds to Git pre-push hook events. It reads the range of commits from STDIN, in the following format:

....
<local-ref> SP <local-sha1> SP <remote-ref> SP <remote-sha1> \n
....

It also takes the remote name and URL as arguments.

If any of those Git objects are associated with Git LFS objects, those objects will be pushed to the Git LFS API.

In the case of pushing a new branch, the list of Git objects will be all of the Git objects in this branch.

In the case of deleting a branch, no attempts to push Git LFS objects will be made.

== OPTIONS

* `GIT_LFS_SKIP_PUSH`: Do nothing on pre-push. For more, see: git-lfs-config(5).

== SEE ALSO

git-lfs-clean(1), git-lfs-push(1).

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-prune.adoc000066400000000000000000000151361472372047300201340ustar00rootroot00000000000000= git-lfs-prune(1)

== NAME

git-lfs-prune - Delete old LFS files from local storage

== SYNOPSIS

`git lfs prune` [options]

== DESCRIPTION

Deletes local copies of LFS files which are old, thus freeing up disk space. Prune operates by enumerating all the locally stored objects, and then deleting any which are not referenced by at least ONE of the following:

* the current checkout
* all existing stashes
* a 'recent branch'; see <<_recent_files>>
* a 'recent commit' on the current branch or recent branches; see <<_recent_files>>
* a commit which has not been pushed; see <<_unpushed_lfs_files>>
* any other worktree checkouts; see git-worktree(1)

In general terms, prune will delete files you're not currently using and which are not 'recent', so long as they've been pushed, i.e. the local copy is not the only one.

The reflog is not considered, only commits. Therefore LFS objects that are only referenced by orphaned commits are always deleted.

Note: you should not run `git lfs prune` if you have different repositories sharing the same custom storage directory; see git-lfs-config(5) for more details about the `lfs.storage` option.

In your Git configuration or in a `.lfsconfig` file, you may set `lfs.fetchexclude` to a comma-separated list of paths. If `lfs.fetchexclude` is defined, then any Git LFS files whose paths match one in that list will be pruned unless they are referenced by a stash or an unpushed commit. Paths are matched using wildcard matching as per gitignore(5).

== OPTIONS

`--dry-run`::
`-d`::
  Don't actually delete anything, just report on what would have been done.
`--force`::
`-f`::
  Prune all objects except unpushed objects, including objects required for currently checked out refs. Implies `--recent`.
`--recent`::
  Prune even objects that would normally be preserved by the configuration options specified below in <<_recent_files>>.
`--verify-remote`::
`-c`::
  Contact the remote and check that copies of reachable files we would delete definitely exist before deleting. See <<_verify_remote>>.
`--no-verify-remote`::
  Disables remote verification if lfs.pruneverifyremotealways was enabled in settings. See <<_verify_remote>>.
`--verify-unreachable`::
  When doing `--verify-remote`, contact the remote and check unreachable objects as well. See <<_verify_remote>>.
`--no-verify-unreachable`::
  Disables remote verification of unreachable files if lfs.pruneverifyunreachablealways was enabled in settings. See <<_verify_remote>>.
`--when-unverified=<action>`::
  When `--verify-remote` cannot verify an object on the remote, either halt the execution or continue the deletion of verified objects. See <<_verify_remote>>.
`--verbose`::
`-v`::
  Report the full detail of what is/would be deleted.

== RECENT FILES

Prune won't delete LFS files referenced by 'recent' commits, in case you want to use them again without having to download them again. The definition of 'recent' is derived from the one used by git-lfs-fetch(1) to download recent objects with the `--recent` option, with an offset of a number of days (default 3) to ensure that we always keep files you download for a few days.

Here are the git-config(1) settings that control this behaviour:

* `lfs.pruneoffsetdays` The number of extra days added to the fetch recent settings when using them to decide when to prune. So for a reference to be considered old enough to prune, it has to be this many days older than the oldest reference that would be downloaded via `git lfs fetch --recent`. Only used if the relevant fetch recent 'days' setting is non-zero. Default 3 days.
* `lfs.fetchrecentrefsdays` `lfs.fetchrecentremoterefs` `lfs.fetchrecentcommitsdays` These have the same meaning as git-lfs-fetch(1) with the `--recent` option; they are used as a base for the offset above. Anything which falls outside of this offsetted window is considered old enough to prune. If a day value is zero, that condition is not used at all to retain objects and they will be pruned.

== UNPUSHED LFS FILES

When the only copy of an LFS file is local, and it is still reachable from any reference, that file can never be pruned, regardless of how old it is.

To determine whether an LFS file has been pushed, we check the difference between local refs and remote refs; where the local ref is ahead, any LFS files referenced in those commits are unpushed and will not be deleted. This works because the LFS pre-push hook always ensures that LFS files are pushed before the remote branch is updated.

See <<_default_remote>>, for which remote is considered 'pushed' for pruning purposes.

== VERIFY REMOTE

The `--verify-remote` option calls the remote to ensure that any reachable LFS files to be deleted have copies on the remote before actually deleting them.

Usually the check performed by <<_unpushed_lfs_files>> is enough to determine that files have been pushed, but if you want to be extra sure at the expense of extra overhead you can make prune actually call the remote API and verify the presence of the files you're about to delete locally. See <<_default_remote>> for which remote is checked.

You can make this behaviour the default by setting `lfs.pruneverifyremotealways` to true.

In addition to the overhead of calling the remote, using this option also requires prune to distinguish between totally unreachable files (e.g. those that were added to the index but never committed, or referenced only by orphaned commits), and files which are still referenced, but by commits which are prunable. This makes the prune process take longer.

If you want to verify unreachable objects as well, set the `--verify-unreachable` option.

You can check for unreachable objects by default by setting `lfs.pruneverifyunreachablealways` to true.

By default, `--verify-remote` halts execution if a file cannot be verified. Set `--when-unverified=continue` to not halt execution but continue deleting all objects that can be verified.
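For example, a cautious pruning session might first perform a dry run with remote verification enabled before deleting anything; this flag combination is just one possibility:

....
$ git lfs prune --dry-run --verify-remote
$ git lfs prune --verify-remote --when-unverified=continue
....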
== DEFAULT REMOTE

When identifying <<_unpushed_lfs_files>> and performing <<_verify_remote>>, a single remote, 'origin', is normally used as the reference. This one remote is considered canonical; even if you use multiple remotes, you probably want to retain your local copies until they've made it to that remote. 'origin' is used by default because that will usually be a main central repo, or your fork of it - in both cases that's a valid remote backup of your work. If origin doesn't exist then by default nothing will be pruned because everything is treated as 'unpushed'.

You can alter the remote via git config: `lfs.pruneremotetocheck`. Set this to a different remote name to check that one instead of 'origin'.

== SEE ALSO

git-lfs-fetch(1), gitignore(5).

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-pull.adoc000066400000000000000000000034151472372047300177540ustar00rootroot00000000000000= git-lfs-pull(1)

== NAME

git-lfs-pull - Download all Git LFS files for current ref & checkout

== SYNOPSIS

`git lfs pull` [options] [<remote>]

== DESCRIPTION

Download Git LFS objects for the currently checked out ref, and update the working copy with the downloaded content if required.

This is equivalent to running the following 2 commands:

  git lfs fetch [options] [<remote>]
  git lfs checkout

== OPTIONS

`-I <paths>`::
`--include=<paths>`::
  Specify lfs.fetchinclude just for this invocation; see <<_include_and_exclude>>
`-X <paths>`::
`--exclude=<paths>`::
  Specify lfs.fetchexclude just for this invocation; see <<_include_and_exclude>>

== INCLUDE AND EXCLUDE

You can configure Git LFS to only fetch objects to satisfy references in certain paths of the repo, and/or to exclude certain paths of the repo, to reduce the time you spend downloading things you do not use.

In your Git configuration or in a `.lfsconfig` file, you may set either or both of `lfs.fetchinclude` and `lfs.fetchexclude` to comma-separated lists of paths. If `lfs.fetchinclude` is defined, Git LFS objects will only be fetched if their path matches one in that list, and if `lfs.fetchexclude` is defined, Git LFS objects will only be fetched if their path does not match one in that list. Paths are matched using wildcard matching as per gitignore(5).

Note that using the command-line options `-I` and `-X` overrides the respective configuration settings. Setting either option to an empty string clears the value.

== DEFAULT REMOTE

Without arguments, pull downloads from the default remote. The default remote is the same as for `git pull`, i.e. based on the remote branch you're tracking first, or origin otherwise.

== SEE ALSO

git-lfs-fetch(1), git-lfs-checkout(1), gitignore(5).

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-push.adoc000066400000000000000000000027721472372047300177630ustar00rootroot00000000000000= git-lfs-push(1)

== NAME

git-lfs-push - Push queued large files to the Git LFS endpoint

== SYNOPSIS

`git lfs push` [options] <remote> [<ref>...] +
`git lfs push` <remote> [<ref>...] +
`git lfs push` [options] --stdin <remote>
`git lfs push` --object-id <remote> [<oid>...]
`git lfs push` --object-id --stdin <remote>

== DESCRIPTION

Upload Git LFS files to the configured endpoint for the current Git remote. By default, it filters out objects that are already referenced by the local clone of the remote.

== OPTIONS

`--dry-run`::
  Print the files that would be pushed, without actually pushing them.
`--all`::
  This pushes all objects to the remote that are referenced by any commit reachable from the refs provided as arguments. If no refs are provided, then all local refs are pushed.
Note that this behavior differs from that of git-lfs-fetch(1) when its `--all` option is used; in that case, all refs are fetched, including refs other than those under `refs/heads` and `refs/tags`. If you are migrating a repository with these commands, make sure to run `git lfs push` for any additional remote refs that contain Git LFS objects not reachable from your local refs.
`--object-id`::
  This pushes only the object OIDs listed at the end of the command, separated by spaces.
`--stdin`::
  Read a list of newline-delimited refs (or object IDs when using `--object-id`) from standard input instead of the command line.

== SEE ALSO

git-lfs-fetch(1), git-lfs-pre-push(1).

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-smudge.adoc000066400000000000000000000034531472372047300202660ustar00rootroot00000000000000= git-lfs-smudge(1)

== NAME

git-lfs-smudge - Git smudge filter that converts pointers in blobs to the actual content

== SYNOPSIS

`git lfs smudge` [<path>] +
`git lfs smudge` --skip [<path>]

== DESCRIPTION

Read a Git LFS pointer file from standard input and write the contents of the corresponding large file to standard output. If needed, download the file's contents from the Git LFS endpoint. The <path> argument, if provided, is only used for a progress bar.

Smudge is typically run by Git's smudge filter, configured by the repository's Git attributes.

In your Git configuration or in a `.lfsconfig` file, you may set either or both of `lfs.fetchinclude` and `lfs.fetchexclude` to comma-separated lists of paths. If `lfs.fetchinclude` is defined, Git LFS pointer files will only be replaced with the contents of the corresponding Git LFS object file if their path matches one in that list, and if `lfs.fetchexclude` is defined, Git LFS pointer files will only be replaced with the contents of the corresponding Git LFS object file if their path does not match one in that list. Paths are matched using wildcard matching as per gitignore(5). Git LFS pointer files that are not replaced with the contents of their corresponding object files are simply copied to standard output without change.

== OPTIONS

Without any options, `git lfs smudge` outputs the raw Git LFS content to standard output.

`--skip`::
  Skip automatic downloading of objects on clone or pull.
`GIT_LFS_SKIP_SMUDGE`::
  Disables the smudging process. For more, see: git-lfs-config(5).

== KNOWN BUGS

On Windows, Git before 2.34.0 does not handle files in the working tree larger than 4 gigabytes. Newer versions of Git, as well as Unix versions, are unaffected.

== SEE ALSO

git-lfs-install(1), gitattributes(5), gitignore(5).

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-standalone-file.adoc000066400000000000000000000012621472372047300220430ustar00rootroot00000000000000= git-lfs-standalone-file(1)

== NAME

git-lfs-standalone-file - Standalone transfer adapter for file URLs

== SYNOPSIS

`git lfs standalone-file`

== DESCRIPTION

Provides a standalone transfer adapter for file URLs (local paths).

By default, Git LFS requires the support of an HTTP server to implement the Git LFS protocol. However, this tool allows the use of URLs starting with `file:///` (that is, those representing local paths) in addition. Configuration is not necessary; Git LFS handles this internally.

When invoked, this tool speaks JSON on input and output as a standalone transfer adapter. It is not intended for use by end users.

== SEE ALSO

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-status.adoc000066400000000000000000000014741472372047300203260ustar00rootroot00000000000000= git-lfs-status(1)

== NAME

git-lfs-status - Show the status of Git LFS files in the working tree

== SYNOPSIS

`git lfs status` [<options>]

== DESCRIPTION

Display paths of Git LFS objects that

* have not been pushed to the Git LFS server. These are large files that would be uploaded by `git push`.
* have differences between the index file and the current HEAD commit. These are large files that would be committed by `git commit`.
* have differences between the working tree and the index file. These are files that could be staged using `git add`.

This command must be run in a non-bare repository.

== OPTIONS

`--porcelain`::
  Give the output in an easy-to-parse format for scripts.
`--json`::
  Give the output in a stable JSON format for scripts.

== SEE ALSO

git-lfs-ls-files(1).

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-track.adoc000066400000000000000000000047041472372047300201060ustar00rootroot00000000000000= git-lfs-track(1)

== NAME

git-lfs-track - View or add Git LFS paths to Git attributes

== SYNOPSIS

`git lfs track` [options] [<pattern>...]

== DESCRIPTION

Start tracking the given pattern(s) through Git LFS. The <pattern> argument is written to .gitattributes. If no paths are provided, simply list the currently-tracked paths.

The https://git-scm.com/docs/gitattributes[gitattributes documentation] states that patterns use the https://git-scm.com/docs/gitignore[gitignore pattern rules] to match paths. This means that patterns which contain asterisk (`*`), question mark (`?`), and the bracket characters (`[` and `]`) are treated specially; to disable this behavior and treat them literally instead, use `--filename` or escape the character with a backslash.

== OPTIONS

`--verbose`::
`-v`::
  If enabled, have `git lfs track` log files which it will touch. Disabled by default.
`--dry-run`::
`-d`::
  If enabled, have `git lfs track` log all actions it would normally take (adding entries to .gitattributes, touching files on disk, etc) without performing any mutative operations to the disk.
+
`git lfs track --dry-run [files]` also implicitly mimics the behavior of passing the `--verbose`, and will log in greater detail what it is doing.
+
Disabled by default.
`--filename`::
  Treat the arguments as literal filenames, not as patterns. Any special glob characters in the filename will be escaped when writing the `.gitattributes` file.
`--lockable`::
`-l`::
  Make the paths 'lockable', meaning they should be locked to edit them, and will be made read-only in the working copy when not locked.
`--not-lockable`::
  Remove the lockable flag from the paths so they are no longer read-only unless locked.
`--no-excluded`::
  Do not list patterns that are excluded in the output; only list patterns that are tracked.
`--no-modify-attrs`::
  Makes matched entries stat-dirty so that Git can re-index files you wish to convert to LFS. Does not modify any `.gitattributes` file(s).

== EXAMPLES

* List the patterns that Git LFS is currently tracking:
+
`git lfs track`

* Configure Git LFS to track GIF files:
+
`git lfs track "*.gif"`

* Configure Git LFS to track PSD files and make them read-only unless locked:
+
`git lfs track --lockable "*.psd"`

* Configure Git LFS to track the file named `project [1].psd`:
+
`git lfs track --filename "project [1].psd"`

== SEE ALSO

git-lfs-untrack(1), git-lfs-install(1), gitattributes(5), gitignore(5).

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-uninstall.adoc000066400000000000000000000026171472372047300210140ustar00rootroot00000000000000= git-lfs-uninstall(1)

== NAME

git-lfs-uninstall - Remove Git LFS configuration

== SYNOPSIS

`git lfs uninstall`

== DESCRIPTION

Perform the following actions to remove the Git LFS configuration:

* Remove the "lfs" clean and smudge filters from the global Git config.
* Uninstall the Git LFS pre-push hook if run from inside a Git repository.

== OPTIONS

`--local`::
  Removes the "lfs" smudge and clean filters from the local repository's git config, instead of the global git config (~/.gitconfig).
`--worktree`::
  Removes the "lfs" smudge and clean filters from the current working tree's git config, instead of the global git config (~/.gitconfig) or local repository's git config ($GIT_DIR/config). If multiple working trees are in use, the Git config extension `worktreeConfig` must be enabled to use this option. If only one working tree is in use, `--worktree` has the same effect as `--local`. This option is only available if the installed Git version is at least 2.20.0 and therefore supports the "worktreeConfig" extension.
`--system`::
  Removes the "lfs" smudge and clean filters from the system git config, instead of the global git config (~/.gitconfig).
`--skip-repo`::
  Skips cleanup of the local repo; use if you want to uninstall the global lfs filters but not make changes to the current repo.

== SEE ALSO

git-lfs-install(1), git-worktree(1).

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-unlock.adoc000066400000000000000000000017011472372047300202700ustar00rootroot00000000000000= git-lfs-unlock(1)

== NAME

git-lfs-unlock - Remove "locked" setting for a file on the Git LFS server

== SYNOPSIS

`git lfs unlock` [options] <path>

== DESCRIPTION

Removes the given file path as "locked" on the Git LFS server. Files must exist and have a clean git status before they can be unlocked. The `--force` flag will skip these checks.

== OPTIONS

`-r <name>`::
`--remote=<name>`::
  Specify the Git LFS server to use. Ignored if the `lfs.url` config key is set.
`-f`::
`--force`::
  Tells the server to remove the lock, even if it's owned by another user.
`-i <id>`::
`--id=<id>`::
  Specifies a lock by its ID instead of path.
`--json`::
  Writes lock info as JSON to STDOUT if the command exits successfully. Intended for interoperation with external tools. If the command returns with a non-zero exit code, plain text messages will be sent to STDERR.

== SEE ALSO

git-lfs-lock(1), git-lfs-locks(1).

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-untrack.adoc000066400000000000000000000006701472372047300204470ustar00rootroot00000000000000= git-lfs-untrack(1)

== NAME

git-lfs-untrack - Remove Git LFS paths from Git Attributes

== SYNOPSIS

`git lfs untrack` <path>...

== DESCRIPTION

Stop tracking the given path(s) through Git LFS. The <path> argument can be a glob pattern or a file path.

== EXAMPLES

* Configure Git LFS to stop tracking GIF files:
+
`git lfs untrack "*.gif"`

== SEE ALSO

git-lfs-track(1), git-lfs-install(1), gitattributes(5).

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs-update.adoc000066400000000000000000000014151472372047300202600ustar00rootroot00000000000000= git-lfs-update(1)

== NAME

git-lfs-update - Update Git hooks

== SYNOPSIS

`git lfs update` [--manual | --force]

== DESCRIPTION

Updates the Git hooks used by Git LFS. Silently upgrades known hook contents. If you have your own custom hooks you may need to use one of the extended options below.
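For instance, a plain invocation silently upgrades the known hooks, while the manual mode described below only prints instructions:

....
$ git lfs update
$ git lfs update --manual
....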
== OPTIONS

`--manual`::
`-m`::
  Print instructions for manually updating your hooks to include git-lfs functionality. Use this option if `git lfs update` fails because of existing hooks and you want to retain their functionality.
`--force`::
`-f`::
  Forcibly overwrite any existing hooks with git-lfs hooks. Use this option if `git lfs update` fails because of existing hooks but you don't care about their current contents.

== SEE ALSO

Part of the git-lfs(1) suite.
git-lfs-3.6.1/docs/man/git-lfs.adoc000066400000000000000000000077751472372047300170150ustar00rootroot00000000000000= git-lfs(1)

== NAME

git-lfs - Work with large files in Git repositories

== SYNOPSIS

`git lfs` <command> [<args>]

== DESCRIPTION

Git LFS is a system for managing and versioning large files in association with a Git repository. Instead of storing the large files within the Git repository as blobs, Git LFS stores special "pointer files" in the repository, while storing the actual file contents on a Git LFS server. The contents of the large file are downloaded automatically when needed, for example when a Git branch containing the large file is checked out.

Git LFS works by using a "smudge" filter to look up the large file contents based on the pointer file, and a "clean" filter to create a new version of the pointer file when the large file's contents change. It also uses a `pre-push` hook to upload the large file contents to the Git LFS server whenever a commit containing a new large file version is about to be pushed to the corresponding Git server.

== COMMANDS

Like Git, Git LFS commands are separated into high level ("porcelain") commands and low level ("plumbing") commands.

=== High level porcelain commands

git-lfs-checkout(1)::
  Populate working copy with real content from Git LFS files.
git-lfs-completion(1)::
  Generate shell scripts for command-line tab-completion of Git LFS commands.
git-lfs-dedup(1)::
  De-duplicate Git LFS files.
git-lfs-env(1)::
  Display the Git LFS environment.
git-lfs-ext(1)::
  Display Git LFS extension details.
git-lfs-fetch(1)::
  Download Git LFS files from a remote.
git-lfs-fsck(1)::
  Check Git LFS files for consistency.
git-lfs-install(1)::
  Install Git LFS configuration.
git-lfs-lock(1)::
  Set a file as "locked" on the Git LFS server.
git-lfs-locks(1)::
  List currently "locked" files from the Git LFS server.
git-lfs-logs(1)::
  Show errors from the Git LFS command.
git-lfs-ls-files(1)::
  Show information about Git LFS files in the index and working tree.
git-lfs-migrate(1)::
  Migrate history to or from Git LFS.
git-lfs-prune(1)::
  Delete old Git LFS files from local storage.
git-lfs-pull(1)::
  Fetch Git LFS changes from the remote & checkout any required working tree files.
git-lfs-push(1)::
  Push queued large files to the Git LFS endpoint.
git-lfs-status(1)::
  Show the status of Git LFS files in the working tree.
git-lfs-track(1)::
  View or add Git LFS paths to Git attributes.
git-lfs-uninstall(1)::
  Uninstall Git LFS by removing hooks and smudge/clean filter configuration.
git-lfs-unlock(1)::
  Remove "locked" setting for a file on the Git LFS server.
git-lfs-untrack(1)::
  Remove Git LFS paths from Git Attributes.
git-lfs-update(1)::
  Update Git hooks for the current Git repository.
git-lfs-version(1)::
  Report the version number.

=== Low level plumbing commands

git-lfs-clean(1)::
  Git clean filter that converts large files to pointers.
git-lfs-filter-process(1)::
  Git process filter that converts between large files and pointers.
git-lfs-merge-driver(1)::
  Merge text-based LFS files.
git-lfs-pointer(1)::
  Build and compare pointers.
git-lfs-post-checkout(1)::
  Git post-checkout hook implementation.
git-lfs-post-commit(1)::
  Git post-commit hook implementation.
git-lfs-post-merge(1)::
  Git post-merge hook implementation.
git-lfs-pre-push(1)::
  Git pre-push hook implementation.
git-lfs-smudge(1)::
  Git smudge filter that converts pointers in blobs to the actual content.
git-lfs-standalone-file(1)::
  Git LFS standalone transfer adapter for file URLs (local paths).

== EXAMPLES

To get started with Git LFS, the following commands can be used.

. Set up Git LFS on your system. You only have to do this once per user account:
+
....
git lfs install
....

. Choose the type of files you want to track, for example, all `ISO` images, with git-lfs-track(1):
+
....
git lfs track "*.iso"
....

. The above stores this information in gitattributes(5) files, so that file needs to be added to the repository:
+
....
git add .gitattributes
....

. Commit, push and work with the files normally:
+
....
git add file.iso
git commit -m "Add disk image"
git push
....
git-lfs-3.6.1/docs/man/mangen.go000066400000000000000000000137311472372047300164030ustar00rootroot00000000000000package main

import (
	"bufio"
	"flag"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strings"
)

func infof(w io.Writer, format string, a ...interface{}) {
	if !*verbose {
		return
	}
	fmt.Fprintf(w, format, a...)
}

func warnf(w io.Writer, format string, a ...interface{}) {
	fmt.Fprintf(w, format, a...)
}

func readManDir() (string, []os.DirEntry) {
	rootDirs := []string{
		"..",
		"/tmp/docker_run/git-lfs",
	}

	// Remember the last error so the failure message below can report it.
	var err error
	for _, rootDir := range rootDirs {
		var fs []os.DirEntry
		fs, err = os.ReadDir(filepath.Join(rootDir, "docs", "man"))
		if err == nil {
			return rootDir, fs
		}
	}

	warnf(os.Stderr, "Failed to open man dir: %v\n", err)
	os.Exit(2)
	return "", nil
}

func titleizeXref(s string) string {
	return strings.Replace(strings.ToTitle(s[1:2])+s[2:], "_", " ", -1)
}

var (
	verbose = flag.Bool("verbose", false, "Show verbose output.")
)

// Reads all .adoc files and converts them to string literals;
// triggered by "go generate" comment.
// Literals are inserted into a map using an init function; this means
// that there are no compilation errors if 'go generate' hasn't been run, just
// blank man files.
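// main converts each docs/man/*.adoc page found via readManDir into an
// entry in the generated commands/mancontent_gen.go file, stripping
// asciidoc markup so the embedded help text reads cleanly in a terminal.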
func main() {
	flag.Parse()
	infof(os.Stderr, "Converting man pages into code...\n")
	rootDir, fs := readManDir()
	manDir := filepath.Join(rootDir, "docs", "man")
	out, err := os.Create(filepath.Join(rootDir, "commands", "mancontent_gen.go"))
	if err != nil {
		warnf(os.Stderr, "Failed to create go file: %v\n", err)
		os.Exit(2)
	}
	out.WriteString("package commands\n\nfunc init() {\n")
	out.WriteString("\t// THIS FILE IS GENERATED, DO NOT EDIT\n")
	out.WriteString("\t// Use 'go generate ./commands' to update\n")
	fileregex := regexp.MustCompile(`git-lfs(?:-([A-Za-z\-]+))?.adoc`)
	headerregex := regexp.MustCompile(`^(===?)\s+([A-Za-z0-9 -]+)`)
	// cross-references
	linkregex := regexp.MustCompile(`<<([^,>]+)(?:,([^>]+))?>>`)
	// man links
	manlinkregex := regexp.MustCompile(`(git)(?:-(lfs))?-([a-z\-]+)\(\d\)`)
	// source blocks
	sourceblockregex := regexp.MustCompile(`\[source(,.*)?\]`)
	// anchors
	anchorregex := regexp.MustCompile(`\[\[(.+)\]\]`)
	count := 0
	for _, f := range fs {
		if match := fileregex.FindStringSubmatch(f.Name()); match != nil {
			infof(os.Stderr, "%v\n", f.Name())
			cmd := match[1]
			if len(cmd) == 0 {
				// This is git-lfs.adoc
				cmd = "git-lfs"
			}
			out.WriteString("\tManPages[\"" + cmd + "\"] = `")
			contentf, err := os.Open(filepath.Join(manDir, f.Name()))
			if err != nil {
				warnf(os.Stderr, "Failed to open %v: %v\n", f.Name(), err)
				os.Exit(2)
			}
			// Process the asciidoc to make it nicer as help text
			scanner := bufio.NewScanner(contentf)
			firstHeaderDone := false
			skipNextLineIfBlank := false
			lastLineWasList := false
			isSourceBlock := false
			sourceBlockLine := ""
		scanloop:
			for scanner.Scan() {
				line := scanner.Text()
				trimmedline := strings.TrimSpace(line)
				if skipNextLineIfBlank && len(trimmedline) == 0 {
					skipNextLineIfBlank = false
					lastLineWasList = false
					continue
				}

				// Special case headers
				if hmatch := headerregex.FindStringSubmatch(line); hmatch != nil {
					if len(hmatch[1]) == 2 {
						header := strings.ToLower(hmatch[2])
						switch header {
						case "name":
							continue
						case "synopsis":
							// Ignore this, just go direct to command
						case "description":
							// Just skip the header & newline
							skipNextLineIfBlank = true
						case "options":
							out.WriteString("Options:" + "\n")
						case "see also":
							// don't include any content after this
							break scanloop
						default:
							out.WriteString(strings.ToUpper(header[:1]) + header[1:] + "\n")
							out.WriteString(strings.Repeat("-", len(header)) + "\n")
						}
						firstHeaderDone = true
					} else {
						out.WriteString(hmatch[2] + "\n")
						out.WriteString(strings.Repeat("~", len(hmatch[2])) + "\n")
					}
					lastLineWasList = false
					continue
				}

				if lmatches := linkregex.FindAllStringSubmatch(line, -1); lmatches != nil {
					for _, lmatch := range lmatches {
						if len(lmatch) > 2 && lmatch[2] != "" {
							line = strings.Replace(line, lmatch[0], `"`+lmatch[2]+`"`, 1)
						} else {
							line = strings.Replace(line, lmatch[0], `"`+titleizeXref(lmatch[1])+`"`, 1)
						}
					}
				}
				if manmatches := manlinkregex.FindAllStringSubmatch(line, -1); manmatches != nil {
					for _, manmatch := range manmatches {
						line = strings.Replace(line, manmatch[0], strings.Join(manmatch[1:], " "), 1)
					}
				}
				if sourceblockmatches := sourceblockregex.FindStringIndex(line); sourceblockmatches != nil {
					isSourceBlock = true
					continue
				}
				if anchormatches := anchorregex.FindStringIndex(line); anchormatches != nil {
					// Skip anchors.
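					// Anchors are only link targets for the rendered asciidoc;
					// they carry no information in the plain-text help output.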
					continue
				}

				// Skip content until after first header
				if !firstHeaderDone {
					continue
				}
				// OK, content here

				// handle source block headers
				if isSourceBlock {
					sourceBlockLine = line
					isSourceBlock = false
					line = ""
					continue
				} else if sourceBlockLine != "" && line == sourceBlockLine {
					line = ""
					sourceBlockLine = ""
				}

				// remove characters that asciidoc would render invisible in a text env.
				for _, invis := range []string{"`", "...."} {
					line = strings.Replace(line, invis, "", -1)
				}
				line = strings.TrimSuffix(line, " +")

				// indent bullets and definition lists
				if strings.HasPrefix(line, "*") {
					lastLineWasList = true
				} else if strings.HasSuffix(line, "::") {
					lastLineWasList = true
					line = strings.TrimSuffix(line, ":")
				} else if lastLineWasList && line == "+" {
					line = ""
				} else if lastLineWasList && line == "" {
					lastLineWasList = false
				} else if lastLineWasList && !strings.HasPrefix(line, " ") {
					// indent paragraphs under bullets if not already done
					line = " " + line
				}

				out.WriteString(line + "\n")
			}
			out.WriteString("`\n")
			contentf.Close()
			count++
		}
	}
	out.WriteString("}\n")
	infof(os.Stderr, "Successfully processed %d man pages.\n", count)
}
git-lfs-3.6.1/docs/proposals/000077500000000000000000000000001472372047300160515ustar00rootroot00000000000000git-lfs-3.6.1/docs/proposals/README.md000066400000000000000000000005241472372047300173310ustar00rootroot00000000000000# Git LFS Proposals

This directory contains high level proposals for future Git LFS features. Inclusion here does not guarantee when or if a feature will make it into Git LFS. It doesn't even guarantee that the specifics won't change.

Everyone is welcome to submit their own proposal as a markdown file in a pull request for discussion.
git-lfs-3.6.1/docs/proposals/locking.md000066400000000000000000000442771472372047300200330ustar00rootroot00000000000000# Locking feature proposal

We need the ability to lock files to discourage (we can never prevent) parallel editing of binary files which will result in an unmergeable situation. This is not a common theme in git (for obvious reasons, it conflicts with its distributed, parallel nature), but is a requirement of any binary management system, since files are very often completely unmergeable, and no-one likes having to throw their work away & do it again.

## What not to do: single branch model

The simplest way to organise locking is to require that binary files only ever be edited on a single branch, so that editing a file can follow a simple sequence:

1. File starts out read-only locally
2. User locks the file; the user is required to have the latest version locally from the 'main' branch
3. User edits file & commits 1 or more times
4. User pushes these commits to the main branch
5. File is unlocked (and made read only locally again)

## A more usable approach: multi-branch model

In practice teams need to work on more than one branch, and sometimes that work will have corresponding binary edits.

It's important to remember that the core requirement is to prevent *unintended parallel edits of an unmergeable file*.

One way to address this would be to say that locking a file locks it across all branches, and that lock is only released when the branch containing the edit is merged back into a 'primary' branch. The problem is that although that allows branching and also prevents merge conflicts, it forces merging of feature branches before a further edit can be made by someone else.
An alternative is that locking a file locks it across all branches, but when the lock is released, further locks on that file can only be taken on a descendant of the latest edit that has been made, whichever branch it is on. That means a change to the rules of the lock sequence, namely: 1. File starts out read-only locally 2. User tries to lock a file. This is only allowed if: * The file is not already locked by anyone else, AND * One of the following are true: * The user has, or agrees to check out, a descendant of the latest commit that was made for that file, whatever branch that was on, OR * The user stays on their current commit but resets the locked file to the state of the latest commit (making it modified locally, and also cherry-picking changes for that file in practice). 3. User edits file & commits 1 or more times, on any branch they like 4. User pushes the commits 5. File is unlocked if: * the latest commit to that file has been pushed (on any branch), and * the file is not locally edited This means that long-running branches can be maintained but that editing of a binary file must always incorporate the latest binary edits. This means that if this system is always respected, there is only ever one linear stream of development for this binary file, even though that 'thread' may wind its way across many different branches in the process. This does mean that no-one's changes are accidentally lost, but it does mean that we are either making new branches dependent on others, OR we're cherry-picking changes to individual files across branches. This does change the traditional git workflow, but importantly it achieves the core requirement of never *accidentally* losing anyone's changes. How changes are threaded across branches is always under the user's control. ## Breaking the rules We must allow the user to break the rules if they know what they are doing. Locking is there to prevent unintended binary merge conflicts, but sometimes you might want to intentionally create one, with the full knowledge that you're going to have to manually merge the result (or more likely, pick one side and discard the other) later down the line. There are 2 cases of rule breaking to support: 1. **Break someone else's lock** People lock files and forget they've locked them, then go on holiday, or worse, leave the company. You can't be stuck not being able to edit that file so must be able to forcibly break someone else's lock. Ideally this should result in some kind of notification to the original locker (might need to be a special value-add on BB/Stash). This effectively removes the other person's lock and is likely to cause them problems if they had edited and try to push next time. 2. **Allow a parallel lock** Actually similar to breaking someone else's lock, except it lets you take another lock on a file in parallel, leaving their lock in place too, and knowing that you're going to have to resolve the merge problem later. You could handle this just by manually making files read/write, then using 'force push' to override hooks that prevent pushing when not locked. However by explicitly registering a parallel lock (possible form: 'git lfs lock --force') this could be recorded and communicated to anyone else with a lock, letting them know about possible merge issues down the line. ## Detailed feature points |No | Feature | Notes |---|---------|------------------ |1 |Lock server must be available at same API URL| |2 |Identify unmergeable files as subset of lfs files|`git lfs track -b` ? 
|3 |Make unmergeable files read-only on checkout|Perform in smudge filter |4 |Lock a file
  • Check with server which must atomically check/set
  • Check person requesting the lock is checked out on a commit which is a descendant of the last edit of that file (locally or on server, although last lock shouldn't have been released until push anyway), or allow --force to break rule
  • Record lock on server
  • Make file read/write locally if success
|`git lfs lock <path>`? |5 |Release a lock
  • Check if locally modified, if so must discard
  • Check if user has more recent commit of this file than server, if so must push first
  • Release lock on server atomically
  • Make local file read-only
|`git lfs unlock <path>`? |6 |Break a lock, i.e. override someone else's lock and take it yourself.
  • Release lock on server atomically
  • Proceed as per 'Lock a file'
  • Notify original lock holder HOW?
|`git lfs lock --break <path>`? |7 |Release lock on reset (maybe). Configurable option / prompt? May be resetting just to start editing again|
|8 |Release lock on push (maybe, if unmodified). See above|
|9 |Cater for read-only binary files when merging locally
  • Because files are read-only this might prevent merge from working when actually it's valid.
  • Always fine to merge the latest version of a binary file to anywhere else
  • Fine to merge the non-latest version if user is aware that this may cause merge problems (see Breaking the rules)
  • Therefore this feature is about dealing with the read-only flag and issuing a warning if not the latest
| |10 |List current locks
  • That the current user has
  • That anyone has
  • Potentially scoped to folder
|`git lfs lock --list [paths...]` |11 |Reject a push containing a binary file currently locked by someone else|pre-receive hook on server, allow --force to override (i.e. existing parameter to git push) ## Locking challenges ### Making files read-only This is useful because it means it provides a reminder that the user should be locking the file before they start to edit it, to avoid the case of an unexpected merge later on. I've done some tests with chmod and discovered: * Removing the write bit doesn't cause the file to be marked modified (good) * In most editors it either prevents saving or (in Apple tools) prompts to 'unlock'. The latter is slightly unhelpful * In terms of marking files that need locking, adding custom flags to .gitattributes (like 'lock') seems to work; `git check-attr -a ` correctly lists the custom attribute * Once a file is marked read-only however, `git checkout` replaces it without prompting, with the write bit set * We can use the `post-checkout` hook to make files read-only, but we don't get any file information, on refs. This means we'd have to scan the whole working copy to figure out what we needed to mark read-only. To do this we'd have to have the attribute information and all the current lock information. This could be time consuming. * A way to speed up the `post-checkout` would be to diff the pre- and post-ref information that's provided and only check the files that changed. In the case of single-file checkouts I'm not sure this is possible though. * We could also feed either the diff or a file scan into `git check-attr --stdin` in order to share the exe, or do our own attribute matching * It's not entirely clear yet how merge & rebase might operate. May also need the `post-merge` hook * See contrib/hooks/setgitperms.perl for an example; so this isn't unprecedented #### Test cases for post-checkout * Checkout a branch * Calls `post-checkout` with pre/post SHA and branch=1 * Checkout a tag * Calls `post-checkout` with pre/post SHA and branch=1 (even though it's a tag) * Checkout by commit SHA * Calls `post-checkout` with pre/post SHA and branch=1 (even though it's a plain SHA) * Checkout named files (e.g. discard changes) * Calls `post-checkout` with identical pre/post SHA (HEAD) and branch=0 * Reset all files (discard all changes ie git reset --hard HEAD) * Doesn't call `post-checkout` - could restore write bit, but must have been set anyway for file to be edited, so not a problem? * Reset a branch to a previous commit * Doesn't call `post-checkout` - PROBLEM because can restore write bit & file was not modified. BUT: rare & maybe liveable * Merge a branch with lockable file changes (non-conflicting) * Rebase a branch with lockable files (non-conflicting) * Merge conflicts - fix then commit * Rebase conflicts - fix then continue * ## Implementation details (Initial simple API-only pass) ### Types To make the implementing locking on the lfs-test-server as well as other servers in the future easier, it makes sense to create a `lock` package that can be depended upon from any server. This will go along with Steve's refactor which touches the `lfs` package quite a bit. Below are enumerated some of the types that will presumably land in this sub-package. ```go // Lock represents a single lock that against a particular path. // // Locks returned from the API may or may not be currently active, according to // the Expired flag. type Lock struct { // Id is the unique identifier corresponding to this particular Lock. 
It
	// must be consistent with the local copy, and the server's copy.
	Id string `json:"id"`

	// Path is an absolute path to the file that is locked as a part of this
	// lock.
	Path string `json:"path"`

	// Committer is the author who initiated this lock.
	Committer struct {
		Name  string `json:"name"`
		Email string `json:"email"`
	} `json:"creator"`

	// CommitSHA is the commit that this Lock was created against. It is
	// strictly equal to the SHA of the minimum commit negotiated in order
	// to create this lock.
	CommitSHA string `json:"commit_sha"`

	// LockedAt is a required parameter that represents the instant in time
	// that this lock was created. For most server implementations, this
	// should be set to the instant at which the lock was initially
	// received.
	LockedAt time.Time `json:"locked_at"`

	// UnlockedAt is an optional parameter that represents the instant in
	// time that the lock stopped being active. If the lock is still active,
	// the server can either a) not send this field, or b) send the
	// zero-value of time.Time.
	UnlockedAt time.Time `json:"unlocked_at,omitempty"`
}

// Active returns whether or not the given lock is still active against the file
// that it is protecting.
func (l *Lock) Active() bool {
	return l.UnlockedAt.IsZero()
}
```

### Proposed Commands

#### `git lfs lock <path>`

The `lock` command will be used in accordance with the multi-branch flow as
proposed above to request that a lock be granted for the specific path passed
as an argument to the command.

```go
// LockRequest encapsulates the payload sent across the API when a client would
// like to obtain a lock against a particular path on a given remote.
type LockRequest struct {
	// Path is the path that the client would like to obtain a lock against.
	Path string `json:"path"`

	// LatestRemoteCommit is the SHA of the last known commit from the
	// remote that we are trying to create the lock against, as found in
	// `.git/refs/origin/<branch>`.
	LatestRemoteCommit string `json:"latest_remote_commit"`

	// Committer is the individual that wishes to obtain the lock.
	Committer struct {
		// Name is the name of the individual who would like to obtain the
		// lock, for instance: "Rick Olson".
		Name string `json:"name"`

		// Email is the email associated with the individual who would
		// like to obtain the lock, for instance: "rick@github.com".
		Email string `json:"email"`
	} `json:"committer"`
}
```

```go
// LockResponse encapsulates the information sent over the API in response to
// a `LockRequest`.
type LockResponse struct {
	// Lock is the Lock that was optionally created in response to the
	// payload that was sent (see above). If the lock already exists, then
	// the existing lock is sent in this field instead, and the author of
	// that lock remains the same, meaning that the client failed to obtain
	// that lock. An HTTP status of "409 - Conflict" is used here.
	//
	// If the lock was unable to be created, this field will hold the
	// zero-value of Lock and the Err field will provide a more detailed set
	// of information.
	//
	// If an error was experienced in creating this lock, then the
	// zero-value of Lock should be sent here instead.
	Lock Lock `json:"lock"`

	// CommitNeeded holds the minimum commit SHA that the client must have to
	// obtain the lock.
	CommitNeeded string `json:"commit_needed"`

	// Err is the optional error that was encountered while trying to create
	// the above lock.
	Err error `json:"error,omitempty"`
}
```
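As a usage sketch only (not part of the proposal), a client might drive these
two types as follows; `postJSON` is a hypothetical helper that performs the
HTTP round-trip and decodes the JSON response:

```go
// requestLock attempts to obtain a lock for a single path using the types
// defined above. Error formatting is elided to keep the sketch short.
func requestLock(path, latestRemoteCommit string) (*Lock, error) {
	req := &LockRequest{
		Path:               path,
		LatestRemoteCommit: latestRemoteCommit,
	}
	req.Committer.Name = "Jane Doe"
	req.Committer.Email = "jane@example.com"

	var resp LockResponse
	if err := postJSON("/locks", req, &resp); err != nil {
		return nil, err
	}
	if resp.Err != nil {
		// Covers the "409 - Conflict" case: resp.Lock holds the other
		// party's lock, and resp.CommitNeeded the minimum commit we must
		// check out before retrying.
		return nil, resp.Err
	}
	return &resp.Lock, nil
}
```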
#### `git lfs unlock <path>`

The `unlock` command is responsible for releasing the lock against a
particular file. The command takes a `<path>` argument which the LFS client
will have to internally resolve into an Id to unlock.

The API associated with this command can also be used on the server to remove
existing locks after a push.

```go
// An UnlockRequest is sent by the client over the API when they wish to remove
// a lock associated with the given Id.
type UnlockRequest struct {
	// Id is the identifier of the lock that the client wishes to remove.
	Id string `json:"id"`
}
```

```go
// UnlockResult is the result sent back from the API when asked to remove a
// lock.
type UnlockResult struct {
	// Lock is the lock corresponding to the asked-about lock in the
	// `UnlockRequest` (see above). If no matching lock was found, this
	// field will take the zero-value of Lock, and Err will be non-nil.
	Lock Lock `json:"lock"`

	// Err is an optional field which holds any error that was experienced
	// while removing the lock.
	Err error `json:"error,omitempty"`
}
```

Clients can determine whether or not their lock was removed by calling the
`Active()` method on the returned Lock, if `UnlockResult.Err` is nil.

#### `git lfs locks (-r <remote>|-b <branch>)|(-i id)`

For many operations, the LFS client will need to have knowledge of existing
locks on the server. Additionally, the client should not have to
self-sort/index this (potentially) large set.

To remove this need, both the `locks` command and corresponding API method
take several filters. Clients should turn the flag-values that were passed
during the command invocation into `Filter`s as described below, and batch
them up into the `Filters` field in the `LockListRequest`.

```go
// Property is a constant-type that narrows fields pertaining to the server's
// Locks.
type Property string

const (
	Branch Property = "branch"
	Id     Property = "id"
	// (etc) ...
)

// LockListRequest encapsulates the request sent to the server when the client
// would like a list of locks that match the given criteria.
type LockListRequest struct {
	// Filters is the set of filters to query against. If the client wishes
	// to obtain a list of all locks, an empty array should be passed here.
	Filters []struct {
		// Prop is the property to search against.
		Prop Property `json:"prop"`

		// Value is the value that the property must take.
		Value string `json:"value"`
	} `json:"filters"`

	// Cursor is an optional field used to tell the server which lock was
	// seen last, if scanning through multiple pages of results.
	//
	// Servers must return a list of locks sorted in reverse chronological
	// order, so the Cursor provides a consistent method of viewing all
	// locks, even if more were created between two requests.
	Cursor string `json:"cursor,omitempty"`

	// Limit is the maximum number of locks to return in a single page.
	Limit int `json:"limit"`
}
```

```go
// LockList encapsulates a set of Locks.
type LockList struct {
	// Locks is the set of locks returned back, typically matching the query
	// parameters sent in the LockListRequest call. If no locks were matched
	// from a given query, then `Locks` will be represented as an empty
	// array.
	Locks []Lock `json:"locks"`

	// NextCursor returns the Id of the Lock the client should update its
	// cursor to, if there are multiple pages of results for a particular
	// `LockListRequest`.
	NextCursor string `json:"next_cursor,omitempty"`

	// Err populates any error that was encountered during the search. If no
	// error was encountered and the operation was successful, then a value
	// of nil will be passed here.
	Err error `json:"error,omitempty"`
}
```
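For illustration only, a client listing locks on a branch under a folder might
assemble such a request as follows; the `Path` property constant is an
assumption here, alongside the proposal's `Branch` and `Id`:

```go
// listFolderLocks builds a LockListRequest scoped to one branch and one
// folder prefix. The Path constant is assumed, not defined above.
func listFolderLocks() *LockListRequest {
	req := &LockListRequest{
		Limit: 100,
	}
	req.Filters = []struct {
		Prop  Property `json:"prop"`
		Value string   `json:"value"`
	}{
		{Prop: Branch, Value: "my-feature"},
		{Prop: Path, Value: "assets/textures/"},
	}
	return req
}
```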
git-lfs-3.6.1/docs/proposals/locking_api.md000066400000000000000000000072131472372047300206550ustar00rootroot00000000000000# Locking API proposal

## POST /locks

| Method | Accept | Content-Type | Authorization |
|---------|--------------------------------|--------------------------------|---------------|
| `POST` | `application/vnd.git-lfs+json` | `application/vnd.git-lfs+json` | Basic |

### Request

```
> POST https://git-lfs-server.com/locks
> Accept: application/vnd.git-lfs+json
> Authorization: Basic
> Content-Type: application/vnd.git-lfs+json
>
> {
>   path: "/path/to/file",
>   remote: "origin",
>   latest_remote_commit: "d3adbeef",
>   committer: {
>     name: "Jane Doe",
>     email: "jane@example.com"
>   }
> }
```

### Response

* **Successful response**

```
< HTTP/1.1 201 Created
< Content-Type: application/vnd.git-lfs+json
<
< {
<   lock: {
<     id: "some-uuid",
<     path: "/path/to/file",
<     committer: {
<       name: "Jane Doe",
<       email: "jane@example.com"
<     },
<     commit_sha: "d3adbeef",
<     locked_at: "2016-05-17T15:49:06+00:00"
<   }
< }
```

* **Bad request: minimum commit not met**

```
< HTTP/1.1 400 Bad request
< Content-Type: application/vnd.git-lfs+json
<
< {
<   "commit_needed": "other_sha"
< }
```

* **Bad request: lock already present**

```
< HTTP/1.1 409 Conflict
< Content-Type: application/vnd.git-lfs+json
<
< {
<   lock: {
<     /* the previously created lock */
<   },
<   error: "already created lock"
< }
```

* **Bad response: server error**

```
< HTTP/1.1 500 Internal server error
< Content-Type: application/vnd.git-lfs+json
<
< {
<   error: "unable to create lock"
< }
```

## POST /locks/:id/unlock

| Method | Accept | Content-Type | Authorization |
|---------|--------------------------------|--------------|---------------|
| `POST` | `application/vnd.git-lfs+json` | None | Basic |

### Request

```
> POST https://git-lfs-server.com/locks/:id/unlock
> Accept: application/vnd.git-lfs+json
> Authorization: Basic
```

### Response

* **Success: unlocked**

```
< HTTP/1.1 200 Ok
< Content-Type: application/vnd.git-lfs+json
<
< {
<   lock: {
<     id: "some-uuid",
<     path: "/path/to/file",
<     committer: {
<       name: "Jane Doe",
<       email: "jane@example.com"
<     },
<     commit_sha: "d3adbeef",
<     locked_at: "2016-05-17T15:49:06+00:00",
<     unlocked_at: "2016-05-17T15:49:06+00:00"
<   }
< }
```

* **Bad response: server error**

```
< HTTP/1.1 500 Internal error
< Content-Type: application/vnd.git-lfs+json
<
< {
<   error: "git-lfs/git-lfs: internal server error"
< }
```

## GET /locks

| Method | Accept | Content-Type | Authorization |
|--------|-------------------------------|--------------|---------------|
| `GET` | `application/vnd.git-lfs+json` | None | Basic |

### Request

```
> GET https://git-lfs-server.com/locks?filters...&cursor=&limit=
> Accept: application/vnd.git-lfs+json
> Authorization: Basic
```

### Response

* **Success: locks found**

Note: no matching locks yields a payload of `locks: []`, and a status of 200.
``` < HTTP/1.1 200 Ok < Content-Type: application/vnd.git-lfs+json < < { < locks: [ < { < id: "some-uuid", < path: "/path/to/file", < committer": { < name: "Jane Doe", < email: "jane@example.com" < }, < commit_sha: "1ec245f", < locked_at: "2016-05-17T15:49:06+00:00" < } < ], < next_cursor: "optional-next-id", < error: "optional error" < } ``` * **Bad response: some locks may have matched, but the server encountered an error** ``` < HTTP/1.1 500 Internal error < Content-Type: application/vnd.git-lfs+json < < { < locks: [], < error: "git-lfs/git-lfs: internal server error" < } ``` git-lfs-3.6.1/docs/proposals/locking_notes.md000066400000000000000000000104541472372047300212350ustar00rootroot00000000000000# Capture Locking Notes during Locks creation and retrieve same during List Locks. ## Concept The git-lfs REST API for Locks creation should be able to accept additonal attribute (message/notes) which would be easy to have some information related to lock creation. This same information can be retrieved back with the git-lfs List Locks REST API. - Allow to have additional attribute to store the lock message/notes during lock creation - Include lock message/notes in the git-lfs List Locks REST API response ## API extensions The current Git LFS File Locking API [https://github.com/git-lfs/git-lfs/blob/v3.2.0/docs/api/locking.md] doesn't have a field to capture some information related to Locks creation which would be useful to understand why and from where the lock was acquired. With this enhancement, we can have some predefined comment as part of lock creation and get back same with the List Locks REST API so that it will be useful to differentiate and get more information on the File lock. # Create Locks Enhancement API proposal ### Request ``` > POST https://lfs-server.com/locks > Accept: application/vnd.git-lfs+json > Content-Type: application/vnd.git-lfs+json > Authorization: Basic ... > { > "path": "foo/bar.zip", > "ref": { > "name": "refs/heads/my-feature" > }, > "notes": "Lock applied from Workspace A" > } ``` ### Response * **Successful response** ``` < HTTP/1.1 201 Created < Content-Type: application/vnd.git-lfs+json < { < "lock": { < "id": "some-uuid", < "path": "foo/bar.zip", < "locked_at": "2022-05-17T15:49:06+00:00", < "owner": { < "name": "Jane Doe" < }, < "notes": "Lock applied from Workspace A" < } < } ``` # List Locks Enhancement API proposal ### Request (with notes -- notes=true) ``` > GET https://lfs-server.com/locks?path=&id&cursor=limit&**notes=true**&refspec= > Accept: application/vnd.git-lfs+json > Authorization: Basic ... (if needed) ``` ### Response * **Successful response** ``` < HTTP/1.1 200 Ok < Content-Type: application/vnd.git-lfs+json < { < "locks": [ < { < "id": "some-uuid", < "path": "foo/bar.zip", < "locked_at": "2022-05-17T15:49:06+00:00", < "owner": { < "name": "Jane Doe" < }, < "notes": "Lock applied from Workspace A" < } < ], < "next_cursor": "optional next ID" < } ``` ### Request (with out notes) ``` > GET https://lfs-server.com/locks?path=&id&cursor=limit&refspec= > Accept: application/vnd.git-lfs+json > Authorization: Basic ... 
(if needed) ``` ### Response * **Successful response** ``` < HTTP/1.1 200 Ok < Content-Type: application/vnd.git-lfs+json < { < "locks": [ < { < "id": "some-uuid", < "path": "foo/bar.zip", < "locked_at": "2022-05-17T15:49:06+00:00", < "owner": { < "name": "Jane Doe" < } < } < ], < "next_cursor": "optional next ID" < } ``` # List Locks for Verification Enhancement API proposal ### Request (with notes) ``` > POST https://lfs-server.com/locks/verify > Accept: application/vnd.git-lfs+json > Content-Type: application/vnd.git-lfs+json > Authorization: Basic ... > { > "cursor": "optional cursor", > "limit": 100, // also optional > "ref": { > "name": "refs/heads/my-feature" > }, > "notes" : true, // also optional > } ``` ### Response * **Successful response** ``` < HTTP/1.1 200 Ok < Content-Type: application/vnd.git-lfs+json < { < "ours": [ < { < "id": "some-uuid", < "path": "/path/to/file", < "locked_at": "2016-05-17T15:49:06+00:00", < "owner": { < "name": "Jane Doe" < }, < "notes": "Lock applied from Workspace A" < } < ], < "theirs": [], < "next_cursor": "optional next ID" < } ``` ### Request (with out notes) ``` > POST https://lfs-server.com/locks/verify > Accept: application/vnd.git-lfs+json > Content-Type: application/vnd.git-lfs+json > Authorization: Basic ... > { > "cursor": "optional cursor", > "limit": 100, // also optional > "ref": { > "name": "refs/heads/my-feature" > } > } ``` ### Response * **Successful response** ``` < HTTP/1.1 200 Ok < Content-Type: application/vnd.git-lfs+json < { < "ours": [ < { < "id": "some-uuid", < "path": "/path/to/file", < "locked_at": "2016-05-17T15:49:06+00:00", < "owner": { < "name": "Jane Doe" < } < } < ], < "theirs": [], < "next_cursor": "optional next ID" < } ``` git-lfs-3.6.1/docs/proposals/multipart_transfer_mode.md000066400000000000000000000415441472372047300233340ustar00rootroot00000000000000# Multipart HTTP transfer mode proposal This is a proposal for a new transfer mode, designed to support multi-part HTTP uploads. This is a protocol extension to Git LFS, defining a new transfer mode to be implemented by Git LFS clients and servers in addition to the current `basic` transfer mode. This proposal is based on the experimental `multipart-basic` transfer mode originally [implemented by datopian/giftless](https://giftless.datopian.com/en/latest/multipart-spec.html). ## Reasoning Many storage vendors and cloud vendors today offer an API to upload files in "parts" or "chunks", using multiple HTTP requests, allowing improved stability and performance. This is especially handy when files are multiple gigabytes in size, and a failure during the upload of a file would require re-uploading it, which could be extremely time consuming. The purpose of the `multipart` transfer mode is to allow Git LFS servers and client facilitate direct-to-storage uploads for backends supporting multipart or chunked uploads. As the APIs offered by storage vendors differ greatly, `multipart` transfer mode will offer abstraction over most of these complexities in hope of supporting as many storage vendors as possible. ## Terminology Throughout this document, the following terms are in use: * *LFS Server* - The HTTP server to which the LFS `batch` request is sent * *Client* or *LFS Client* - a client using the Git LFS protocol to push large files to storage via an LFS server * *Storage Backend* - The HTTP server handling actual storage; This may or may not be the same server as the LFS server, and for the purpose of this document, typically it is not. 
A typical implementation of this protocol would have the Storage Backend be a cloud storage service such as *Amazon S3* or *Google Cloud Storage*. ## Design Goals * Abstract vendor specific API and flow into a generic protocol * Remain as close as possible to the `basic` transfer API * Work at least with the multi-part APIs of [Amazon S3](https://aws.amazon.com/s3/), [Google Cloud Storage](https://cloud.google.com/storage) and [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/), * Define how uploads can be resumed by re-doing parts and not-redoing parts that were uploaded successfully (this may be vendor specific and not always supported) * Do not require any state to be maintained in the server side ## High Level Protocol Specs * The name of the transfer is `multipart` * Batch requests are the same as `basic` requests except that `{"transfers": ["multipart", "basic"]}` is the expected transfers value. Clients MUST retain `basic` as the fallback transfer mode to ensure compatibility with servers not implementing this extension. * `{"operation": "download"}` replies work exactly like `basic` download request with no change * `{"operation": "upload"}` replies will break the upload into several `actions`: * `parts` (optional), a list of zero or more part upload actions * `verify` (optional), an action to verify the file is in storage, similar to `basic` upload verify actions * `abort` (optional), an action to abort the upload and clean up all unfinished chunks and state * Just like `basic` transfers, if the file fully exists and is committed to storage, no `actions` will be provided in the reply and the upload can simply be skipped * If a `verify` action is provided, calling it is required and not optional. In some cases, this endpoint may be used to finalize the upload. * An empty or missing list of `parts` with a `verify` action may mean all parts have been uploaded but `verify` still needs to be called by the client. * Authentication and authorization behave just like with the `basic` protocol. ## Action Objects Each one of the `parts` and the `verify` and `abort` actions contain instructions for the client on how to send a request performing a the particular action. These are similar to `basic` transfer adapter `actions` but may include some common, as well as action specific additional parameters. All actions allow `href`, `header` and `expires_in` parameters just like `basic` transfer actions. ### `parts` actions Each `parts` action should include the `pos` and `size` attributes, in addition to the attributes specified above: * `pos` indicate the position in bytes within the file in which the part should begin. If not specified, `0` (that is the beginning of the file) is assumed. * `size` is the size of the part in bytes. If `size` is omitted, default to read until the end of file. * If both `pos` and `size` are omitted, the action is expected to be a single-part upload of the entire file In addition, `parts` actions may include the following parameters: * `method`, with `PUT` as the default method if none is specified. This allows customizing the HTTP method used when uploading object parts. * `want_digest` to specify an expected HTTP `Digest` header, as described below. ### `verify` action The `verify` action is similar to `basic` transfer mode `verify`, with the following additional parameters: * `params` - an object with additional parameters to send to the server when sending the `verify` request. 
These parameters are to be sent to the server exactly as provided, as the value of the `params` JSON attribute. ### `abort` action The `abort` action may include the `method` attribute as specified for `parts` actions above. ## Batch Request / Response Examples ### Upload Batch Request The following is a ~10mb file upload request: ```json { "transfers": ["multipart", "basic"], "operation": "upload", "objects": [ { "oid": "20492a4d0d84f8beb1767f6616229f85d44c2827b64bdbfb260ee12fa1109e0e", "size": 10000000 } ] } ``` ### Upload Batch Response The following is a response for the same request, given an imaginary storage backend: ```json { "transfer": "multipart", "objects": [ { "oid": "20492a4d0d84f8beb1767f6616229f85d44c2827b64bdbfb260ee12fa1109e0e", "size": 10000000, "actions": { "parts": [ { "href": "https://storage.cloud.example/storage/upload/20492a4d0d84?part=0", "header": { "Authorization": "Bearer someauthorizationtokenwillbesethere" }, "pos": 0, "size": 2500000, "expires_in": 86400 }, { "href": "https://storage.cloud.example/storage/upload/20492a4d0d84?part=1", "header": { "Authorization": "Bearer someauthorizationtokenwillbesethere" }, "pos": 2500000, "size": 2500000, "expires_in": 86400 }, { "href": "https://storage.cloud.example/storage/upload/20492a4d0d84?part=2", "header": { "Authorization": "Bearer someauthorizationtokenwillbesethere" }, "pos": 5000000, "size": 2500000, "expires_in": 86400 }, { "href": "https://storage.cloud.example/storage/upload/20492a4d0d84?part=3", "header": { "Authorization": "Bearer someauthorizationtokenwillbesethere" }, "pos": 7500000, "expires_in": 86400 } ], "verify": { "href": "https://lfs.mycompany.example/myorg/myrepo/multipart/verify", "authenticated": true, "header": { "Authorization": "Basic 123abc123abc123abc123abc123=" }, "expires_in": 86400, "params": { "uploadId": "20492a4d0d84", "partIds": [0, 1, 2, 3] } }, "abort": { "href": "https://storage.cloud.example/storage/upload/20492a4d0d84", "authenticated": true, "header": { "Authorization": "Basic 123abc123abc123abc123abc123=" }, "method": "DELETE", "expires_in": 86400 } } } ] } ``` ### `verify` request example Given the `batch` response above, after all parts have been uploaded the client should send the following `verify` request to `https://lfs.mycompany.example/myorg/myrepo/multipart/verify`: ``` POST /myorg/myrepo/multipart/verify Host: lfs.mycompany.example Authorization: Basic 123abc123abc123abc123abc123= Content-type: application/json { "oid": "20492a4d0d84f8beb1767f6616229f85d44c2827b64bdbfb260ee12fa1109e0e", "size": 10000000, "params": { "uploadId": "20492a4d0d84", "partIds": [0, 1, 2, 3] } } ``` Assuming that all parts were uploaded successfully, the server should respond with a `200 OK` response. ### `abort` request example Given the `batch` response above, the client may choose to cancel the upload by sending the following `abort` request to `https://storage.cloud.example/storage/upload/20492a4d0d84`: ``` > DELETE /storage/upload/20492a4d0d84 > Host: storage.cloud.example > Content-length: 0 ``` ## Uploaded Part Digest Some storage backends will support, or even require, uploading clients to send a digest of the uploaded part when uploading the part. This is a useful capability even if not required, as it allows backends to validate each part separately as it is uploaded. 
To support this, `parts` request objects may include a `want_digest` value, which is expected to be a list of digest algorithms in the same format of the `Want-Digest` HTTP header specified by [RFC-3230](https://tools.ietf.org/html/rfc3230). Any cryptographically secure digest algorithm [registered with IANA](https://www.iana.org/assignments/http-dig-alg/http-dig-alg.xhtml) via the process outlined in [RFC-3230](https://tools.ietf.org/html/rfc3230) may be specified in `want_digest`. Algorithms considered cryptographically insecure, including `MD5` and `SHA-1`, should not be accepted. Namely, `contentMD5` is **not** an accepted value of `want_digest`. If one or more digest algorithms with non-zero q-value is specified in `want_digest`, clients *should* select a favored supported algorithm, calculate the part digest using that algorithm, and send it when uploading the part using the `Digest` HTTP header as specified by [RFC-3230 section 4.3.1](https://tools.ietf.org/html/rfc3230#section-4.3.1). While clients may include the part digest calculated using more than one algorithm, this is typically not required and should be avoided. Note that if `want_digest` is specified but the client cannot support any of the requested algorithms, the client may still choose to continue uploading parts without sending a `Digest` header. However, the storage server may choose to reject the request in such cases. ### Uploaded Part Digest Example #### Examples of a batch response with `want_digest` in the reply With SHA-512 as a preferred algorithm, and SHA-256 as a less preferred option if SHA-512 is not possible: ```json { "actions": { "parts": [ { "href": "https://storage.cloud.example/storage/upload/20492a4d0d84?part=3", "header": { "Authorization": "Bearer someauthorizationtokenwillbesethere" }, "pos": 7500001, "want_digest": "sha-512;q=1.0, sha-256;q=0.5" } ] } } ``` #### Example of part upload request send to the storage server Following on the `want_digest` value specified in the last example, the client should now send the following headers to the storage server when uploading the part, assuming `SHA-512` is supported: ``` HTTP/1.1 PUT /storage/upload/20492a4d0d84?part=3 Authorization: Bearer someauthorizationtokenwillbesethere Digest: SHA-512=thvDyvhfIqlvFe+A9MYgxAfm1q5thvDyvhfIqlvFe+A9MYgxAfm1q5= ``` ## Expected HTTP Responses For each one of the `parts`, as well as `abort` and `verify` requests sent by the client, the following responses are to be expected: * Any response with a `20x` status code is to be considered by clients as successful. This ambiguity is by design, to support variances between vendors (which may use `200` or `201` to indicate a successful upload, for example). * Any other response is to be considered as an error, and it is up to the client to decide whether the request should be retried or not. Implementors are encouraged to follow standard HTTP error status code guidelines. ### `batch` replies for partially uploaded content When content was already partially uploaded, the server is expected to return a normal reply but omit request and parts which do not need to be repeated. If the entire file has been uploaded, it is expected that no `actions` value will be returned, in which case clients should simply skip the upload. However, if parts of the file were successfully uploaded while others weren't, it is expected that a normal reply would be returned, but with less `parts` to send. 
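A minimal client-side sketch of this resume flow follows; the `Part`,
`Actions`, and `Client` types are hypothetical stand-ins for a real
implementation and are not part of the protocol:

```go
// Part and VerifyAction are pared-down stand-ins for the batch response
// objects described above; a real client would decode them from JSON.
type Part struct {
	Href string
	Pos  int64
	Size int64
}

type VerifyAction struct {
	Href   string
	Params map[string]interface{}
}

// Actions mirrors the "actions" object of a batch response; a nil value
// means the object is already fully committed.
type Actions struct {
	Parts  []Part
	Verify *VerifyAction
}

type Client interface {
	Batch(op, oid string, size int64) (*Actions, error)
	UploadPart(p Part) error
	Verify(v *VerifyAction) error
}

// resumeUpload re-runs the batch request and uploads only the parts the
// server still lists as missing, then calls verify if the server asks for it.
func resumeUpload(c Client, oid string, size int64) error {
	actions, err := c.Batch("upload", oid, size)
	if err != nil {
		return err
	}
	if actions == nil {
		return nil // the server already has the whole object
	}
	for _, part := range actions.Parts {
		// Only byte ranges the server has not received are listed.
		if err := c.UploadPart(part); err != nil {
			return err
		}
	}
	if actions.Verify != nil {
		// Calling verify is required whenever it is provided.
		return c.Verify(actions.Verify)
	}
	return nil
}
```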
### `verify` HTTP 409 errors An `HTTP 409` error on `verify` requests typically indicates that the file could not be fully committed or verified. In this case, clients should follow the following process to try and recover from the error: * retry the `batch` request to see if any parts of the file were not uploaded yet. If there are still `parts` to upload (i.e. `parts` is not empty), proceed to upload them and re-do `verify` * If `parts` is empty, it is possible that the file exists in storage but is corrupt / has wrong size. In this case it is recommended to issue an `abort` and re-attempt the same upload again * It is recommended to take special note of the number of retries, to avoid infinite recovery attempt loops ## Additional Considerations ### Chunk sizing It is up to the LFS server to decide the size of each file chunk. ### Action lifetime considerations As multipart uploads tend to require much more time than simple uploads, it is recommended to allow for longer `expires_in` values than one would consider for `basic` uploads. It is possible that the process of uploading a single object in multiple parts may take several hours from `batch` to `verify`. ### Falling back to `basic` transfer for small files Using multipart upload APIs has some complexity and speed overhead. For this reason, if a client specifies support for both `multipart` and `basic` transfer modes in a batch request, and the object(s) uploaded are small enough to fit in a single part upload, servers *may* choose to respond with a `basic` transfer mode even if `multipart` is supported: For example a small (2mb) upload batch request: ``` { "transfers": ["multipart", "basic"], "operation": "upload", "objects": [ { "oid": "13aea96040f2133033d103008d5d96cfe98b3361f7202d77bea97b2424a7a6cd", "size": 2000000 } ] } ``` May be responded with: ``` { "transfer": "basic", "objects": [ ... ] } ``` Even if the server does support `multipart`, as `basic` can be preferable in this case. ## Implementation Notes ### Hiding initialization / commit complexities from clients While `part` requests are typically quite similar between vendors, the specifics of multipart upload initialization and commit procedures are very specific to vendors. For this reason, in many cases, it will be up to the LFS server to take care of initialization and commit code. This is fine, as long as actual uploaded data is sent directly to the storage backend. For example, in the case of Amazon S3: * All requests need to have an "upload ID" token which is obtained in an initial request * When finalizing the upload, a special "commit" request need to be sent, listing all uploaded part IDs. These are very hard to abstract in a way that would allow clients to send them directly to the server. In addition, as we do not want to maintain any state in the server, there is a need to make two requests when finalizing the upload: one to fetch a list of uploaded chunks, and another to send this list to the S3 finalization endpoint. For this reason, it is expected that any initialization actions will be handled by the Git LFS server during the `batch` request handling. In most cases, the `verify` action will also be responsible for any finalization / commit actions. The `params` attribute of the `verify` action is designed specifically to transfer some vendor-specific "state" between initialization and finalization of the upload process. 
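For illustration, a minimal sketch of that pattern for an S3-style backend;
the function names and JSON shapes are invented for this example:

```go
// newVerifyAction packs the storage backend's multipart state (for S3, the
// upload ID and part numbers) into the verify action at batch time, so the
// LFS server itself stays stateless.
func newVerifyAction(baseURL, uploadID string, partIDs []int) map[string]interface{} {
	return map[string]interface{}{
		"href":       baseURL + "/multipart/verify",
		"expires_in": 86400,
		"params": map[string]interface{}{
			"uploadId": uploadID,
			"partIds":  partIDs,
		},
	}
}

// handleVerify recovers that state from the params echoed back by the client,
// after which the server can issue the vendor's "complete multipart upload"
// call and check the object's final size and OID.
func handleVerify(params map[string]interface{}) (uploadID string, ok bool) {
	uploadID, ok = params["uploadId"].(string)
	return uploadID, ok
}
```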
### Implementing Complex `abort` actions Some storage backends will accept a simple `DELETE` or `POST` request to a URL, with no request body, in order to abort the upload. In such cases, `abort` may refer directly to the storage backend. However, in cases where aborting the upload requires more complex logic or some payload in the request body, `abort` actions should point to an endpoint of the LFS server, and it should be up to the LFS server to abort the upload and clean up any partially uploaded parts. As `abort` requests do not have a body, any parameters required by the LFS server in order to complete the request should be passed as part of the URL in the `href` parameter. It should be noted that clients will not always be able to `abort` partial uploads cleanly. Implementors are expected to ensure proper cleanup of partially uploaded files via other means, such as a periodical cron job that locates uncommitted uploaded parts and deletes them. git-lfs-3.6.1/docs/proposals/ntlm.md000066400000000000000000000027241472372047300173520ustar00rootroot00000000000000# NTLM Authentication With Git-Lfs Enterprise users in a windows ecosystem are frequently required to use integrated auth. Basic auth does not meet their security requirements and setting up SSH on Windows is painful. There is an overview of NTLM at http://www.innovation.ch/personal/ronald/ntlm.html ### Implementation If the LFS server returns a "Www-Authenticate: NTLM" header, we will set lfs.{endpoint}.access to be ntlm and resubmit the http request. Subsequent requests will go through the ntlm auth flow. We will store NTLM credentials in the credential helper. When the user is prompted for their credentials they must use username:{DOMAIN}\{user} and password:{pass} The ntlm protocol will be handled by an ntlm.go class that hides the implementation of InitHandshake, Authenticate, and Challenge. This allows minimal changes to the existing client.go class. ### Tech There is a ntlm-go library available at https://github.com/ThomsonReutersEikon/go-ntlm that we can use. We will need to implement the Negotiate method and publish docs on what NTLM switches we support. I think simple user/pass/domain is best here so we avoid supporting a million settings with conflicting docs. ### Work Before supporting this as a mainstream scenario we should investigate making the CI work on windows so that we can successfully test changes. ### More Info You can see a hacked-together implementation of git lfs push with NTLM at https://github.com/WillHipschman/git-lfs/tree/ntlm git-lfs-3.6.1/docs/proposals/ssh_adapter.md000066400000000000000000000306711472372047300206770ustar00rootroot00000000000000# SSH protocol proposal We'd like to implement a protocol for Git LFS that uses SSH protocol exclusively, avoiding the need to use HTTPS altogether. This will make deployment and use easier in a variety of situations where access to certain ports is limited. This is merely a proposal, not a commitment to implement for either the client or server side. Implementers who prefer to use HTTP can continue to do so. ## What not to do There are several possible approaches that could be adopted. SSH provides a native capability for the SFTP protocol, which can be used to transfer files. However, in order to implement this on the server side, each access (upload or download) must have an access control check instead of one at the beginning of the operation. 
This would be inefficient in some server-side implementations, and nearly impossible to implement securely for implementations that use the system OpenSSH for implementation. ## A more usable approach Git already has some places we can look for inspiration. Its SSH protocol is based on the Git native protocol, which is based on the pkt-line scheme. Recently, Git has learned about protocol version 2, which provides better support for expressing and negotiating capabilities. Ideally, we would allow multiple operations to occur on a single connection for efficiency's sake, especially on high-latency connections, where the cost of SSH connection setup may be high due to multiple round trips. In addition, a protocol which maps well onto HTTP may be beneficial for those server-side implementations which would like to proxy connections to an HTTP-based backend. ## Preliminary design This design assumes a reference to Git's pkt-line and protocol v2 documentation. pkt-line headers for this document may contain values up to 65519 decimal. To initiate a connection, Git LFS should run the following command: $ ssh [{user}@]{server} git-lfs-transfer {path} {operation} If authentication fails, or some other connection error occurs, errors will be read from standard error and displayed to the user. The operation may be `upload`, `download`. Other operations may be implemented in the future. Once the connection is established, the server should send a capability advertisement: ``` capability-advertisement = capability-list flush-pkt capability-list = *capability capability = PKT-LINE(key[=value] LF) key = 1*(ALPHA | DIGIT | "-_") value = 1*(ALPHA | DIGIT | "-_.,?\/{}[]()<>!@#$%^&*+=:;") ``` Unlike the Git protocol, but like IMAP, the protocol version is specified as a capability. This document defines protocol version 1, which is specified as `version=1`. If the server supports other protocol versions, it may enumerate them here as well. If the server supports locking, the `locking` capability should be advertised, and the client may then use the `lock`, `unlock`, and `list-lock` commands. No capabilities other than the base functionality specified here are enabled without the client explicitly enabling them. Note that the `value` production here, unlike in Git, does not include the space character, since it is used as a delimiter in parts of the protocol. The client will then issue an appropriate version command: ``` version-request = PKT-LINE("version " number LF) flush-pkt number = 1*DIGIT ``` The response from the server will look like the following: ``` version-response = status-command delim-pkt error-message flush-pkt status-command = PKT-LINE("status " http-status-code LF) http-status-code = 3DIGIT error-message = *PKT-LINE(data LF) ``` The `http-status-code` portion of the response is an HTTP status code, identical to those used if the request is made over HTTP. The response code should be 200 if the version is accepted or 400 if it is not. Other values are possible if other errors occur. When the protocol is complete, the client sends a quit request: ``` quit-request = PKT-LINE("quit" LF) flush-pkt ``` The server must respond with success and then terminate the connection: ``` quit-response = PKT-LINE("status 200" LF) flush-pkt ``` This command exists to help distinguish expected termination from unexpected termination. ### Requests to transfer objects These commands may be used if the operation was `upload` or `download`. 
The `batch` command is used to specify a JSON command identical those used at the `info/lfs/object/batch` endpoint: ``` batch-request = batch-command *argument delim-pkt *oid-line flush-pkt batch-command = PKT-LINE("batch" LF) argument = PKT-LINE(key=[data] LF) oid-line = PKT-LINE(oid size *(key=[value]) LF) oid = 1*("a-f0-9") size = 1*DIGIT ``` The `transfer` argument is equivalent to the corresponding value in the HTTP JSON API. The `refname` argument is equivalent to the `name` argument of the `ref` object in the HTTP JSON API. The `hash-algo` argument is equivalent to the `hash_algo` argument in the HTTP JSON API. Unknown arguments should be ignored, as should unknown key-value pairs in the `oid-line` production. The response from the server will look like the following: ``` batch-response = status-command *argument delim-pkt (*batch-oid-line | error-message) flush-pkt batch-oid-line = PKT-LINE(oid size action *(key=[value]) LF) ``` If the status command is successful (that is, the status is not 200-series response), the data provided matches the `*batch-oid-line` production; otherwise, the data provided represents a user-visible error message. The server response should contain one pkt-line per oid-size-action tuple. That is, the same oid and size may be repeated if there are multiple actions. If the server has no actions that are valid for an object, it should be listed once in the response with the `noop` action. The response for an oid may include a string, `id`, which is an opaque identifier relevant only to the server to help it identify the object, and another string, `token`, which is an opaque identifier relevant only to the server to help it manage authentication. These strings must meet the syntax for the `value` production above; if arbitrary bytes are needed, Base64 encoding is recommended. The `hash-algo` argument has the same meaning as the `hash_algo` field in the HTTP JSON API. The `expires-in` and `expires-at` key-value pairs have the same meaning as their corresponding items from the HTTP JSON API. These values, if specified, must be passed as arguments to the `get-object` and `put-object` commands. ### Downloads These commands may be used if the operation was `download`. If the operation was `download`, the command `get-object` may be used to retrieve an object: ``` get-object-request = get-object-command *arguments flush-pkt get-object-command = PKT-LINE("get-object " oid LF) ``` If the `id` or `token` responses were provided in the response to `batch`, they must be specified as arguments here. The server may choose to ignore the `oid` field specified in favor of looking up the data using the `id` field. The response looks like the following: ``` status-data-response = status-success-response | status-error-response status-success-response = status-success-command *argument delim-pkt binary-data flush-pkt status-success-command = PKT-LINE("status 200" LF) binary-data = *PKT-LINE(data) status-error-response = status-error-command *argument delim-pkt error-message flush-pkt status-error-command = PKT-LINE("status " http-error-code LF) http-error-code = ("4" | "5") 2DIGIT ``` The `size` argument is required on `status` responses to `get-object` commands. ### Uploads These commands may be used if the operation was `upload`. If the operation was `upload`, the commands `put-object` and `verify-object` may be specified. 
`put-object` is used to upload an object to the server: ``` put-object-request = put-object-command *argument delim-pkt binary-data flush-pkt put-object-command = PKT-LINE("put-object " oid LF) ``` As above, the `size` command is required and `id` and `token` are required if provided by the server. The response matches the `status-data-response` production. The `binary-data` returned on success is not meaningful and should be empty. The `verify-object` command is used to verify an object: ``` verify-object-request = verify-object-command *argument flush-pkt verify-object-command = PKT-LINE("verify-object " oid LF) ``` A `size` argument identical to the one used in `put-object` is mandatory. The `id` and `token` items from the batch request must also be passed as arguments here, if specified. The response matches the following: ``` generic-status-response = generic-success-response | status-error-response generic-success-response = generic-success-command *argument flush-pkt generic-success-command = PKT-LINE("status 200" LF) ``` ### Locks The `lock` command may be used to lock a file on a ref: ``` lock-request = lock-command *argument flush-pkt lock-command = PKT-LINE("lock" LF) ``` The `path` and `refname` arguments correspond to the `path` component and the `name` component of the `ref` object in the HTTP JSON API. The response is as follows: ``` lock-response = lock-success-response | status-error-response lock-success-response = lock-success-command *argument flush-pkt lock-success-command = PKT-LINE("status 201" LF) ``` If the response is either successful or a 409 response, the arguments `id`, `path`, `locked-at`, and `ownername` are provided. In case of a successful response, these attributes represent the created lock; if the response is a 409, then the attributes represent the conflicting lock. The `list-lock` command may be used to list and verify locks: ``` list-lock-request = list-lock-command *argument flush-pkt list-lock-command = PKT-LINE("list-lock" LF) ``` The `path`, `id`, `cursor`, `limit`, and `refspec` correspond to the items in the HTTP JSON API. ``` list-lock-response = list-lock-success-response | status-error-response list-lock-success-response = list-lock-success-command *argument delim-pkt *lock-spec flush-pkt list-lock-success-command = PKT-LINE("status 200" LF) lock-spec = lock-decl path-id locked-at ownername-id *owner-id lock-decl = PKT-LINE("lock " lock-id LF) lock-id = value path-id = PKT-LINE("path " lock-id path LF) path = data locked-at = PKT-LINE("locked-at " lock-id timestamp LF) ownername-id = PKT-LINE("ownername " lock-id ownername LF) ownername = data owner-id = PKT-LINE("owner " lock-id who LF) who = ("ours" | "theirs") ``` The `lock-decl` production declares a new lock. The `lock-id` production refers to the ID provided by the server. The same ID is repeated in each line to allow for easier parsing. The `next-cursor` argument indicates the next value of the `cursor` argument to be passed to the `list-lock` command. If there is no `next-cursor` argument, this is the final response. The `owner-id` specification is optional if the user specified the command as `download` but is required if the command is `upload`. ``` unlock-request = unlock-command *argument flush-pkt unlock-command = PKT-LINE("unlock " lock-id LF) ``` The `force` and `refname` arguments have the same meaning as their corresponding values in the HTTP JSON API. 
The response is as follows: ``` unlock-response = unlock-success-response | status-error-response unlock-success-response = unlock-success-command *argument flush-pkt unlock-success-command = PKT-LINE("status 200" LF) ``` The `lock` and `unlock` commands may be issued when the command was `upload`. If the remote side has a concept of a repository administrator, it is recommended that unlocking a lock that the user does not own be reserved to the administrator. The `list-lock` commands may be issued when the command was `upload` or `download`. git-lfs-3.6.1/docs/proposals/transfer_adapters.md000066400000000000000000000105021472372047300221000ustar00rootroot00000000000000# Transfer adapters for resumable upload / download ## Concept To allow the uploading and downloading of LFS content to be implemented in more ways than the current simple HTTP GET/PUT approach. Features that could be supported by opening this up to other protocols might include: - Resumable transfers - Block-level de-duplication - Delegation to 3rd party services like Dropbox / Google Drive / OneDrive - Non-HTTP services ## API extensions See the [API documentation](../http-v1-batch.md) for specifics. All changes are optional extras so there are no breaking changes to the API. The current HTTP GET/PUT system will remain the default. When a version of the git-lfs client supports alternative transfer mechanisms, it notifies the server in the API request using the `accept-transfers` field. If the server also supports one of the mechanisms the client advertised, it may select one and alter the upload / download URLs to point at resources compatible with this transfer mechanism. It must also indicate the chosen transfer mechanism in the response using the `transfer` field. The URLs provided in this case may not be HTTP, they may be custom protocols. It is up to each individual transfer mechanism to define how URLs are used. ## Client extensions ### Phase 1: refactoring & abstraction 1. Introduce a new concept of 'transfer adapter'. 2. Adapters can provide either upload or download support, or both. This is necessary because some mechanisms are unidirectional, e.g. HTTP Content-Range is download only, tus.io is upload only. 3. Refactor our current HTTP GET/PUT mechanism to be the default implementation for both upload & download 4. The LFS core will pass oids to transfer to this adapter in bulk, and receive events back from the adapter for transfer progress, and file completion. 5. Each adapter is responsible for its own parallelism, but should respect the `lfs.concurrenttransfers` setting. For example the default (current) approach will parallelise on files (oids), but others may parallelise in other ways e.g. downloading multiple parts of the same file at once 6. Each adapter should store its own temporary files. On file completion it must notify the core which in the case of a download is then responsible for moving a completed file into permanent storage. 7. Update the core to have a registry of available transfer mechanisms which it passes to the API, and can recognise a chosen one in the response. Default to our refactored original. ### Phase 2: basic resumable downloads 1. Add a client transfer adapter for [HTTP Range headers](https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35) 2. Add a range request reference implementation to our integration test server ### Phase 3: basic resumable uploads 1. Add a client transfer adapter for [tus.io](http://tus.io) (upload only) 2. 
Add a tus.io reference implementation to our integration test server ### Phase 4: external transfer adapters Ideally we should allow people to add other transfer implementations so that we don't have to implement everything, or bloat the git-lfs binary with every custom system possible. Because Go is statically linked it's not possible to extend client functionality at runtime through loading libraries, so instead I propose allowing an external process to be invoked, and communicated with via a defined stream protocol. This protocol will be logically identical to the internal adapters; the core passing oids and receiving back progress and completion notifications; just that the implementation will be in an external process and the messages will be serialised over streams. Only one process will be launched and will remain for the entire period of all transfers. Like internal adapters, the external process will be responsible for its own parallelism and temporary storage, so internally they can (should) do multiple transfers at once. 1. Build a generic 'external' adapter which can invoke a named process and communicate with it using the standard stream protocol (probably just over stdout / stdin) 2. Establish a configuration for external adapters; minimum is an identifier (client and server must agree on what that is) and a path to invoke 3. Implement a small test process in Go which simply wraps the default HTTP mechanism in an external process, to prove the approach (not in release) git-lfs-3.6.1/docs/spec.md000066400000000000000000000140501472372047300153030ustar00rootroot00000000000000# Git LFS Specification This is a general guide for Git LFS clients. Typically it should be implemented by a command line `git-lfs` tool, but the details may be useful for other tools. ## The Pointer The core Git LFS idea is that instead of writing large blobs to a Git repository, only a pointer file is written. * Pointer files are text files which MUST contain only UTF-8 characters. * Each line MUST be of the format `{key} {value}\n` (trailing unix newline). * Only a single space character between `{key}` and `{value}`. * Keys MUST only use the characters `[a-z] [0-9] . -`. * The first key is _always_ `version`. * Lines of key/value pairs MUST be sorted alphabetically in ascending order (with the exception of `version`, which is always first). * Values MUST NOT contain return or newline characters. * Pointer files MUST be stored in Git with their executable bit matching that of the replaced file. * Pointer files must be less than 1024 bytes in size, including any pointer extension lines. * Pointer files are unique: that is, there is exactly one valid encoding for a pointer file. An empty file is the pointer for an empty file. That is, empty files are passed through LFS without any change. The required keys are: * `version` is a URL that identifies the pointer file spec. Parsers MUST use simple string comparison on the version, without any URL parsing or normalization. It is case sensitive, and %-encoding is discouraged. * `oid` tracks the unique object id for the file, prefixed by its hashing method: `{hash-method}:{hash}`. Currently, only `sha256` is supported. The hash is lower case hexadecimal. * `size` is in bytes. Example of a v1 text pointer: ``` version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 (ending \n) ``` Blobs created with the pre-release version of the tool generated files with a different version URL. 
Git LFS can read these files, but writes them using the version URL above. ``` version https://hawser.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 (ending \n) ``` For testing compliance of any tool generating its own pointer files, the reference is this official Git LFS tool: **NOTE:** exact pointer command behavior TBD! * Tools that parse and regenerate pointer files MUST preserve keys that they don't know or care about. * Run the `pointer` command to generate a pointer file for the given local file: ``` $ git lfs pointer --file=path/to/file Git LFS pointer for path/to/file: version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 ``` * Run `pointer` to compare the blob OID of a pointer file built by Git LFS with a pointer built by another tool. * Write the other implementation's pointer to "other/pointer/file": ``` $ git lfs pointer --file=path/to/file --pointer=other/pointer/file Git LFS pointer for path/to/file: version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 Blob OID: 60c8d8ab2adcf57a391163a7eeb0cdb8bf348e44 Pointer from other/pointer/file version https://git-lfs.github.com/spec/v1 oid sha256 4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 Blob OID: 08e593eeaa1b6032e971684825b4b60517e0638d Pointers do not match ``` * It can also read STDIN to get the other implementation's pointer: ``` $ cat other/pointer/file | git lfs pointer --file=path/to/file --stdin Git LFS pointer for path/to/file: version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 Blob OID: 60c8d8ab2adcf57a391163a7eeb0cdb8bf348e44 Pointer from STDIN version https://git-lfs.github.com/spec/v1 oid sha256 4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 Blob OID: 08e593eeaa1b6032e971684825b4b60517e0638d Pointers do not match ``` ## Intercepting Git Git LFS uses the `clean` and `smudge` filters to decide which files use it. The global filters can be set up with `git lfs install`: ``` $ git lfs install ``` These filters ensure that large files aren't written into the repository proper, instead being stored locally at `.git/lfs/objects/{OID-PATH}` (where `{OID-PATH}` is a sharded filepath of the form `OID[0:2]/OID[2:4]/OID`), synchronized with the Git LFS server as necessary. Here is a sample path to a file: .git/lfs/objects/4d/7a/4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 The `clean` filter runs as files are added to repositories. Git sends the content of the file being added as STDIN, and expects the content to write to Git as STDOUT. * Stream binary content from STDIN to a temp file, while calculating its SHA-256 signature. * Atomically move the temp file to `.git/lfs/objects/{OID-PATH}` if it does not exist, and the sha-256 signature of the contents matches the given OID. * Delete the temp file. * Write the pointer file to STDOUT. Note that the `clean` filter does not push the file to the server. Use the `git push` command to do that (lfs files are pushed before commits in a pre-push hook). The `smudge` filter runs as files are being checked out from the Git repository to the working directory. Git sends the content of the Git blob as STDIN, and expects the content to write to the working directory as STDOUT. * Read 100 bytes. 
* If the content is ASCII and matches the pointer file format: * Look for the file in `.git/lfs/objects/{OID-PATH}`. * If it's not there, download it from the server. * Write its contents to STDOUT * Otherwise, simply pass the STDIN out through STDOUT. The `.gitattributes` file controls when the filters run. Here's a sample file that runs all mp3 and zip files through Git LFS: ``` $ cat .gitattributes *.mp3 filter=lfs -text *.zip filter=lfs -text ``` Use the `git lfs track` command to view and add to `.gitattributes`. git-lfs-3.6.1/errors/000077500000000000000000000000001472372047300144135ustar00rootroot00000000000000git-lfs-3.6.1/errors/context.go000066400000000000000000000021221472372047300164230ustar00rootroot00000000000000package errors type withContext interface { Set(string, interface{}) Get(string) interface{} Del(string) Context() map[string]interface{} } // ErrorSetContext sets a value in the error's context. If the error has not // been wrapped, it does nothing. func SetContext(err error, key string, value interface{}) { if e, ok := err.(withContext); ok { e.Set(key, value) } } // ErrorGetContext gets a value from the error's context. If the error has not // been wrapped, it returns an empty string. func GetContext(err error, key string) interface{} { if e, ok := err.(withContext); ok { return e.Get(key) } return "" } // ErrorDelContext removes a value from the error's context. If the error has // not been wrapped, it does nothing. func DelContext(err error, key string) { if e, ok := err.(withContext); ok { e.Del(key) } } // ErrorContext returns the context map for an error if it is a wrappedError. // If it is not a wrappedError it will return an empty map. func Context(err error) map[string]interface{} { if e, ok := err.(withContext); ok { return e.Context() } return nil } git-lfs-3.6.1/errors/errors.go000066400000000000000000000065741472372047300162720ustar00rootroot00000000000000// Package errors provides common error handling tools // NOTE: Subject to change, do not rely on this package from outside git-lfs source package errors // The LFS error system provides a simple wrapper around Go errors and the // ability to inspect errors. It is strongly influenced by Dave Cheney's post // at http://dave.cheney.net/2014/12/24/inspecting-errors. // // When passing errors out of lfs package functions, the return type should // always be `error`. The wrappedError details are not exported. If an error is // the kind of error a caller should need to investigate, an IsXError() // function is provided that tells the caller if the error is of that type. // There should only be a handful of cases where a simple `error` is // insufficient. // // The error behaviors can be nested when created. For example, the not // implemented error can also be marked as a fatal error: // // func LfsFunction() error { // err := functionCall() // if err != nil { // return newFatalError(newNotImplementedError(err)) // } // return nil // } // // Then in the caller: // // err := lfs.LfsFunction() // if lfs.IsNotImplementedError(err) { // log.Print("feature not implemented") // } // if lfs.IsFatalError(err) { // os.Exit(1) // } // // Wrapped errors contain a context, which is a map[string]string. These // contexts can be accessed through the Error*Context functions. Calling these // functions on a regular Go error will have no effect. 
// // Example: // // err := lfs.SomeFunction() // errors.SetContext(err, "foo", "bar") // errors.GetContext(err, "foo") // => "bar" // errors.DelContext(err, "foo") // // Wrapped errors also contain the stack from the point at which they are // called. Use the '%+v' printf verb to display. See the github.com/pkg/errors // docs for more info: https://godoc.org/github.com/pkg/errors import ( "bytes" "fmt" "github.com/pkg/errors" ) // New returns an error with the supplied message. New also records the stack // trace at the point it was called. func New(message string) error { return errors.New(message) } // Errorf formats according to a format specifier and returns the string // as a value that satisfies error. // Errorf also records the stack trace at the point it was called. func Errorf(format string, args ...interface{}) error { return errors.Errorf(format, args...) } // Wrap wraps an error with an additional message. func Wrap(err error, msg string) error { return newWrappedError(err, msg) } // Wrapf wraps an error with an additional formatted message. func Wrapf(err error, format string, args ...interface{}) error { if err == nil { err = errors.New("") } message := fmt.Sprintf(format, args...) return newWrappedError(err, message) } func StackTrace(err error) []string { type stacktrace interface { StackTrace() errors.StackTrace } if err, ok := err.(stacktrace); ok { frames := err.StackTrace() lines := make([]string, len(frames)) for i, f := range frames { lines[i] = fmt.Sprintf("%+v", f) } return lines } return nil } func Combine(errs []error) error { if len(errs) == 0 { return nil } var buf bytes.Buffer for i, err := range errs { if i > 0 { buf.WriteString("\n") } buf.WriteString(err.Error()) } return fmt.Errorf("%s", buf.String()) } func Cause(err error) error { type causer interface { Cause() error } if cause, ok := err.(causer); ok { return Cause(cause.Cause()) } return err } git-lfs-3.6.1/errors/errors_test.go000066400000000000000000000027141472372047300173210ustar00rootroot00000000000000package errors import ( "errors" "testing" ) func TestChecksHandleGoErrors(t *testing.T) { err := errors.New("go error") if IsFatalError(err) { t.Error("go error should not be a fatal error") } } func TestCheckHandlesWrappedErrors(t *testing.T) { err := errors.New("go error") fatal := NewFatalError(err) if !IsFatalError(fatal) { t.Error("expected error to be fatal") } } func TestBehaviorWraps(t *testing.T) { err := errors.New("go error") fatal := NewFatalError(err) ni := NewNotImplementedError(fatal) if !IsNotImplementedError(ni) { t.Error("expected error to be not implemented") } if !IsFatalError(ni) { t.Error("expected wrapped error to also be fatal") } if IsNotImplementedError(fatal) { t.Error("expected fatal error to not be not implemented") } } func TestContextOnGoErrors(t *testing.T) { err := errors.New("go error") SetContext(err, "foo", "bar") v := GetContext(err, "foo") if v == "bar" { t.Error("expected empty context on go error") } } func TestContextOnWrappedErrors(t *testing.T) { err := NewFatalError(errors.New("go error")) SetContext(err, "foo", "bar") if v := GetContext(err, "foo"); v != "bar" { t.Error("expected to be able to use context on wrapped errors") } ctxt := Context(err) if ctxt["foo"] != "bar" { t.Error("expected to get the context of an error") } DelContext(err, "foo") if v := GetContext(err, "foo"); v == "bar" { t.Errorf("expected to delete from error context") } }
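// TestWrapAndInspectExample is an illustrative sketch rather than an
// original fixture; it ties the package together by showing that a plain
// error can be wrapped with a message, that context travels with the
// wrapper, and that Cause recovers the original error. The message and
// context values here are arbitrary examples.
func TestWrapAndInspectExample(t *testing.T) {
	base := errors.New("connection reset")
	wrapped := Wrap(base, "fetching object")

	SetContext(wrapped, "oid", "4d7a2146")
	if v := GetContext(wrapped, "oid"); v != "4d7a2146" {
		t.Errorf("expected context to travel with the wrapped error, got %v", v)
	}
	if Cause(wrapped) != base {
		t.Error("expected Cause to recover the original error")
	}
	if IsFatalError(wrapped) {
		t.Error("expected a plainly wrapped error not to be fatal")
	}
}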
git-lfs-3.6.1/errors/types.go000066400000000000000000000260241472372047300161120ustar00rootroot00000000000000package errors import ( goerrors "errors" "fmt" "net/url" "os/exec" "strconv" "syscall" "time" "github.com/git-lfs/git-lfs/v3/tr" "github.com/pkg/errors" ) // IsFatalError indicates that the error is fatal and the process should exit // immediately after handling the error. func IsFatalError(err error) bool { if e, ok := err.(interface { Fatal() bool }); ok { return e.Fatal() } if parent := parentOf(err); parent != nil { return IsFatalError(parent) } return false } // IsNotImplementedError indicates the client attempted to use a feature the // server has not implemented (e.g. the batch endpoint). func IsNotImplementedError(err error) bool { if e, ok := err.(interface { NotImplemented() bool }); ok { return e.NotImplemented() } if parent := parentOf(err); parent != nil { return IsNotImplementedError(parent) } return false } // IsAuthError indicates the client provided a request with invalid or no // authentication credentials when credentials are required (e.g. HTTP 401). func IsAuthError(err error) bool { if e, ok := err.(interface { AuthError() bool }); ok { return e.AuthError() } if parent := parentOf(err); parent != nil { return IsAuthError(parent) } return false } // IsSmudgeError indicates an error while smudging a file. func IsSmudgeError(err error) bool { if e, ok := err.(interface { SmudgeError() bool }); ok { return e.SmudgeError() } if parent := parentOf(err); parent != nil { return IsSmudgeError(parent) } return false } // IsCleanPointerError indicates an error while cleaning a file. func IsCleanPointerError(err error) bool { if e, ok := err.(interface { CleanPointerError() bool }); ok { return e.CleanPointerError() } if parent := parentOf(err); parent != nil { return IsCleanPointerError(parent) } return false } // IsNotAPointerError indicates the parsed data is not an LFS pointer. func IsNotAPointerError(err error) bool { if e, ok := err.(interface { NotAPointerError() bool }); ok { return e.NotAPointerError() } if parent := parentOf(err); parent != nil { return IsNotAPointerError(parent) } return false } // IsPointerScanError indicates an error while scanning a Git tree for LFS pointers. func IsPointerScanError(err error) bool { if e, ok := err.(interface { PointerScanError() bool }); ok { return e.PointerScanError() } if parent := parentOf(err); parent != nil { return IsPointerScanError(parent) } return false } // IsBadPointerKeyError indicates that the parsed data has an invalid key. func IsBadPointerKeyError(err error) bool { if e, ok := err.(interface { BadPointerKeyError() bool }); ok { return e.BadPointerKeyError() } if parent := parentOf(err); parent != nil { return IsBadPointerKeyError(parent) } return false } // IsProtocolError indicates that the SSH pkt-line protocol data is invalid. func IsProtocolError(err error) bool { if e, ok := err.(interface { ProtocolError() bool }); ok { return e.ProtocolError() } if parent := parentOf(err); parent != nil { return IsProtocolError(parent) } return false } // If an error is a bad pointer error of any type, returns NotAPointerError func StandardizeBadPointerError(err error) error { if IsBadPointerKeyError(err) { badErr := err.(badPointerKeyError) if badErr.Expected == "version" { return NewNotAPointerError(err) } } return err } // IsDownloadDeclinedError indicates that the smudge operation should not download. // TODO: I don't really like using errors to control that flow; it should be refactored.
func IsDownloadDeclinedError(err error) bool { if e, ok := err.(interface { DownloadDeclinedError() bool }); ok { return e.DownloadDeclinedError() } if parent := parentOf(err); parent != nil { return IsDownloadDeclinedError(parent) } return false } // IsUnprocessableEntityError indicates that the upload operation failed because of // an HTTP 422 response code. func IsUnprocessableEntityError(err error) bool { if e, ok := err.(interface { UnprocessableEntityError() bool }); ok { return e.UnprocessableEntityError() } if parent := parentOf(err); parent != nil { return IsUnprocessableEntityError(parent) } return false } // IsRetriableError indicates the low level transfer had an error but the // caller may retry the operation. func IsRetriableError(err error) bool { if e, ok := err.(interface { RetriableError() bool }); ok { return e.RetriableError() } if cause, ok := Cause(err).(*url.Error); ok { return cause.Temporary() || cause.Timeout() } if parent := parentOf(err); parent != nil { return IsRetriableError(parent) } return false } func IsRetriableLaterError(err error) (time.Time, bool) { if e, ok := err.(interface { RetriableLaterError() (time.Time, bool) }); ok { return e.RetriableLaterError() } if parent := parentOf(err); parent != nil { return IsRetriableLaterError(parent) } return time.Time{}, false } type errorWithCause interface { Cause() error StackTrace() errors.StackTrace error fmt.Formatter } // wrappedError is the base error wrapper. It provides a Message string, a // stack, and a context map around a regular Go error. type wrappedError struct { errorWithCause context map[string]interface{} } // newWrappedError creates a wrappedError. func newWrappedError(err error, message string) *wrappedError { if err == nil { err = errors.New(tr.Tr.Get("Error")) } var errWithCause errorWithCause if len(message) > 0 { errWithCause = errors.Wrap(err, message).(errorWithCause) } else if ewc, ok := err.(errorWithCause); ok { errWithCause = ewc } else { errWithCause = errors.Wrap(err, "LFS").(errorWithCause) } return &wrappedError{ context: make(map[string]interface{}), errorWithCause: errWithCause, } } // Set sets the value for the key in the context. func (e wrappedError) Set(key string, val interface{}) { e.context[key] = val } // Get gets the value for a key in the context. func (e wrappedError) Get(key string) interface{} { return e.context[key] } // Del removes a key from the context. func (e wrappedError) Del(key string) { delete(e.context, key) } // Context returns the underlying context.
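// The map is returned by reference rather than copied, so mutations made
// through Set and Del are visible to every holder of the error.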
func (e wrappedError) Context() map[string]interface{} { return e.context } // Definitions for IsFatalError() type fatalError struct { *wrappedError } func (e fatalError) Fatal() bool { return true } func NewFatalError(err error) error { return fatalError{newWrappedError(err, tr.Tr.Get("Fatal error"))} } // Definitions for IsNotImplementedError() type notImplementedError struct { *wrappedError } func (e notImplementedError) NotImplemented() bool { return true } func NewNotImplementedError(err error) error { return notImplementedError{newWrappedError(err, tr.Tr.Get("Not implemented"))} } // Definitions for IsAuthError() type authError struct { *wrappedError } func (e authError) AuthError() bool { return true } func NewAuthError(err error) error { return authError{newWrappedError(err, tr.Tr.Get("Authentication required"))} } // Definitions for IsSmudgeError() type smudgeError struct { *wrappedError } func (e smudgeError) SmudgeError() bool { return true } func NewSmudgeError(err error, oid, filename string) error { e := smudgeError{newWrappedError(err, tr.Tr.Get("Smudge error"))} SetContext(e, "OID", oid) SetContext(e, "FileName", filename) return e } // Definitions for IsCleanPointerError() type cleanPointerError struct { *wrappedError } func (e cleanPointerError) CleanPointerError() bool { return true } func NewCleanPointerError(pointer interface{}, bytes []byte) error { err := New(tr.Tr.Get("pointer error")) e := cleanPointerError{newWrappedError(err, "clean")} SetContext(e, "pointer", pointer) SetContext(e, "bytes", bytes) return e } // Definitions for IsNotAPointerError() type notAPointerError struct { *wrappedError } func (e notAPointerError) NotAPointerError() bool { return true } func NewNotAPointerError(err error) error { return notAPointerError{newWrappedError(err, tr.Tr.Get("Pointer file error"))} } // Definitions for IsPointerScanError() type PointerScanError struct { treeishOid string path string *wrappedError } func (e PointerScanError) PointerScanError() bool { return true } func (e PointerScanError) OID() string { return e.treeishOid } func (e PointerScanError) Path() string { return e.path } func NewPointerScanError(err error, treeishOid, path string) error { return PointerScanError{treeishOid, path, newWrappedError(err, tr.Tr.Get("Pointer error"))} } type badPointerKeyError struct { Expected string Actual string *wrappedError } func (e badPointerKeyError) BadPointerKeyError() bool { return true } func NewBadPointerKeyError(expected, actual string) error { err := Errorf(tr.Tr.Get("Expected key %s, got %s", expected, actual)) return badPointerKeyError{expected, actual, newWrappedError(err, tr.Tr.Get("pointer parsing"))} } // Definitions for IsDownloadDeclinedError() type downloadDeclinedError struct { *wrappedError } func (e downloadDeclinedError) DownloadDeclinedError() bool { return true } func NewDownloadDeclinedError(err error, msg string) error { return downloadDeclinedError{newWrappedError(err, msg)} } // Definitions for IsRetriableLaterError() type retriableLaterError struct { *wrappedError timeAvailable time.Time } func NewRetriableLaterError(err error, header string) error { if header == "" { return nil } secs, parseErr := strconv.Atoi(header) if parseErr == nil { return retriableLaterError{ wrappedError: newWrappedError(err, ""), timeAvailable: time.Now().Add(time.Duration(secs) * time.Second), } } parseTime, parseErr := time.Parse(time.RFC1123, header) if parseErr == nil { return retriableLaterError{ wrappedError: newWrappedError(err, ""), timeAvailable: parseTime, 
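// The Retry-After header held an HTTP-date (RFC 1123) rather than a
// delta-seconds count, so the parsed absolute time is used directly.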
} } // We could not return a successful error from the Retry-After header. return nil } func (e retriableLaterError) RetriableLaterError() (time.Time, bool) { return e.timeAvailable, true } // Definitions for IsUnprocessableEntityError() type unprocessableEntityError struct { *wrappedError } func (e unprocessableEntityError) UnprocessableEntityError() bool { return true } func NewUnprocessableEntityError(err error) error { return unprocessableEntityError{newWrappedError(err, "")} } // Definitions for IsRetriableError() type retriableError struct { *wrappedError } func (e retriableError) RetriableError() bool { return true } func NewRetriableError(err error) error { return retriableError{newWrappedError(err, "")} } // Definitions for IsProtocolError() type protocolError struct { *wrappedError } func (e protocolError) ProtocolError() bool { return true } func NewProtocolError(message string, err error) error { return protocolError{newWrappedError(err, message)} } func parentOf(err error) error { type causer interface { Cause() error } if c, ok := err.(causer); ok { if innerC, innerOk := c.Cause().(causer); innerOk { return innerC.Cause() } } return nil } func ExitStatus(err error) int { var eerr *exec.ExitError if goerrors.As(err, &eerr) { ws, ok := eerr.ProcessState.Sys().(syscall.WaitStatus) if ok { return ws.ExitStatus() } } return -1 } git-lfs-3.6.1/errors/types_test.go000066400000000000000000000015071472372047300171500ustar00rootroot00000000000000package errors_test import ( "net/url" "testing" "github.com/git-lfs/git-lfs/v3/errors" "github.com/stretchr/testify/assert" ) type TemporaryError struct { } func (e TemporaryError) Error() string { return "" } func (e TemporaryError) Temporary() bool { return true } type TimeoutError struct { } func (e TimeoutError) Error() string { return "" } func (e TimeoutError) Timeout() bool { return true } func TestCanRetryOnTemporaryError(t *testing.T) { err := &url.Error{Err: TemporaryError{}} assert.True(t, errors.IsRetriableError(err)) } func TestCanRetryOnTimeoutError(t *testing.T) { err := &url.Error{Err: TimeoutError{}} assert.True(t, errors.IsRetriableError(err)) } func TestCannotRetryOnGenericUrlError(t *testing.T) { err := &url.Error{Err: errors.New("")} assert.False(t, errors.IsRetriableError(err)) } git-lfs-3.6.1/filepathfilter/000077500000000000000000000000001472372047300161015ustar00rootroot00000000000000git-lfs-3.6.1/filepathfilter/filepathfilter.go000066400000000000000000000105631472372047300214370ustar00rootroot00000000000000package filepathfilter import ( "strings" "github.com/git-lfs/git-lfs/v3/tr" "github.com/git-lfs/wildmatch/v2" "github.com/rubyist/tracerx" ) type Pattern interface { Match(filename string) bool // String returns a string representation (see: regular expressions) of // the underlying pattern used to match filenames against this Pattern. String() string } type Filter struct { include []Pattern exclude []Pattern defaultValue bool } type PatternType bool const ( GitIgnore = PatternType(false) GitAttributes = PatternType(true) ) func (p PatternType) String() string { if p == GitIgnore { return "gitignore" } return "gitattributes" } type options struct { defaultValue bool } type option func(*options) // DefaultValue is an option representing the default value of a filepathfilter // if no patterns match. If this option is not provided, the default is true. 
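// For example, NewFromPatterns(inc, exc, DefaultValue(false)) yields a
// filter whose Allows method returns false whenever no pattern matches.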
func DefaultValue(val bool) option { return func(args *options) { args.defaultValue = val } } func NewFromPatterns(include, exclude []Pattern, setters ...option) *Filter { args := &options{defaultValue: true} for _, setter := range setters { setter(args) } return &Filter{include: include, exclude: exclude, defaultValue: args.defaultValue} } func New(include, exclude []string, ptype PatternType, setters ...option) *Filter { return NewFromPatterns( convertToWildmatch(include, ptype), convertToWildmatch(exclude, ptype), setters...) } // Include returns the result of calling String() on each Pattern in the // include set of this *Filter. func (f *Filter) Include() []string { return wildmatchToString(f.include...) } // Exclude returns the result of calling String() on each Pattern in the // exclude set of this *Filter. func (f *Filter) Exclude() []string { return wildmatchToString(f.exclude...) } // wildmatchToString maps the given set of Pattern's to a string slice by // calling String() on each pattern. func wildmatchToString(ps ...Pattern) []string { s := make([]string, 0, len(ps)) for _, p := range ps { s = append(s, p.String()) } return s } func (f *Filter) Allows(filename string) bool { if f == nil { return true } var included bool for _, inc := range f.include { if included = inc.Match(filename); included { break } } if !included && len(f.include) > 0 { tracerx.Printf("filepathfilter: rejecting %q via %v", filename, f.include) return false } // Beyond this point, the only values we can logically return are false // or the default value. If the default is false, then there's no point // traversing the exclude patterns because the return value will always // be false; we'd do extra work for no functional benefit. if !included && !f.defaultValue { tracerx.Printf("filepathfilter: rejecting %q", filename) return false } for _, ex := range f.exclude { if ex.Match(filename) { tracerx.Printf("filepathfilter: rejecting %q via %q", filename, ex.String()) return false } } // No patterns matched and our default value is true. tracerx.Printf("filepathfilter: accepting %q", filename) return true } type wm struct { w *wildmatch.Wildmatch p string } func (w *wm) Match(filename string) bool { return w.w.Match(filename) } func (w *wm) String() string { return w.p } const ( sep byte = '/' ) func NewPattern(p string, ptype PatternType) Pattern { tracerx.Printf("filepathfilter: creating pattern %q of type %v", p, ptype) switch ptype { case GitIgnore: return &wm{ p: p, w: wildmatch.NewWildmatch( p, wildmatch.SystemCase, wildmatch.Contents, ), } case GitAttributes: return &wm{ p: p, w: wildmatch.NewWildmatch( p, wildmatch.SystemCase, wildmatch.Basename, wildmatch.GitAttributes, ), } default: panic(tr.Tr.Get("unreachable")) } } // join joins path elements together via the separator "sep" and produces valid // paths without multiple separators (unless multiple separators were included // in the original paths []string). 
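// For example, join("foo/", "bar") and join("foo", "bar") both yield "foo/bar".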
func join(paths ...string) string { var joined string for i, path := range paths { joined = joined + path if i != len(paths)-1 && !strings.HasSuffix(path, string(sep)) { joined = joined + string(sep) } } return joined } func convertToWildmatch(rawpatterns []string, ptype PatternType) []Pattern { patterns := make([]Pattern, len(rawpatterns)) for i, raw := range rawpatterns { patterns[i] = NewPattern(raw, ptype) } return patterns } git-lfs-3.6.1/filepathfilter/filepathfilter_test.go000066400000000000000000000074701472372047300225010ustar00rootroot00000000000000package filepathfilter import ( "testing" "github.com/stretchr/testify/assert" ) func TestPatternMatch(t *testing.T) { assertPatternMatch(t, "*", "a", "a.a", "a/b", "a/b.b", "a/b/c", "a/b/c.c", ) assertPatternMatch(t, "filename.txt", "filename.txt") assertPatternMatch(t, "*.txt", "filename.txt") refutePatternMatch(t, "*.tx", "filename.txt") assertPatternMatch(t, "f*.txt", "filename.txt") refutePatternMatch(t, "g*.txt", "filename.txt") assertPatternMatch(t, "file*", "filename.txt") refutePatternMatch(t, "file", "filename.txt") // With no path separators, should match in subfolders assertPatternMatch(t, "*.txt", "sub/filename.txt") refutePatternMatch(t, "*.tx", "sub/filename.txt") assertPatternMatch(t, "f*.txt", "sub/filename.txt") refutePatternMatch(t, "g*.txt", "sub/filename.txt") assertPatternMatch(t, "file*", "sub/filename.txt") refutePatternMatch(t, "file", "sub/filename.txt") // matches only in subdir assertPatternMatch(t, "sub/*.txt", "sub/filename.txt") refutePatternMatch(t, "sub/*.txt", "top/sub/filename.txt", "sub/filename.dat", "other/filename.txt", ) // Needs wildcard for exact filename assertPatternMatch(t, "**/filename.txt", "sub/sub/sub/filename.txt") // Should not match dots to subparts refutePatternMatch(t, "*.ign", "sub/shouldignoreme.txt") // Path specific assertPatternMatch(t, "sub", "sub", "top/sub", ) refutePatternMatch(t, "sub", "subfilename.txt", "sub/filename.txt", "top/sub/filename.txt", ) assertPatternMatch(t, "/sub", "sub", ) refutePatternMatch(t, "/sub", "subfilename.txt", "sub/filename.txt", "top/sub", "top/sub/filename.txt", ) refutePatternMatch(t, "sub/", "sub", "subfilename.txt", "sub/filename.txt", "top/sub", "top/sub/filename.txt", ) assertPatternMatchIgnore(t, "sub/", "sub/", "sub/filename.txt", "top/sub/", "top/sub/filename.txt", ) refutePatternMatchIgnore(t, "sub/", "subfilename.txt", ) refutePatternMatch(t, "/sub/", "sub", "subfilename.txt", "sub/filename.txt", "top/sub", "top/sub/filename.txt", ) assertPatternMatchIgnore(t, "/sub/", "sub/", "sub/filename.txt", ) refutePatternMatchIgnore(t, "/sub/", "subfilename.txt", "top/sub", "top/sub/filename.txt", ) // Absolute assertPatternMatch(t, "*.dat", "/path/to/sub/.git/test.dat") assertPatternMatch(t, "**/.git", "/path/to/sub/.git") } func assertPatternMatch(t *testing.T, pattern string, filenames ...string) { p := NewPattern(pattern, GitAttributes) for _, filename := range filenames { assert.True(t, p.Match(filename), "%q should match pattern %q", filename, pattern) } } func assertPatternMatchIgnore(t *testing.T, pattern string, filenames ...string) { p := NewPattern(pattern, GitIgnore) for _, filename := range filenames { assert.True(t, p.Match(filename), "%q should match pattern %q", filename, pattern) } } func refutePatternMatch(t *testing.T, pattern string, filenames ...string) { p := NewPattern(pattern, GitAttributes) for _, filename := range filenames { assert.False(t, p.Match(filename), "%q should not match pattern %q", filename, pattern) } } 
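// TestFilterComposesIncludesAndExcludes is an illustrative sketch (the
// patterns below are arbitrary examples, not fixtures from the original
// suite) of how a Filter composes its pattern sets: include patterns gate
// entry, exclude patterns veto, and DefaultValue only matters when no
// include patterns are configured.
func TestFilterComposesIncludesAndExcludes(t *testing.T) {
	f := New([]string{"*.bin"}, []string{"secret/*"}, GitAttributes)
	assert.True(t, f.Allows("assets/model.bin"))
	assert.False(t, f.Allows("notes.txt"))        // matches no include pattern
	assert.False(t, f.Allows("secret/model.bin")) // included, but excluded

	// With no includes and the default value of true, only excludes apply.
	g := New(nil, []string{"*.tmp"}, GitAttributes)
	assert.True(t, g.Allows("notes.txt"))
	assert.False(t, g.Allows("scratch.tmp"))

	// With no includes and a false default, nothing is allowed.
	h := New(nil, nil, GitAttributes, DefaultValue(false))
	assert.False(t, h.Allows("anything.txt"))
}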
func refutePatternMatchIgnore(t *testing.T, pattern string, filenames ...string) { p := NewPattern(pattern, GitIgnore) for _, filename := range filenames { assert.False(t, p.Match(filename), "%q should not match pattern %q", filename, pattern) } } type filterTest struct { expectedResult bool expectedPattern string includes []string excludes []string } func TestFilterReportsIncludePatterns(t *testing.T) { filter := New([]string{"*.foo", "*.bar"}, nil, GitAttributes) assert.Equal(t, []string{"*.foo", "*.bar"}, filter.Include()) } func TestFilterReportsExcludePatterns(t *testing.T) { filter := New(nil, []string{"*.baz", "*.quux"}, GitAttributes) assert.Equal(t, []string{"*.baz", "*.quux"}, filter.Exclude()) } git-lfs-3.6.1/fs/000077500000000000000000000000001472372047300135075ustar00rootroot00000000000000git-lfs-3.6.1/fs/cleanup.go000066400000000000000000000035271472372047300154740ustar00rootroot00000000000000package fs import ( "os" "path/filepath" "strings" "sync" "time" "github.com/git-lfs/git-lfs/v3/tools" "github.com/rubyist/tracerx" ) func (f *Filesystem) cleanupTmp() error { tmpdir := f.TempDir() if len(tmpdir) == 0 { return nil } // No temporary directory? No problem. if _, err := os.Stat(tmpdir); err != nil && os.IsNotExist(err) { return nil } traversedDirectories := &sync.Map{} var walkErr error tools.FastWalkDir(tmpdir, func(parentDir string, info os.FileInfo, err error) { if err != nil { walkErr = err } if walkErr != nil { return } path := filepath.Join(parentDir, info.Name()) if info.IsDir() { traversedDirectories.Store(path, info) return } parts := strings.SplitN(info.Name(), "-", 2) oid := parts[0] if len(parts) == 2 && len(oid) == 64 { fi, err := os.Stat(f.ObjectPathname(oid)) if err == nil && !fi.IsDir() { tracerx.Printf("Removing existing tmp object file: %s", path) os.RemoveAll(path) return } } // Don't prune items in a directory younger than an hour. These // items could be hard links to files from other repositories, // which would have an older timestamp but which are still in // use by some active process. Exempt the main temporary directory from // this check, since we frequently modify it and we'd never // prune otherwise. if tmpdir != parentDir { var dirInfo os.FileInfo entry, ok := traversedDirectories.Load(parentDir) if ok { dirInfo = entry.(os.FileInfo) } else { dirInfo, err = os.Stat(parentDir) if err != nil { return } traversedDirectories.Store(parentDir, dirInfo) } if time.Since(dirInfo.ModTime()) <= time.Hour { return } } if time.Since(info.ModTime()) > time.Hour { tracerx.Printf("Removing old tmp object file: %s", path) os.RemoveAll(path) return } }) return walkErr } git-lfs-3.6.1/fs/fs.go000066400000000000000000000203611472372047300144500ustar00rootroot00000000000000package fs import ( "bufio" "bytes" "crypto/sha256" "encoding/hex" "errors" "os" "path/filepath" "regexp" "strconv" "strings" "sync" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) var ( oidRE = regexp.MustCompile(`\A[[:alnum:]]{64}`) EmptyObjectSHA256 = hex.EncodeToString(sha256.New().Sum(nil)) ) // Environment is a copy of a subset of the interface // github.com/git-lfs/git-lfs/config.Environment. // // For more information, see config/environment.go. type Environment interface { Get(key string) (val string, ok bool) } // Object represents a locally stored LFS object.
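// Oid is the lowercase hexadecimal SHA-256 of the object's contents, and
// Size is its length in bytes.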
type Object struct { Oid string Size int64 } type Filesystem struct { GitStorageDir string // parent of objects/lfs (may be same as GitDir but may not) LFSStorageDir string // parent of lfs objects and tmp dirs. Default: ".git/lfs" ReferenceDirs []string // alternative local media dirs (relative to clone reference repo) lfsobjdir string tmpdir string logdir string repoPerms os.FileMode mu sync.Mutex } func (f *Filesystem) EachObject(fn func(Object) error) error { var eachErr error tools.FastWalkDir(f.LFSObjectDir(), func(parentDir string, info os.FileInfo, err error) { if err != nil { eachErr = err return } if eachErr != nil || info.IsDir() { return } if oidRE.MatchString(info.Name()) { fn(Object{Oid: info.Name(), Size: info.Size()}) } }) return eachErr } func (f *Filesystem) ObjectExists(oid string, size int64) bool { if size == 0 { return true } return tools.FileExistsOfSize(f.ObjectPathname(oid), size) } func (f *Filesystem) ObjectPath(oid string) (string, error) { if len(oid) < 4 { return "", errors.New(tr.Tr.Get("too short object ID: %q", oid)) } if oid == EmptyObjectSHA256 { return os.DevNull, nil } dir := f.localObjectDir(oid) if err := tools.MkdirAll(dir, f); err != nil { return "", errors.New(tr.Tr.Get("error trying to create local storage directory in %q: %s", dir, err)) } return filepath.Join(dir, oid), nil } func (f *Filesystem) ObjectPathname(oid string) string { if oid == EmptyObjectSHA256 { return os.DevNull } return filepath.Join(f.localObjectDir(oid), oid) } func (f *Filesystem) DecodePathname(path string) string { return string(DecodePathBytes([]byte(path))) } func (f *Filesystem) RepositoryPermissions(executable bool) os.FileMode { if executable { return tools.ExecutablePermissions(f.repoPerms) } return f.repoPerms } /** * Revert non ascii characters escaped by git or windows (as octal sequences \000) back to bytes. 
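 * For example, the escaped form "bl\303\204" decodes back to the bytes of
 * "blÄ" (compare TestDecodeSingle in fs_test.go).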
*/ func DecodePathBytes(path []byte) []byte { var expression = regexp.MustCompile(`\\[0-9]{3}`) var buffer bytes.Buffer // strip quotes if any if len(path) > 2 && path[0] == '"' && path[len(path)-1] == '"' { path = path[1 : len(path)-1] } base := 0 for _, submatches := range expression.FindAllSubmatchIndex(path, -1) { buffer.Write(path[base:submatches[0]]) match := string(path[submatches[0]+1 : submatches[0]+4]) k, err := strconv.ParseUint(match, 8, 64) if err != nil { return path } // abort on error buffer.Write([]byte{byte(k)}) base = submatches[1] } buffer.Write(path[base:len(path)]) return buffer.Bytes() } func (f *Filesystem) localObjectDir(oid string) string { return filepath.Join(f.LFSObjectDir(), oid[0:2], oid[2:4]) } func (f *Filesystem) ObjectReferencePaths(oid string) []string { if len(f.ReferenceDirs) == 0 { return nil } var paths []string for _, ref := range f.ReferenceDirs { paths = append(paths, filepath.Join(ref, oid[0:2], oid[2:4], oid)) } return paths } func (f *Filesystem) LFSObjectDir() string { f.mu.Lock() defer f.mu.Unlock() if len(f.lfsobjdir) == 0 { f.lfsobjdir = filepath.Join(f.LFSStorageDir, "objects") tools.MkdirAll(f.lfsobjdir, f) } return f.lfsobjdir } func (f *Filesystem) LogDir() string { f.mu.Lock() defer f.mu.Unlock() if len(f.logdir) == 0 { f.logdir = filepath.Join(f.LFSStorageDir, "logs") tools.MkdirAll(f.logdir, f) } return f.logdir } func (f *Filesystem) TempDir() string { f.mu.Lock() defer f.mu.Unlock() if len(f.tmpdir) == 0 { f.tmpdir = filepath.Join(f.LFSStorageDir, "tmp") tools.MkdirAll(f.tmpdir, f) } return f.tmpdir } func (f *Filesystem) Cleanup() error { if f == nil { return nil } return f.cleanupTmp() } // New initializes a new *Filesystem with the given directories. gitdir is the // path to the bare repo, workdir is the path to the repository working // directory, and lfsdir is the optional path to the `.git/lfs` directory. // repoPerms is the permissions for directories in the repository. func New(env Environment, gitdir, workdir, lfsdir string, repoPerms os.FileMode) *Filesystem { fs := &Filesystem{ GitStorageDir: resolveGitStorageDir(gitdir), } fs.ReferenceDirs = resolveReferenceDirs(env, fs.GitStorageDir) if len(lfsdir) == 0 { lfsdir = "lfs" } if filepath.IsAbs(lfsdir) { fs.LFSStorageDir = lfsdir } else { fs.LFSStorageDir = filepath.Join(fs.GitStorageDir, lfsdir) } fs.repoPerms = repoPerms return fs } func resolveReferenceDirs(env Environment, gitStorageDir string) []string { var references []string envAlternates, ok := env.Get("GIT_ALTERNATE_OBJECT_DIRECTORIES") if ok { splits := strings.Split(envAlternates, string(os.PathListSeparator)) for _, split := range splits { if dir, ok := existsAlternate(split); ok { references = append(references, dir) } } } cloneReferencePath := filepath.Join(gitStorageDir, "objects", "info", "alternates") if tools.FileExists(cloneReferencePath) { f, err := os.Open(cloneReferencePath) if err != nil { tracerx.Printf("could not open %s: %s", cloneReferencePath, err) return nil } defer f.Close() scanner := bufio.NewScanner(f) for scanner.Scan() { text := strings.TrimSpace(scanner.Text()) if len(text) == 0 || strings.HasPrefix(text, "#") { continue } if dir, ok := existsAlternate(text); ok { references = append(references, dir) } } if err := scanner.Err(); err != nil { tracerx.Printf("could not scan %s: %s", cloneReferencePath, err) } } return references } // existsAlternate takes an object directory given in "objs" (read as a single, // line from .git/objects/info/alternates). 
If that is a satisfiable alternates // directory (i.e., it exists), the directory is returned along with "true". If // not, the empty string and false is returned instead. func existsAlternate(objs string) (string, bool) { objs = strings.TrimSpace(objs) if strings.HasPrefix(objs, "\"") { var err error unquote := strings.LastIndex(objs, "\"") if unquote == 0 { return "", false } objs, err = strconv.Unquote(objs[:unquote+1]) if err != nil { return "", false } } storage := filepath.Join(filepath.Dir(objs), "lfs", "objects") if tools.DirExists(storage) { return storage, true } return "", false } // From a git dir, get the location that objects are to be stored (we will store lfs alongside) // Sometimes there is an additional level of redirect on the .git folder by way of a commondir file // before you find object storage, e.g. 'git worktree' uses this. It redirects to gitdir either by GIT_DIR // (during setup) or .git/git-dir: (during use), but this only contains the index etc, the objects // are found in another git dir via 'commondir'. func resolveGitStorageDir(gitDir string) string { commondirpath := filepath.Join(gitDir, "commondir") if tools.FileExists(commondirpath) && !tools.DirExists(filepath.Join(gitDir, "objects")) { // no git-dir: prefix in commondir storage, err := processGitRedirectFile(commondirpath, "") if err == nil { return storage } } return gitDir } func processGitRedirectFile(file, prefix string) (string, error) { data, err := os.ReadFile(file) if err != nil { return "", err } contents := string(data) var dir string if len(prefix) > 0 { if !strings.HasPrefix(contents, prefix) { // Prefix required & not found return "", nil } dir = strings.TrimSpace(contents[len(prefix):]) } else { dir = strings.TrimSpace(contents) } if !filepath.IsAbs(dir) { // The .git file contains a relative path. // Create an absolute path based on the directory the .git file is located in. 
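// For example, a worktree's .git file containing "gitdir: ../.git/worktrees/wt"
// is resolved relative to the directory containing that .git file.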
dir = filepath.Join(filepath.Dir(file), dir) } return dir, nil } git-lfs-3.6.1/fs/fs_test.go000066400000000000000000000016671472372047300155170ustar00rootroot00000000000000package fs import ( "os" "testing" "github.com/stretchr/testify/assert" ) func TestDecodeNone(t *testing.T) { evaluate(t, "A:\\some\\regular\\windows\\path", "A:\\some\\regular\\windows\\path") } func TestDecodeSingle(t *testing.T) { evaluate(t, "A:\\bl\\303\\204\\file.txt", "A:\\blÄ\\file.txt") } func TestDecodeMultiple(t *testing.T) { evaluate(t, "A:\\fo\\130\\file\\303\\261.txt", "A:\\fo\130\\file\303\261.txt") } func evaluate(t *testing.T, input string, expected string) { fs := Filesystem{} output := fs.DecodePathname(input) if output != expected { t.Errorf("Expecting same path, got: %s, want: %s.", output, expected) } } func TestRepositoryPermissions(t *testing.T) { m := map[os.FileMode]os.FileMode{ 0777: 0666, 0755: 0644, 0700: 0600, } for k, v := range m { fs := Filesystem{repoPerms: v} assert.Equal(t, k, fs.RepositoryPermissions(true)) assert.Equal(t, v, fs.RepositoryPermissions(false)) } } git-lfs-3.6.1/git-lfs.go000066400000000000000000000011351472372047300147730ustar00rootroot00000000000000package main import ( "fmt" "os" "os/signal" "sync" "syscall" "github.com/git-lfs/git-lfs/v3/commands" "github.com/git-lfs/git-lfs/v3/tr" ) func main() { // Buffered so a signal delivered before the receiver is ready is not dropped. c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, os.Kill) var once sync.Once go func() { for { sig := <-c once.Do(commands.Cleanup) fmt.Fprintf(os.Stderr, "\n%s\n", tr.Tr.Get("Exiting because of %q signal.", sig)) exitCode := 1 if sysSig, ok := sig.(syscall.Signal); ok { exitCode = int(sysSig) } os.Exit(exitCode + 128) } }() code := commands.Run() once.Do(commands.Cleanup) os.Exit(code) } git-lfs-3.6.1/git-lfs_windows.go000066400000000000000000000001411472372047300165410ustar00rootroot00000000000000//go:build windows && !arm64 // +build windows,!arm64 //go:generate goversioninfo package main git-lfs-3.6.1/git-lfs_windows_arm64.go000066400000000000000000000001621472372047300175550ustar00rootroot00000000000000//go:build windows && arm64 // +build windows,arm64 //go:generate goversioninfo -arm=true -64=true package main git-lfs-3.6.1/git/000077500000000000000000000000001472372047300136625ustar00rootroot00000000000000git-lfs-3.6.1/git/attribs.go000066400000000000000000000145221472372047300156650ustar00rootroot00000000000000package git import ( "errors" "io" "os" "path" "path/filepath" "sort" "strings" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/git/gitattr" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) const ( LockableAttrib = "lockable" FilterAttrib = "filter" ) // AttributePath is a path entry in a gitattributes file which has the LFS filter type AttributePath struct { // Path entry in the attribute file Path string // The attribute file which was the source of this entry Source *AttributeSource // Path also has the 'lockable' attribute Lockable bool // Path is handled by Git LFS (i.e., filter=lfs) Tracked bool } type AttributeSource struct { Path string LineEnding string } type attrFile struct { path string readMacros bool } func (s *AttributeSource) String() string { return s.Path } // GetRootAttributePaths behaves as GetAttributePaths, and loads information // only from the global gitattributes file.
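// The location is taken from core.attributesfile, falling back to the
// conventional git/attributes path under the user's configuration directory.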
func GetRootAttributePaths(mp *gitattr.MacroProcessor, cfg Env) []AttributePath { af, _ := cfg.Get("core.attributesfile") af, err := tools.ExpandConfigPath(af, "git/attributes") if err != nil { return nil } if _, err := os.Stat(af); os.IsNotExist(err) { return nil } // The working directory for the root gitattributes file is blank. return attrPathsFromFile(mp, af, "", true) } // GetSystemAttributePaths behaves as GetAttributePaths, and loads information // only from the system gitattributes file, respecting the $PREFIX environment // variable. func GetSystemAttributePaths(mp *gitattr.MacroProcessor, env Env) ([]AttributePath, error) { var path string if IsGitVersionAtLeast("2.42.0") { cmd, err := gitNoLFS("var", "GIT_ATTR_SYSTEM") if err != nil { return nil, errors.New(tr.Tr.Get("failed to find `git var GIT_ATTR_SYSTEM`: %v", err)) } out, err := cmd.Output() if err != nil { return nil, errors.New(tr.Tr.Get("failed to call `git var GIT_ATTR_SYSTEM`: %v", err)) } paths := strings.Split(string(out), "\n") if len(paths) == 0 { return nil, nil } path = paths[0] } else { prefix, _ := env.Get("PREFIX") if len(prefix) == 0 { prefix = string(filepath.Separator) } path = filepath.Join(prefix, "etc", "gitattributes") } if _, err := os.Stat(path); os.IsNotExist(err) { return nil, nil } return attrPathsFromFile(mp, path, "", true), nil } // GetAttributePaths returns a list of entries in .gitattributes which are // configured with the filter=lfs attribute // workingDir is the root of the working copy // gitDir is the root of the git repo func GetAttributePaths(mp *gitattr.MacroProcessor, workingDir, gitDir string) []AttributePath { paths := make([]AttributePath, 0) for _, file := range findAttributeFiles(workingDir, gitDir) { paths = append(paths, attrPathsFromFile(mp, file.path, workingDir, file.readMacros)...) } return paths } func attrPathsFromFile(mp *gitattr.MacroProcessor, path, workingDir string, readMacros bool) []AttributePath { attributes, err := os.Open(path) if err != nil { return nil } defer attributes.Close() return AttrPathsFromReader(mp, path, workingDir, attributes, readMacros) } func AttrPathsFromReader(mp *gitattr.MacroProcessor, fpath, workingDir string, rdr io.Reader, readMacros bool) []AttributePath { var paths []AttributePath relfile, _ := filepath.Rel(workingDir, fpath) // Go 1.20 now always returns ".\foo" instead of "foo" in filepath.Rel, // but only on Windows. Strip the extra dot here so our paths are // always fully relative with no "." or ".." components. reldir := filepath.ToSlash(tools.TrimCurrentPrefix(filepath.Dir(relfile))) if reldir == "." 
{ reldir = "" } source := &AttributeSource{Path: relfile} lines, eol, err := gitattr.ParseLines(rdr) if err != nil { return nil } patternLines := mp.ProcessLines(lines, readMacros) for _, line := range patternLines { lockable := false tracked := false hasFilter := false for _, attr := range line.Attrs() { if attr.K == FilterAttrib { hasFilter = true tracked = attr.V == "lfs" } else if attr.K == LockableAttrib && attr.V == "true" { lockable = true } } if !hasFilter && !lockable { continue } pattern := line.Pattern().String() if len(reldir) > 0 { pattern = path.Join(reldir, pattern) } paths = append(paths, AttributePath{ Path: pattern, Source: source, Lockable: lockable, Tracked: tracked, }) } source.LineEnding = eol return paths } // GetAttributeFilter returns a list of entries in .gitattributes which are // configured with the filter=lfs attribute as a file path filter which // file paths can be matched against // workingDir is the root of the working copy // gitDir is the root of the git repo func GetAttributeFilter(workingDir, gitDir string) *filepathfilter.Filter { paths := GetAttributePaths(gitattr.NewMacroProcessor(), workingDir, gitDir) patterns := make([]filepathfilter.Pattern, 0, len(paths)) for _, path := range paths { // Convert all separators to `/` before creating a pattern to // avoid characters being escaped in situations like `subtree\*.md` patterns = append(patterns, filepathfilter.NewPattern(filepath.ToSlash(path.Path), filepathfilter.GitAttributes)) } return filepathfilter.NewFromPatterns(patterns, nil) } func findAttributeFiles(workingDir, gitDir string) []attrFile { var paths []attrFile repoAttributes := filepath.Join(gitDir, "info", "attributes") if info, err := os.Stat(repoAttributes); err == nil && !info.IsDir() { paths = append(paths, attrFile{path: repoAttributes, readMacros: true}) } lsFiles, err := NewLsFiles(workingDir, true, true) if err != nil { tracerx.Printf("Error finding .gitattributes: %v", err) return paths } if gitattributesFiles, present := lsFiles.FilesByName[".gitattributes"]; present { for _, f := range gitattributesFiles { tracerx.Printf("findAttributeFiles: located %s", f.FullPath) paths = append(paths, attrFile{ path: filepath.Join(workingDir, f.FullPath), readMacros: f.FullPath == ".gitattributes", // Read macros from the top-level attributes }) } } // reverse the order of the files so more specific entries are found first // when iterating from the front (respects precedence) sort.Slice(paths[:], func(i, j int) bool { return len(paths[i].path) > len(paths[j].path) }) return paths } git-lfs-3.6.1/git/config.go000066400000000000000000000156621472372047300154700ustar00rootroot00000000000000package git import ( "errors" "fmt" "os" "path/filepath" "strings" "sync" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/git-lfs/git-lfs/v3/tr" ) var ( ErrReadOnly = errors.New(tr.Tr.Get("configuration is read-only")) ) // Environment is a restricted version of config.Environment that only provides // a single method. type Environment interface { // Get is shorthand for calling `e.Fetcher.Get(key)`. Get(key string) (val string, ok bool) } // Configuration can fetch or modify the current Git config and track the Git // version. 
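// A Configuration built with NewReadOnlyConfig rejects all write operations
// with ErrReadOnly.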
type Configuration struct { WorkDir string GitDir string version *string readOnly bool mu sync.Mutex } func NewConfig(workdir, gitdir string) *Configuration { if len(gitdir) == 0 && len(workdir) > 0 { gitdir = filepath.Join(workdir, ".git") } return &Configuration{WorkDir: workdir, GitDir: gitdir} } // NewReadOnlyConfig creates a new configuration that returns an error if an // attempt to write to the configuration is made. func NewReadOnlyConfig(workdir, gitdir string) *Configuration { cfg := NewConfig(workdir, gitdir) cfg.readOnly = true return cfg } func ParseConfigLines(lines string, onlySafeKeys bool) *ConfigurationSource { return &ConfigurationSource{ Lines: strings.Split(lines, "\n"), OnlySafeKeys: onlySafeKeys, } } type ConfigurationSource struct { Lines []string OnlySafeKeys bool } // Find returns the git config value for the key func (c *Configuration) Find(val string) string { output, _ := c.gitConfig(val) return output } // FindGlobal returns the git config value in global scope for the key func (c *Configuration) FindGlobal(key string) string { output, _ := c.gitConfig("--global", key) return output } // FindSystem returns the git config value in system scope for the key func (c *Configuration) FindSystem(key string) string { output, _ := c.gitConfig("--system", key) return output } // FindLocal returns the git config value in local scope for the key func (c *Configuration) FindLocal(key string) string { output, _ := c.gitConfig("--local", key) return output } // FindWorktree returns the git config value in worktree or local scope for the key, depending on whether multiple worktrees are in use func (c *Configuration) FindWorktree(key string) string { output, _ := c.gitConfig("--worktree", key) return output } // FindFile returns the git config value for the key in the given configuration file func (c *Configuration) FindFile(file, key string) string { output, _ := c.gitConfig("--file", file, key) return output } // SetGlobal sets the git config value for the key in the global config func (c *Configuration) SetGlobal(key, val string) (string, error) { return c.gitConfigWrite("--global", "--replace-all", key, val) } // SetSystem sets the git config value for the key in the system config func (c *Configuration) SetSystem(key, val string) (string, error) { return c.gitConfigWrite("--system", "--replace-all", key, val) } // SetLocal sets the git config value for the key in the local config func (c *Configuration) SetLocal(key, val string) (string, error) { return c.gitConfigWrite("--replace-all", key, val) } // SetWorktree sets the git config value for the key in the worktree or local config, depending on whether multiple worktrees are in use func (c *Configuration) SetWorktree(key, val string) (string, error) { return c.gitConfigWrite("--worktree", "--replace-all", key, val) } // SetFile sets the git config value for the key in the given configuration file func (c *Configuration) SetFile(file, key, val string) (string, error) { return c.gitConfigWrite("--file", file, "--replace-all", key, val) } // UnsetGlobalSection removes the entire named section from the global config func (c *Configuration) UnsetGlobalSection(key string) (string, error) { return c.gitConfigWrite("--global", "--remove-section", key) } // UnsetSystemSection removes the entire named section from the system config func (c *Configuration) UnsetSystemSection(key string) (string, error) { return c.gitConfigWrite("--system", "--remove-section", key) } // UnsetLocalSection removes the entire named section
from the local config func (c *Configuration) UnsetLocalSection(key string) (string, error) { return c.gitConfigWrite("--local", "--remove-section", key) } // UnsetWorktreeSection removes the entire named section from the worktree or local config, depending on whether multiple worktrees are in use func (c *Configuration) UnsetWorktreeSection(key string) (string, error) { return c.gitConfigWrite("--worktree", "--remove-section", key) } // UnsetFileSection removes the entire named section from the given configuration file func (c *Configuration) UnsetFileSection(file, key string) (string, error) { return c.gitConfigWrite("--file", file, "--remove-section", key) } // UnsetLocalKey removes the git config value for the key from the specified config file func (c *Configuration) UnsetLocalKey(key string) (string, error) { return c.gitConfigWrite("--unset", key) } func (c *Configuration) Sources(dir string, optionalFilename string) ([]*ConfigurationSource, error) { gitconfig, err := c.Source() if err != nil { return nil, err } configs := make([]*ConfigurationSource, 0, 2) bare, err := IsBare() if err == nil { // First try to read from the working directory and then the index if // the file is missing from the working directory. var fileconfig *ConfigurationSource if !bare { fileconfig, err = c.FileSource(filepath.Join(dir, optionalFilename)) if err != nil { if !os.IsNotExist(err) { return nil, err } fileconfig, _ = c.RevisionSource(fmt.Sprintf(":%s", optionalFilename)) } } if fileconfig == nil { fileconfig, _ = c.RevisionSource(fmt.Sprintf("HEAD:%s", optionalFilename)) } if fileconfig != nil { configs = append(configs, fileconfig) } } return append(configs, gitconfig), nil } func (c *Configuration) FileSource(filename string) (*ConfigurationSource, error) { if _, err := os.Stat(filename); err != nil { return nil, err } out, err := c.gitConfig("-l", "-f", filename) if err != nil { return nil, err } return ParseConfigLines(out, true), nil } func (c *Configuration) RevisionSource(revision string) (*ConfigurationSource, error) { out, err := c.gitConfig("-l", "--blob", revision) if err != nil { return nil, err } return ParseConfigLines(out, true), nil } func (c *Configuration) Source() (*ConfigurationSource, error) { out, err := c.gitConfig("-l") if err != nil { return nil, err } return ParseConfigLines(out, false), nil } func (c *Configuration) gitConfig(args ...string) (string, error) { args = append([]string{"config", "--includes"}, args...) cmd, err := subprocess.ExecCommand("git", args...) if err != nil { return "", err } if len(c.GitDir) > 0 { cmd.Dir = c.GitDir } return subprocess.Output(cmd) } func (c *Configuration) gitConfigWrite(args ...string) (string, error) { if c.readOnly { return "", ErrReadOnly } return c.gitConfig(args...) } git-lfs-3.6.1/git/config_test.go000066400000000000000000000004631472372047300165200ustar00rootroot00000000000000package git_test // to avoid import cycles import ( "testing" . 
"github.com/git-lfs/git-lfs/v3/git" "github.com/stretchr/testify/assert" ) func TestReadOnlyConfig(t *testing.T) { cfg := NewReadOnlyConfig("", "") _, err := cfg.SetLocal("lfs.this.should", "fail") assert.Equal(t, err, ErrReadOnly) } git-lfs-3.6.1/git/filter_process_scanner.go000066400000000000000000000161421472372047300207510ustar00rootroot00000000000000// Package git contains various commands that shell out to git // NOTE: Subject to change, do not rely on this package from outside git-lfs source package git import ( "io" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tr" "github.com/git-lfs/pktline" "github.com/rubyist/tracerx" ) // FilterProcessScanner provides a scanner-like interface capable of // initializing the filter process with the Git parent, and scanning for // requests across the protocol. // // Reading a request (and errors) is as follows: // // s := NewFilterProcessScanner(os.Stdin, os.Stderr) // for s.Scan() { // req := s.Request() // // ... // } // // if err := s.Err(); err != nil { // // ... // } type FilterProcessScanner struct { // pl is the *pktline instance used to read and write packets back and // forth between Git. pl *pktline.Pktline // req is a temporary variable used to hold the value accessible by the // `Request()` function. It is cleared at the beginning of each `Scan()` // invocation, and written to at the end of each `Scan()` invocation. req *Request // err is a temporary variable used to hold the value accessible by the // `Request()` function. It is cleared at the beginning of each `Scan()` // invocation, and written to at the end of each `Scan()` invocation. err error } // NewFilterProcessScanner constructs a new instance of the // `*FilterProcessScanner` type which reads packets from the `io.Reader` "r", // and writes packets to the `io.Writer`, "w". // // Both reader and writers SHOULD NOT be `*git.PacketReader` or // `*git.PacketWriter`s, they will be transparently treated as such. In other // words, it is safe (and recommended) to pass `os.Stdin` and `os.Stdout` // directly. func NewFilterProcessScanner(r io.Reader, w io.Writer) *FilterProcessScanner { return &FilterProcessScanner{ pl: pktline.NewPktline(r, w), } } // Init initializes the filter and ACKs back and forth between the Git LFS // subprocess and the Git parent process that each is a git-filter-server and // client respectively. // // If either side wrote an invalid sequence of data, or did not meet // expectations, an error will be returned. If the filter type is not supported, // an error will be returned. If the pkt-line welcome message was invalid, an // error will be returned. // // If there was an error reading or writing any of the packets below, an error // will be returned. 
func (o *FilterProcessScanner) Init() error { tracerx.Printf("Initialize filter-process") reqVer := "version=2" initMsg, err := o.pl.ReadPacketText() if err != nil { return errors.Wrap(err, tr.Tr.Get("reading filter-process initialization")) } if initMsg != "git-filter-client" { return errors.New(tr.Tr.Get("invalid filter-process pkt-line welcome message: %s", initMsg)) } supVers, err := o.pl.ReadPacketList() if err != nil { return errors.Wrap(err, tr.Tr.Get("reading filter-process versions")) } if !isStringInSlice(supVers, reqVer) { return errors.New(tr.Tr.Get("filter '%s' not supported (your Git supports: %s)", reqVer, supVers)) } err = o.pl.WritePacketList([]string{"git-filter-server", reqVer}) if err != nil { return errors.Wrap(err, tr.Tr.Get("writing filter-process initialization failed")) } return nil } // NegotiateCapabilities executes the process of negotiating capabilities // between the filter client and server. If we don't support any of the // capabilities given to LFS by Git, an error will be returned. If there was an // error reading or writing capabilities between the two, an error will be // returned. func (o *FilterProcessScanner) NegotiateCapabilities() ([]string, error) { reqCaps := []string{"capability=clean", "capability=smudge"} supCaps, err := o.pl.ReadPacketList() if err != nil { return nil, errors.New(tr.Tr.Get("reading filter-process capabilities failed with %s", err)) } for _, sup := range supCaps { if sup == "capability=delay" { reqCaps = append(reqCaps, "capability=delay") break } } for _, reqCap := range reqCaps { if !isStringInSlice(supCaps, reqCap) { return nil, errors.New(tr.Tr.Get("filter '%s' not supported (your Git supports: %s)", reqCap, supCaps)) } } err = o.pl.WritePacketList(reqCaps) if err != nil { return nil, errors.New(tr.Tr.Get("writing filter-process capabilities failed with %s", err)) } return supCaps, nil } // Request represents a single command sent to LFS from the parent Git process. type Request struct { // Header maps header strings to values, and is encoded as the first // part of the packet. Header map[string]string // Payload represents the body of the packet, and contains the contents // of the file in the index. Payload io.Reader } // Scan scans for the next request, or error and returns whether or not the scan // was successful, indicating the presence of a valid request. If the Scan // failed, there was either an error reading the next request (and the results // of calling `Err()` should be inspected), or the pipe was closed and no more // requests are present. // // Closing the pipe is Git's way to communicate that no more files will be // filtered. Git expects that the LFS process exits after this event. func (o *FilterProcessScanner) Scan() bool { o.req, o.err = nil, nil req, err := o.readRequest() if err != nil { o.err = err return false } o.req = req return true } // Request returns the request read from a call to Scan(). It is available only // after a call to `Scan()` has completed, and is re-initialized to nil at the // beginning of the subsequent `Scan()` call. func (o *FilterProcessScanner) Request() *Request { return o.req } // Err returns any error encountered from the last call to Scan(). It is available only // after a call to `Scan()` has completed, and is re-initialized to nil at the // beginning of the subsequent `Scan()` call. func (o *FilterProcessScanner) Err() error { return o.err } // readRequest reads the headers of a request and yields an `io.Reader` which // will read the body of the request. 
Since the body is _not_ offset, one
// request should be read in its entirety before consuming the next request.
func (o *FilterProcessScanner) readRequest() (*Request, error) {
	requestList, err := o.pl.ReadPacketList()
	if err != nil {
		return nil, err
	}

	req := &Request{
		Header:  make(map[string]string),
		Payload: pktline.NewPktlineReaderFromPktline(o.pl, 65536),
	}

	for _, pair := range requestList {
		v := strings.SplitN(pair, "=", 2)
		if len(v) > 1 {
			req.Header[v[0]] = v[1]
		}
	}

	return req, nil
}

// WriteList writes a list of strings to the underlying pktline data stream in
// pktline format.
func (o *FilterProcessScanner) WriteList(list []string) error {
	return o.pl.WritePacketList(list)
}

func (o *FilterProcessScanner) WriteStatus(status FilterProcessStatus) error {
	return o.pl.WritePacketList([]string{"status=" + status.String()})
}

// isStringInSlice returns whether a given string "what" is contained in a
// slice, "s".
//
// isStringInSlice is copied from "github.com/xeipuuv/gojsonschema/utils.go"
func isStringInSlice(s []string, what string) bool {
	for i := range s {
		if s[i] == what {
			return true
		}
	}
	return false
}
git-lfs-3.6.1/git/filter_process_scanner_test.go000066400000000000000000000110351472372047300220040ustar00rootroot00000000000000package git

import (
	"bytes"
	"io"
	"testing"

	"github.com/git-lfs/pktline"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestFilterProcessScannerInitializesWithCorrectSupportedValues(t *testing.T) {
	var from, to bytes.Buffer

	pl := pktline.NewPktline(nil, &from)
	require.Nil(t, pl.WritePacketText("git-filter-client"))
	require.Nil(t, pl.WritePacketList([]string{"version=2"}))

	fps := NewFilterProcessScanner(&from, &to)
	err := fps.Init()
	assert.Nil(t, err)

	out, err := pktline.NewPktline(&to, nil).ReadPacketList()
	assert.Nil(t, err)
	assert.Equal(t, []string{"git-filter-server", "version=2"}, out)
}

func TestFilterProcessScannerRejectsUnrecognizedInitializationMessages(t *testing.T) {
	var from, to bytes.Buffer

	pl := pktline.NewPktline(nil, &from)
	require.Nil(t, pl.WritePacketText("git-filter-client-unknown"))
	require.Nil(t, pl.WriteFlush())

	fps := NewFilterProcessScanner(&from, &to)
	err := fps.Init()
	require.NotNil(t, err)
	assert.Equal(t, "invalid filter-process pkt-line welcome message: git-filter-client-unknown", err.Error())
	assert.Empty(t, to.Bytes())
}

func TestFilterProcessScannerRejectsUnsupportedFilters(t *testing.T) {
	var from, to bytes.Buffer

	pl := pktline.NewPktline(nil, &from)
	require.Nil(t, pl.WritePacketText("git-filter-client"))
	// Write an unsupported version
	require.Nil(t, pl.WritePacketList([]string{"version=0"}))

	fps := NewFilterProcessScanner(&from, &to)
	err := fps.Init()
	require.NotNil(t, err)
	assert.Equal(t, "filter 'version=2' not supported (your Git supports: [version=0])", err.Error())
	assert.Empty(t, to.Bytes())
}

func TestFilterProcessScannerNegotiatesSupportedCapabilities(t *testing.T) {
	var from, to bytes.Buffer

	pl := pktline.NewPktline(nil, &from)
	require.Nil(t, pl.WritePacketList([]string{
		"capability=clean", "capability=smudge", "capability=not-invented-yet",
	}))

	fps := NewFilterProcessScanner(&from, &to)
	caps, err := fps.NegotiateCapabilities()
	assert.Contains(t, caps, "capability=clean")
	assert.Contains(t, caps, "capability=smudge")
	assert.Nil(t, err)

	out, err := pktline.NewPktline(&to, nil).ReadPacketList()
	assert.Nil(t, err)
	assert.Equal(t, []string{"capability=clean",
"capability=smudge"}, out) } func TestFilterProcessScannerDoesNotNegotitatesUnsupportedCapabilities(t *testing.T) { var from, to bytes.Buffer pl := pktline.NewPktline(nil, &from) // Write an unsupported capability require.Nil(t, pl.WritePacketList([]string{ "capability=unsupported", })) fps := NewFilterProcessScanner(&from, &to) caps, err := fps.NegotiateCapabilities() require.NotNil(t, err) assert.Empty(t, caps) assert.Equal(t, "filter 'capability=clean' not supported (your Git supports: [capability=unsupported])", err.Error()) assert.Empty(t, to.Bytes()) } func TestFilterProcessScannerReadsRequestHeadersAndPayload(t *testing.T) { var from, to bytes.Buffer pl := pktline.NewPktline(nil, &from) // Headers require.Nil(t, pl.WritePacketList([]string{ "foo=bar", "other=woot", "crazy='sq',\\$x=.bin", })) // Multi-line packet require.Nil(t, pl.WritePacketText("first")) require.Nil(t, pl.WritePacketText("second")) require.Nil(t, pl.WriteFlush()) req, err := readRequest(NewFilterProcessScanner(&from, &to)) assert.Nil(t, err) assert.Equal(t, req.Header["foo"], "bar") assert.Equal(t, req.Header["other"], "woot") assert.Equal(t, req.Header["crazy"], "'sq',\\$x=.bin") payload, err := io.ReadAll(req.Payload) assert.Nil(t, err) assert.Equal(t, []byte("first\nsecond\n"), payload) } func TestFilterProcessScannerRejectsInvalidHeaderPackets(t *testing.T) { from := bytes.NewBuffer([]byte{ 0x30, 0x30, 0x30, 0x33, // 0003 (invalid packet length) }) req, err := readRequest(NewFilterProcessScanner(from, nil)) require.NotNil(t, err) assert.Equal(t, "Invalid packet length.", err.Error()) assert.Nil(t, req) } func TestFilterProcessScannerWritesLists(t *testing.T) { var to bytes.Buffer fps := NewFilterProcessScanner(nil, &to) err := fps.WriteList([]string{"hello", "goodbye"}) assert.NoError(t, err) assert.Equal(t, "000ahello\n000cgoodbye\n0000", to.String()) } // readRequest performs a single scan operation on the given // `*FilterProcessScanner`, "s", and returns: an error if there was one, or a // request if there was one. If neither, it returns (nil, nil). func readRequest(s *FilterProcessScanner) (*Request, error) { s.Scan() if err := s.Err(); err != nil { return nil, err } return s.Request(), nil } git-lfs-3.6.1/git/filter_process_status.go000066400000000000000000000016141472372047300206410ustar00rootroot00000000000000package git import "github.com/git-lfs/git-lfs/v3/tr" // FilterProcessStatus is a constant type representing the various valid // responses for `status=` in the Git filtering process protocol. type FilterProcessStatus uint8 const ( // StatusSuccess is a valid response when a successful event has // occurred. StatusSuccess FilterProcessStatus = iota + 1 // StatusDelay is a valid response when a delay has occurred. StatusDelay // StatusError is a valid response when an error has occurred. StatusError ) // String implements fmt.Stringer by returning a protocol-compliant // representation of the receiving status, or panic()-ing if the Status is // unknown. 
func (s FilterProcessStatus) String() string {
	switch s {
	case StatusSuccess:
		return "success"
	case StatusDelay:
		return "delayed"
	case StatusError:
		return "error"
	}

	panic(tr.Tr.Get("unknown FilterProcessStatus '%d'", s))
}
git-lfs-3.6.1/git/git.go000066400000000000000000001372111472372047300150010ustar00rootroot00000000000000// Package git contains various commands that shell out to git
// NOTE: Subject to change, do not rely on this package from outside git-lfs source
package git

import (
	"bufio"
	"bytes"
	"crypto/sha1"
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net/url"
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	lfserrors "github.com/git-lfs/git-lfs/v3/errors"
	"github.com/git-lfs/git-lfs/v3/subprocess"
	"github.com/git-lfs/git-lfs/v3/tools"
	"github.com/git-lfs/git-lfs/v3/tr"
	"github.com/git-lfs/gitobj/v2"
	"github.com/rubyist/tracerx"
)

type RefType int

const (
	RefTypeLocalBranch  = RefType(iota)
	RefTypeRemoteBranch = RefType(iota)
	RefTypeLocalTag     = RefType(iota)
	RefTypeRemoteTag    = RefType(iota)
	RefTypeHEAD         = RefType(iota) // current checkout
	RefTypeOther        = RefType(iota) // stash or unknown

	SHA1HexSize   = sha1.Size * 2
	SHA256HexSize = sha256.Size * 2
)

var (
	ObjectIDRegex = fmt.Sprintf("(?:[0-9a-f]{%d}(?:[0-9a-f]{%d})?)", SHA1HexSize, SHA256HexSize-SHA1HexSize)

	// ObjectIDLengths is a slice of valid Git hexadecimal object ID
	// lengths in increasing order.
	ObjectIDLengths = []int{SHA1HexSize, SHA256HexSize}

	emptyTree      = ""
	emptyTreeMutex = &sync.Mutex{}
)

type IndexStage int

const (
	IndexStageDefault IndexStage = iota
	IndexStageBase
	IndexStageOurs
	IndexStageTheirs
)

// Prefix returns the given RefType's prefix, "refs/heads", "refs/remotes",
// etc. It also returns a boolean indicating whether the given ref type has a
// prefix.
//
// If the RefType is unrecognized, Prefix() returns an empty string and false.
func (t RefType) Prefix() (string, bool) {
	switch t {
	case RefTypeLocalBranch:
		return "refs/heads", true
	case RefTypeRemoteBranch:
		return "refs/remotes", true
	case RefTypeLocalTag:
		return "refs/tags", true
	default:
		return "", false
	}
}

func ParseRef(absRef, sha string) *Ref {
	r := &Ref{Sha: sha}
	if strings.HasPrefix(absRef, "refs/heads/") {
		r.Name = absRef[11:]
		r.Type = RefTypeLocalBranch
	} else if strings.HasPrefix(absRef, "refs/tags/") {
		r.Name = absRef[10:]
		r.Type = RefTypeLocalTag
	} else if strings.HasPrefix(absRef, "refs/remotes/") {
		r.Name = absRef[13:]
		r.Type = RefTypeRemoteBranch
	} else {
		r.Name = absRef
		if absRef == "HEAD" {
			r.Type = RefTypeHEAD
		} else {
			r.Type = RefTypeOther
		}
	}
	return r
}

// A git reference (branch, tag etc)
type Ref struct {
	Name string
	Type RefType
	Sha  string
}

// Refspec returns the fully-qualified reference name (including remote), i.e.,
// for a remote branch called 'my-feature' on remote 'origin', this function
// will return:
//
//	refs/remotes/origin/my-feature
func (r *Ref) Refspec() string {
	if r == nil {
		return ""
	}

	prefix, ok := r.Type.Prefix()
	if ok {
		return fmt.Sprintf("%s/%s", prefix, r.Name)
	}

	return r.Name
}

// HasValidObjectIDLength returns true if `s` has a length that is a valid
// hexadecimal Git object ID length.
func HasValidObjectIDLength(s string) bool {
	for _, length := range ObjectIDLengths {
		if len(s) == length {
			return true
		}
	}
	return false
}

// IsZeroObjectID returns true if the string is a valid hexadecimal Git object
// ID and represents the all-zeros object ID for some hash algorithm.
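//
// For example, the 40-character all-zeros string (SHA-1) and the 64-character
// all-zeros string (SHA-256) both return true; strings of any other length or
// content return false.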
func IsZeroObjectID(s string) bool { for _, length := range ObjectIDLengths { if s == strings.Repeat("0", length) { return true } } return false } func EmptyTree() (string, error) { emptyTreeMutex.Lock() defer emptyTreeMutex.Unlock() if len(emptyTree) == 0 { cmd, err := gitNoLFS("hash-object", "-t", "tree", "/dev/null") if err != nil { return "", errors.New(tr.Tr.Get("failed to find `git hash-object`: %v", err)) } cmd.Stdin = nil out, _ := cmd.Output() emptyTree = strings.TrimSpace(string(out)) } return emptyTree, nil } // Some top level information about a commit (only first line of message) type CommitSummary struct { Sha string ShortSha string Parents []string CommitDate time.Time AuthorDate time.Time AuthorName string AuthorEmail string CommitterName string CommitterEmail string Subject string } // Prepend Git config instructions to disable Git LFS filter func gitConfigNoLFS(args ...string) []string { // Before git 2.8, setting filters to blank causes lots of warnings, so use cat instead (slightly slower) // Also pre 2.2 it failed completely. We used to use it anyway in git 2.2-2.7 and // suppress the messages in stderr, but doing that with standard StderrPipe suppresses // the git clone output (git thinks it's not a terminal) and makes it look like it's // not working. You can get around that with https://github.com/kr/pty but that // causes difficult issues with passing through Stdin for login prompts // This way is simpler & more practical. filterOverride := "" if !IsGitVersionAtLeast("2.8.0") { filterOverride = "cat" } return append([]string{ "-c", fmt.Sprintf("filter.lfs.smudge=%v", filterOverride), "-c", fmt.Sprintf("filter.lfs.clean=%v", filterOverride), "-c", "filter.lfs.process=", "-c", "filter.lfs.required=false", }, args...) } // Invoke Git with disabled LFS filters func gitNoLFS(args ...string) (*subprocess.Cmd, error) { return subprocess.ExecCommand("git", gitConfigNoLFS(args...)...) } func gitNoLFSSimple(args ...string) (string, error) { return subprocess.SimpleExec("git", gitConfigNoLFS(args...)...) } func gitNoLFSBuffered(args ...string) (*subprocess.BufferedCmd, error) { return subprocess.BufferedExec("git", gitConfigNoLFS(args...)...) } func gitNoLFSBufferedStdout(args ...string) (*subprocess.BufferedCmd, error) { return subprocess.StdoutBufferedExec("git", gitConfigNoLFS(args...)...) } // Invoke Git with enabled LFS filters func git(args ...string) (*subprocess.Cmd, error) { return subprocess.ExecCommand("git", args...) } func gitSimple(args ...string) (string, error) { return subprocess.SimpleExec("git", args...) } func gitBuffered(args ...string) (*subprocess.BufferedCmd, error) { return subprocess.BufferedExec("git", args...) } func gitBufferedStdout(args ...string) (*subprocess.BufferedCmd, error) { return subprocess.StdoutBufferedExec("git", args...) } func CatFile() (*subprocess.BufferedCmd, error) { return gitNoLFSBuffered("cat-file", "--batch-check") } func DiffIndex(ref string, cached bool, refresh bool, workingDir string) (*bufio.Scanner, error) { if refresh { _, err := gitSimple("update-index", "-q", "--refresh") if err != nil { return nil, lfserrors.Wrap(err, tr.Tr.Get("Failed to run `git update-index`")) } } args := []string{"diff-index", "-M"} if cached { args = append(args, "--cached") } args = append(args, ref) if workingDir != "" { args = append([]string{"-C", workingDir}, args...) } cmd, err := gitBufferedStdout(args...) 
	if err != nil {
		return nil, err
	}
	if err = cmd.Stdin.Close(); err != nil {
		return nil, err
	}

	return bufio.NewScanner(cmd.Stdout), nil
}

func DiffIndexWithPaths(ref string, cached bool, paths []string) (string, error) {
	args := []string{"diff-index"}
	if cached {
		args = append(args, "--cached")
	}
	args = append(args, ref)
	args = append(args, "--")
	args = append(args, paths...)
	output, err := gitSimple(args...)
	if err != nil {
		return "", err
	}
	return output, nil
}

func HashObject(r io.Reader) (string, error) {
	cmd, err := gitNoLFS("hash-object", "--stdin")
	if err != nil {
		return "", errors.New(tr.Tr.Get("failed to find `git hash-object`: %v", err))
	}
	cmd.Stdin = r
	out, err := cmd.Output()
	if err != nil {
		return "", errors.New(tr.Tr.Get("error building Git blob OID: %s", err))
	}

	return string(bytes.TrimSpace(out)), nil
}

func Log(args ...string) (*subprocess.BufferedCmd, error) {
	logArgs := append([]string{"log"}, args...)
	return gitNoLFSBuffered(logArgs...)
}

func LsRemote(remote, remoteRef string) (string, error) {
	if remote == "" {
		return "", errors.New(tr.Tr.Get("remote required"))
	}
	if remoteRef == "" {
		return gitNoLFSSimple("ls-remote", remote)
	}
	return gitNoLFSSimple("ls-remote", remote, remoteRef)
}

func LsTree(ref string) (*subprocess.BufferedCmd, error) {
	return gitNoLFSBuffered(
		"ls-tree",
		"-r",          // recurse
		"-l",          // report object size (we'll need this)
		"-z",          // null line termination
		"--full-tree", // start at the root regardless of where we are in it
		ref,
	)
}

func LsFilesLFS() (*subprocess.BufferedCmd, error) {
	// This requires Git 2.42.0 for `--format` with `objecttype`.
	return gitNoLFSBuffered(
		"ls-files",
		"--cached",
		"--exclude-standard",
		"--full-name",
		"--sparse",
		"-z",
		"--format=%(objectmode) %(objecttype) %(objectname) %(objectsize)\t%(path)",
		":(top,attr:filter=lfs)",
	)
}

func ResolveRef(ref string) (*Ref, error) {
	outp, err := gitNoLFSSimple("rev-parse", ref, "--symbolic-full-name", ref)
	if err != nil {
		return nil, errors.New(tr.Tr.Get("Git can't resolve ref: %q", ref))
	}
	if outp == "" {
		return nil, errors.New(tr.Tr.Get("Git can't resolve ref: %q", ref))
	}

	lines := strings.Split(outp, "\n")
	fullref := &Ref{Sha: lines[0]}

	if len(lines) == 1 {
		// ref is a sha1 and has no symbolic-full-name
		fullref.Name = lines[0]
		fullref.Sha = lines[0]
		fullref.Type = RefTypeOther
		return fullref, nil
	}

	// parse the symbolic-full-name
	fullref.Type, fullref.Name = ParseRefToTypeAndName(lines[1])
	return fullref, nil
}

func ResolveRefs(refnames []string) ([]*Ref, error) {
	refs := make([]*Ref, len(refnames))
	for i, name := range refnames {
		ref, err := ResolveRef(name)
		if err != nil {
			return refs, err
		}
		refs[i] = ref
	}
	return refs, nil
}

func CurrentRef() (*Ref, error) {
	return ResolveRef("HEAD")
}

func (c *Configuration) CurrentRemoteRef() (*Ref, error) {
	remoteref, err := c.RemoteRefNameForCurrentBranch()
	if err != nil {
		return nil, err
	}

	return ResolveRef(remoteref)
}

// RemoteRefNameForCurrentBranch returns the full remote ref (refs/remotes/{remote}/{remotebranch})
// that the current branch is tracking.
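//
// For example, a local branch "my-feature" tracking the branch of the same
// name on the remote "origin" yields "refs/remotes/origin/my-feature".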
func (c *Configuration) RemoteRefNameForCurrentBranch() (string, error) {
	ref, err := CurrentRef()
	if err != nil {
		return "", err
	}

	if ref.Type == RefTypeHEAD || ref.Type == RefTypeOther {
		return "", errors.New(tr.Tr.Get("not on a branch"))
	}

	remote := c.RemoteForBranch(ref.Name)
	if remote == "" {
		return "", errors.New(tr.Tr.Get("remote not found for branch %q", ref.Name))
	}

	remotebranch := c.RemoteBranchForLocalBranch(ref.Name)

	return fmt.Sprintf("refs/remotes/%s/%s", remote, remotebranch), nil
}

// RemoteForBranch returns the remote name that a given local branch is tracking (blank if none)
func (c *Configuration) RemoteForBranch(localBranch string) string {
	return c.Find(fmt.Sprintf("branch.%s.remote", localBranch))
}

// RemoteBranchForLocalBranch returns the name (only) of the remote branch that the local branch is tracking
// If no specific branch is configured, returns local branch name
func (c *Configuration) RemoteBranchForLocalBranch(localBranch string) string {
	// get remote ref to track, may not be same name
	merge := c.Find(fmt.Sprintf("branch.%s.merge", localBranch))
	if strings.HasPrefix(merge, "refs/heads/") {
		return merge[11:]
	} else {
		return localBranch
	}
}

func RemoteList() ([]string, error) {
	cmd, err := gitNoLFS("remote")
	if err != nil {
		return nil, errors.New(tr.Tr.Get("failed to find `git remote`: %v", err))
	}

	outp, err := cmd.StdoutPipe()
	if err != nil {
		return nil, errors.New(tr.Tr.Get("failed to call `git remote`: %v", err))
	}
	cmd.Start()
	defer cmd.Wait()

	scanner := bufio.NewScanner(outp)

	var ret []string
	for scanner.Scan() {
		ret = append(ret, strings.TrimSpace(scanner.Text()))
	}

	return ret, nil
}

func RemoteURLs(push bool) (map[string][]string, error) {
	cmd, err := gitNoLFS("remote", "-v")
	if err != nil {
		return nil, errors.New(tr.Tr.Get("failed to find `git remote -v`: %v", err))
	}

	outp, err := cmd.StdoutPipe()
	if err != nil {
		return nil, errors.New(tr.Tr.Get("failed to call `git remote -v`: %v", err))
	}
	cmd.Start()
	defer cmd.Wait()

	scanner := bufio.NewScanner(outp)

	text := "(fetch)"
	if push {
		text = "(push)"
	}
	ret := make(map[string][]string)
	for scanner.Scan() {
		// [remote, urlpair-text]
		pair := strings.Split(strings.TrimSpace(scanner.Text()), "\t")
		if len(pair) != 2 {
			continue
		}
		// [url, "(fetch)" | "(push)"]
		urlpair := strings.Split(pair[1], " ")
		if len(urlpair) != 2 || urlpair[1] != text {
			continue
		}
		ret[pair[0]] = append(ret[pair[0]], urlpair[0])
	}

	return ret, nil
}

func MapRemoteURL(url string, push bool) (string, bool) {
	urls, err := RemoteURLs(push)
	if err != nil {
		return url, false
	}

	for name, remotes := range urls {
		if len(remotes) == 1 && url == remotes[0] {
			return name, true
		}
	}
	return url, false
}

// LocalRefs returns all of the local branches and tags for the current
// repository. Other refs (HEAD, remote branches, refs/stash, git notes) are
// ignored.
func LocalRefs() ([]*Ref, error) {
	cmd, err := gitNoLFS("show-ref")
	if err != nil {
		return nil, errors.New(tr.Tr.Get("failed to find `git show-ref`: %v", err))
	}

	outp, err := cmd.StdoutPipe()
	if err != nil {
		return nil, errors.New(tr.Tr.Get("failed to call `git show-ref`: %v", err))
	}

	var refs []*Ref

	if err := cmd.Start(); err != nil {
		return refs, err
	}

	scanner := bufio.NewScanner(outp)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		parts := strings.SplitN(line, " ", 2)
		if len(parts) != 2 || !HasValidObjectIDLength(parts[0]) || len(parts[1]) < 1 {
			tracerx.Printf("Invalid line from `git show-ref`: %q", line)
			continue
		}

		rtype, name := ParseRefToTypeAndName(parts[1])
		if rtype != RefTypeLocalBranch && rtype != RefTypeLocalTag {
			continue
		}

		refs = append(refs, &Ref{name, rtype, parts[0]})
	}

	return refs, cmd.Wait()
}

// UpdateRef moves the given ref to a new sha with a given reason (and creates a
// reflog entry, if a "reason" was provided). It returns an error if any were
// encountered.
func UpdateRef(ref *Ref, to []byte, reason string) error {
	return UpdateRefIn("", ref, to, reason)
}

// UpdateRefIn moves the given ref to a new sha with a given reason (and
// creates a reflog entry, if a "reason" was provided). It operates within the
// given working directory "wd". It returns an error if any were encountered.
func UpdateRefIn(wd string, ref *Ref, to []byte, reason string) error {
	args := []string{"update-ref", ref.Refspec(), hex.EncodeToString(to)}
	if len(reason) > 0 {
		args = append(args, "-m", reason)
	}

	cmd, err := gitNoLFS(args...)
	if err != nil {
		return errors.New(tr.Tr.Get("failed to find `git update-ref`: %v", err))
	}
	cmd.Dir = wd
	return cmd.Run()
}

// ValidateRemote checks that a named remote is valid for use
// Mainly to check user-supplied remotes & fail more nicely
func ValidateRemote(remote string) error {
	remotes, err := RemoteList()
	if err != nil {
		return err
	}
	return ValidateRemoteFromList(remotes, remote)
}

// ValidateRemoteFromList checks that a named remote is valid for use given a
// list from RemoteList. It is otherwise identical to ValidateRemote, except
// that it allows caching the remote list.
func ValidateRemoteFromList(remotes []string, remote string) error {
	for _, r := range remotes {
		if r == remote {
			return nil
		}
	}

	if err := ValidateRemoteURL(remote); err == nil {
		return nil
	}

	return errors.New(tr.Tr.Get("invalid remote name: %q", remote))
}

// ValidateRemoteURL checks that a string is a valid Git remote URL
func ValidateRemoteURL(remote string) error {
	u, _ := url.Parse(remote)
	if u == nil || u.Scheme == "" {
		// This is either an invalid remote name (maybe the user made a typo
		// when selecting a named remote) or a bare SSH URL like
		// "x@y.com:path/to/resource.git". Guess that this is a URL in the latter
		// form if the string contains a colon ":", and an invalid remote if it
		// does not.
		if strings.Contains(remote, ":") {
			return nil
		} else {
			return errors.New(tr.Tr.Get("invalid remote name: %q", remote))
		}
	}

	switch u.Scheme {
	case "ssh", "http", "https", "git", "file":
		return nil
	default:
		return errors.New(tr.Tr.Get("invalid remote URL protocol %q in %q", u.Scheme, remote))
	}
}

func RewriteLocalPathAsURL(path string) string {
	var slash string
	if abs, err := filepath.Abs(path); err == nil {
		// Required for Windows paths to work.
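		// An absolute Windows path such as `C:\repo` has no leading
		// slash, so one must be prepended to form a well-formed file://
		// URL, e.g. `file:///C:/repo`; POSIX absolute paths already
		// begin with "/".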
if !strings.HasPrefix(abs, "/") { slash = "/" } path = abs } var gitpath string if filepath.Base(path) == ".git" { gitpath = path path = filepath.Dir(path) } else { gitpath = filepath.Join(path, ".git") } if _, err := os.Stat(gitpath); err == nil { path = gitpath } else if _, err := os.Stat(path); err != nil { // Not a local path. We check down here because we perform // canonicalization by stripping off the .git above. return path } return fmt.Sprintf("file://%s%s", slash, filepath.ToSlash(path)) } func UpdateIndexFromStdin() (*subprocess.Cmd, error) { return git("update-index", "-q", "--refresh", "--stdin") } // RecentBranches returns branches with commit dates on or after the given date/time // Return full Ref type for easier detection of duplicate SHAs etc // since: refs with commits on or after this date will be included // includeRemoteBranches: true to include refs on remote branches // onlyRemote: set to non-blank to only include remote branches on a single remote func RecentBranches(since time.Time, includeRemoteBranches bool, onlyRemote string) ([]*Ref, error) { cmd, err := gitNoLFS("for-each-ref", `--sort=-committerdate`, `--format=%(refname) %(objectname) %(committerdate:iso)`, "refs") if err != nil { return nil, errors.New(tr.Tr.Get("failed to find `git for-each-ref`: %v", err)) } outp, err := cmd.StdoutPipe() if err != nil { return nil, errors.New(tr.Tr.Get("failed to call `git for-each-ref`: %v", err)) } cmd.Start() defer cmd.Wait() scanner := bufio.NewScanner(outp) // Output is like this: // refs/heads/master f03686b324b29ff480591745dbfbbfa5e5ac1bd5 2015-08-19 16:50:37 +0100 // refs/remotes/origin/master ad3b29b773e46ad6870fdf08796c33d97190fe93 2015-08-13 16:50:37 +0100 // Output is ordered by latest commit date first, so we can stop at the threshold regex := regexp.MustCompile(fmt.Sprintf(`^(refs/[^/]+/\S+)\s+(%s)\s+(\d{4}-\d{2}-\d{2}\s+\d{2}\:\d{2}\:\d{2}\s+[\+\-]\d{4})`, ObjectIDRegex)) tracerx.Printf("RECENT: Getting refs >= %v", since) var ret []*Ref for scanner.Scan() { line := scanner.Text() if match := regex.FindStringSubmatch(line); match != nil { fullref := match[1] sha := match[2] reftype, ref := ParseRefToTypeAndName(fullref) if reftype == RefTypeRemoteBranch { if !includeRemoteBranches { continue } if onlyRemote != "" && !strings.HasPrefix(ref, onlyRemote+"/") { continue } } // This is a ref we might use // Check the date commitDate, err := ParseGitDate(match[3]) if err != nil { return ret, err } if commitDate.Before(since) { // the end break } tracerx.Printf("RECENT: %v (%v)", ref, commitDate) ret = append(ret, &Ref{ref, reftype, sha}) } } return ret, nil } // Get the type & name of a git reference func ParseRefToTypeAndName(fullref string) (t RefType, name string) { const localPrefix = "refs/heads/" const remotePrefix = "refs/remotes/" const localTagPrefix = "refs/tags/" if fullref == "HEAD" { name = fullref t = RefTypeHEAD } else if strings.HasPrefix(fullref, localPrefix) { name = fullref[len(localPrefix):] t = RefTypeLocalBranch } else if strings.HasPrefix(fullref, remotePrefix) { name = fullref[len(remotePrefix):] t = RefTypeRemoteBranch } else if strings.HasPrefix(fullref, localTagPrefix) { name = fullref[len(localTagPrefix):] t = RefTypeLocalTag } else { name = fullref t = RefTypeOther } return } // Parse a Git date formatted in ISO 8601 format (%ci/%ai) func ParseGitDate(str string) (time.Time, error) { // Unfortunately Go and Git don't overlap in their builtin date formats // Go's time.RFC1123Z and Git's %cD are ALMOST the same, except that // when the 
day is < 10 Git outputs a single digit, but Go expects a leading // zero - this is enough to break the parsing. Sigh. // Format is for 2 Jan 2006, 15:04:05 -7 UTC as per Go return time.Parse("2006-01-02 15:04:05 -0700", str) } // FormatGitDate converts a Go date into a git command line format date func FormatGitDate(tm time.Time) string { // Git format is "Fri Jun 21 20:26:41 2013 +0900" but no zero-leading for day return tm.Format("Mon Jan 2 15:04:05 2006 -0700") } // Get summary information about a commit func GetCommitSummary(commit string) (*CommitSummary, error) { cmd, err := gitNoLFS("show", "-s", `--format=%H|%h|%P|%ai|%ci|%ae|%an|%ce|%cn|%s`, commit) if err != nil { return nil, errors.New(tr.Tr.Get("failed to find `git show`: %v", err)) } out, err := cmd.CombinedOutput() if err != nil { return nil, errors.New(tr.Tr.Get("failed to call `git show`: %v %v", err, string(out))) } // At most 10 substrings so subject line is not split on anything fields := strings.SplitN(string(out), "|", 10) // Cope with the case where subject is blank if len(fields) >= 9 { ret := &CommitSummary{} // Get SHAs from output, not commit input, so we can support symbolic refs ret.Sha = fields[0] ret.ShortSha = fields[1] ret.Parents = strings.Split(fields[2], " ") // %aD & %cD (RFC2822) matches Go's RFC1123Z format ret.AuthorDate, _ = ParseGitDate(fields[3]) ret.CommitDate, _ = ParseGitDate(fields[4]) ret.AuthorEmail = fields[5] ret.AuthorName = fields[6] ret.CommitterEmail = fields[7] ret.CommitterName = fields[8] if len(fields) > 9 { ret.Subject = strings.TrimRight(fields[9], "\n") } return ret, nil } else { msg := tr.Tr.Get("Unexpected output from `git show`: %v", string(out)) return nil, errors.New(msg) } } func GitAndRootDirs() (string, string, error) { cmd, err := gitNoLFS("rev-parse", "--git-dir", "--show-toplevel") if err != nil { return "", "", errors.New(tr.Tr.Get("failed to find `git rev-parse --git-dir --show-toplevel`: %v", err)) } buf := &bytes.Buffer{} cmd.Stderr = buf out, err := cmd.Output() output := string(out) if err != nil { // If we got a fatal error, it's possible we're on a newer // (2.24+) Git and we're not in a worktree, so fall back to just // looking up the repo directory. 
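		// Exit status 128 is Git's generic fatal-error status, as in
		// "fatal: this operation must be run in a work tree".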
		if lfserrors.ExitStatus(err) == 128 {
			absGitDir, err := GitDir()
			return absGitDir, "", err
		}
		return "", "", errors.New(tr.Tr.Get("failed to call `git rev-parse --git-dir --show-toplevel`: %q", buf.String()))
	}

	paths := strings.Split(output, "\n")
	pathLen := len(paths)
	if pathLen == 0 {
		return "", "", errors.New(tr.Tr.Get("bad `git rev-parse` output: %q", output))
	}

	absGitDir, err := tools.CanonicalizePath(paths[0], false)
	if err != nil {
		return "", "", errors.New(tr.Tr.Get("error converting %q to absolute: %s", paths[0], err))
	}

	if pathLen == 1 || len(paths[1]) == 0 {
		return absGitDir, "", nil
	}

	absRootDir, err := tools.CanonicalizePath(paths[1], false)
	return absGitDir, absRootDir, err
}

func RootDir() (string, error) {
	cmd, err := gitNoLFS("rev-parse", "--show-toplevel")
	if err != nil {
		return "", errors.New(tr.Tr.Get("failed to find `git rev-parse --show-toplevel`: %v", err))
	}

	out, err := cmd.Output()
	if err != nil {
		return "", errors.New(tr.Tr.Get("failed to call `git rev-parse --show-toplevel`: %v %v", err, string(out)))
	}

	path := strings.TrimSpace(string(out))
	path, err = tools.TranslateCygwinPath(path)
	if err != nil {
		return "", err
	}
	if len(path) == 0 {
		return "", errors.New(tr.Tr.Get("no output from `git rev-parse --show-toplevel`"))
	}

	return tools.CanonicalizePath(path, false)
}

func GitDir() (string, error) {
	cmd, err := gitNoLFS("rev-parse", "--git-dir")
	if err != nil {
		// The %w format specifier is unique to fmt.Errorf(), so we
		// do not pass it to tr.Tr.Get().
		return "", fmt.Errorf("%s: %w", tr.Tr.Get("failed to find `git rev-parse --git-dir`"), err)
	}
	buf := &bytes.Buffer{}
	cmd.Stderr = buf
	out, err := cmd.Output()
	if err != nil {
		// The %w format specifier is unique to fmt.Errorf(), so we
		// do not pass it to tr.Tr.Get().
		return "", fmt.Errorf("%s: %w %v: %v", tr.Tr.Get("failed to call `git rev-parse --git-dir`"), err, string(out), buf.String())
	}
	path := strings.TrimSpace(string(out))
	return tools.CanonicalizePath(path, false)
}

func GitCommonDir() (string, error) {
	// Versions before 2.5.0 don't have the --git-common-dir option, since
	// it came in with worktrees, so just fall back to the main Git
	// directory.
	if !IsGitVersionAtLeast("2.5.0") {
		return GitDir()
	}

	cmd, err := gitNoLFS("rev-parse", "--git-common-dir")
	if err != nil {
		return "", errors.New(tr.Tr.Get("failed to find `git rev-parse --git-common-dir`: %v", err))
	}
	// Attach the stderr buffer before running the command so that any
	// error output is actually captured.
	buf := &bytes.Buffer{}
	cmd.Stderr = buf
	out, err := cmd.Output()
	if err != nil {
		return "", errors.New(tr.Tr.Get("failed to call `git rev-parse --git-common-dir`: %v %v: %v", err, string(out), buf.String()))
	}
	path := strings.TrimSpace(string(out))
	path, err = tools.TranslateCygwinPath(path)
	if err != nil {
		return "", err
	}
	return tools.CanonicalizePath(path, false)
}

// A git worktree (ref + path + flags)
type Worktree struct {
	Ref      Ref
	Dir      string
	Prunable bool
}

// GetAllWorktrees returns the refs that all worktrees are using as HEADs plus the worktree's path.
// This returns all worktrees plus the main working copy, and works even if
// working dir is actually in a worktree right now
//
// Pass in the git storage dir (parent of 'objects') to work from, in case
// we need to fall back to reading the worktree files directly.
func GetAllWorktrees(storageDir string) ([]*Worktree, error) {
	// Versions before 2.7.0 don't support "git-worktree list", and
	// those before 2.36.0 don't support the "-z" option, so in these
	// cases we fall back to reading the .git/worktrees directory entries
	// and then reading the current worktree's HEAD ref.
This requires // the contents of .git/worktrees/*/gitdir files to be absolute paths, // which is only true for Git versions prior to 2.48.0. if !IsGitVersionAtLeast("2.36.0") { return getAllWorktreesFromGitDir(storageDir) } cmd, err := gitNoLFS( "worktree", "list", "--porcelain", "-z", // handle special chars in filenames ) if err != nil { return nil, errors.New(tr.Tr.Get("failed to find `git worktree`: %v", err)) } stdout, err := cmd.StdoutPipe() if err != nil { return nil, errors.New(tr.Tr.Get("failed to open output pipe to `git worktree`: %v", err)) } stderr, err := cmd.StderrPipe() if err != nil { return nil, errors.New(tr.Tr.Get("failed to open error pipe to `git worktree`: %v", err)) } if err := cmd.Start(); err != nil { return nil, errors.New(tr.Tr.Get("failed to start `git worktree`: %v", err)) } scanner := bufio.NewScanner(stdout) scanner.Split(tools.SplitOnNul) var dir string var ref *Ref var prunable bool var worktrees []*Worktree for scanner.Scan() { line := scanner.Text() if len(line) == 0 { if len(dir) > 0 && ref != nil && len(ref.Sha) > 0 { worktrees = append(worktrees, &Worktree{ Ref: *ref, Dir: dir, Prunable: prunable, }) } dir = "" ref = nil continue } parts := strings.SplitN(scanner.Text(), " ", 2) // We ignore other attributes such as "locked" for now. switch parts[0] { case "worktree": if len(parts) == 2 && len(dir) == 0 { dir = filepath.Clean(parts[1]) ref = &Ref{Type: RefTypeOther} prunable = false } case "HEAD": if len(parts) == 2 && ref != nil { ref.Sha = parts[1] ref.Name = parts[1] } case "branch": if len(parts) == 2 && ref != nil && len(ref.Sha) > 0 { ref = ParseRef(parts[1], ref.Sha) } case "bare": // We ignore bare worktrees. dir = "" ref = nil case "prunable": prunable = true } } // We assume any error output will be short and won't block // command completion if it isn't drained by a separate goroutine. msg, _ := io.ReadAll(stderr) if err := cmd.Wait(); err != nil { return nil, errors.New(tr.Tr.Get("error in `git worktree`: %v: %s", err, msg)) } return worktrees, nil } func getAllWorktreesFromGitDir(storageDir string) ([]*Worktree, error) { worktreesdir := filepath.Join(storageDir, "worktrees") dirf, err := os.Open(worktreesdir) if err != nil && !os.IsNotExist(err) { return nil, err } var worktrees []*Worktree if err == nil { // There are some worktrees defer dirf.Close() direntries, err := dirf.Readdir(0) if err != nil { return nil, err } for _, dirfi := range direntries { if dirfi.IsDir() { // to avoid having to chdir and run git commands to identify the commit // just read the HEAD file & git rev-parse if necessary // Since the git repo is shared the same rev-parse will work from this location headfile := filepath.Join(worktreesdir, dirfi.Name(), "HEAD") ref, err := parseRefFile(headfile) if err != nil { tracerx.Printf("Error reading %v for worktree, skipping: %v", headfile, err) continue } // Read the gitdir file to get the location of the git repo dirfile := filepath.Join(worktreesdir, dirfi.Name(), "gitdir") dir, err := parseDirFile(dirfile) if err != nil { tracerx.Printf("Error reading %v for worktree, skipping: %v", dirfile, err) continue } // Check if the worktree exists. 
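				// The gitdir file names the worktree's ".git" file, so
				// its parent directory is the worktree's checkout
				// directory.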
dir = filepath.Dir(dir) var prunable bool if _, err := os.Stat(dir); err != nil { if os.IsNotExist(err) { prunable = true } else { tracerx.Printf("Error checking worktree directory %s: %v", dir, err) } } worktrees = append(worktrees, &Worktree{ Ref: *ref, Dir: dir, Prunable: prunable, }) } } } // This has only established the separate worktrees, not the original checkout // If the storageDir contains a HEAD file and a RootDir then there is a main checkout // as well; this must be resolveable whether you're in the main checkout or // a worktree headfile := filepath.Join(storageDir, "HEAD") ref, err := parseRefFile(headfile) if err == nil { dir, err := RootDir() if err == nil { worktrees = append(worktrees, &Worktree{ Ref: *ref, Dir: dir, Prunable: false, }) } else { // ok if not exists, probably bare repo tracerx.Printf("Error getting toplevel for main checkout, skipping: %v", err) } } else if !os.IsNotExist(err) { // ok if not exists, probably bare repo tracerx.Printf("Error reading %v for main checkout, skipping: %v", headfile, err) } return worktrees, nil } // Manually parse a reference file like HEAD and return the Ref it resolves to func parseRefFile(filename string) (*Ref, error) { bytes, err := os.ReadFile(filename) if err != nil { return nil, err } contents := strings.TrimSpace(string(bytes)) if strings.HasPrefix(contents, "ref:") { contents = strings.TrimSpace(contents[4:]) } return ResolveRef(contents) } func parseDirFile(filename string) (string, error) { bytes, err := os.ReadFile(filename) if err != nil { return "", err } contents := strings.TrimSpace(string(bytes)) return contents, nil } // IsBare returns whether or not a repository is bare. It requires that the // current working directory is a repository. // // If there was an error determining whether or not the repository is bare, it // will be returned. 
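//
// A minimal usage sketch (error handling elided):
//
//	if bare, _ := IsBare(); bare {
//		// skip operations that require a working tree
//	}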
func IsBare() (bool, error) { s, err := subprocess.SimpleExec( "git", "rev-parse", "--is-bare-repository") if err != nil { return false, err } return strconv.ParseBool(s) } // For compatibility with git clone we must mirror all flags in CloneWithoutFilters type CloneFlags struct { // --template TemplateDirectory string // -l --local Local bool // -s --shared Shared bool // --no-hardlinks NoHardlinks bool // -q --quiet Quiet bool // -n --no-checkout NoCheckout bool // --progress Progress bool // --bare Bare bool // --mirror Mirror bool // -o --origin Origin string // -b --branch Branch string // -u --upload-pack Upload string // --reference Reference string // --reference-if-able ReferenceIfAble string // --dissociate Dissociate bool // --separate-git-dir SeparateGit string // --depth Depth string // --recursive Recursive bool // --recurse-submodules RecurseSubmodules bool // -c --config Config string // --single-branch SingleBranch bool // --no-single-branch NoSingleBranch bool // --verbose Verbose bool // --ipv4 Ipv4 bool // --ipv6 Ipv6 bool // --shallow-since ShallowSince string // --shallow-since ShallowExclude string // --shallow-submodules ShallowSubmodules bool // --no-shallow-submodules NoShallowSubmodules bool // jobs Jobs int64 } // CloneWithoutFilters clones a git repo but without the smudge filter enabled // so that files in the working copy will be pointers and not real LFS data func CloneWithoutFilters(flags CloneFlags, args []string) error { cmdargs := []string{"clone"} // flags if flags.Bare { cmdargs = append(cmdargs, "--bare") } if len(flags.Branch) > 0 { cmdargs = append(cmdargs, "--branch", flags.Branch) } if len(flags.Config) > 0 { cmdargs = append(cmdargs, "--config", flags.Config) } if len(flags.Depth) > 0 { cmdargs = append(cmdargs, "--depth", flags.Depth) } if flags.Dissociate { cmdargs = append(cmdargs, "--dissociate") } if flags.Ipv4 { cmdargs = append(cmdargs, "--ipv4") } if flags.Ipv6 { cmdargs = append(cmdargs, "--ipv6") } if flags.Local { cmdargs = append(cmdargs, "--local") } if flags.Mirror { cmdargs = append(cmdargs, "--mirror") } if flags.NoCheckout { cmdargs = append(cmdargs, "--no-checkout") } if flags.NoHardlinks { cmdargs = append(cmdargs, "--no-hardlinks") } if flags.NoSingleBranch { cmdargs = append(cmdargs, "--no-single-branch") } if len(flags.Origin) > 0 { cmdargs = append(cmdargs, "--origin", flags.Origin) } if flags.Progress { cmdargs = append(cmdargs, "--progress") } if flags.Quiet { cmdargs = append(cmdargs, "--quiet") } if flags.Recursive { cmdargs = append(cmdargs, "--recursive") } if flags.RecurseSubmodules { cmdargs = append(cmdargs, "--recurse-submodules") } if len(flags.Reference) > 0 { cmdargs = append(cmdargs, "--reference", flags.Reference) } if len(flags.ReferenceIfAble) > 0 { cmdargs = append(cmdargs, "--reference-if-able", flags.ReferenceIfAble) } if len(flags.SeparateGit) > 0 { cmdargs = append(cmdargs, "--separate-git-dir", flags.SeparateGit) } if flags.Shared { cmdargs = append(cmdargs, "--shared") } if flags.SingleBranch { cmdargs = append(cmdargs, "--single-branch") } if len(flags.TemplateDirectory) > 0 { cmdargs = append(cmdargs, "--template", flags.TemplateDirectory) } if len(flags.Upload) > 0 { cmdargs = append(cmdargs, "--upload-pack", flags.Upload) } if flags.Verbose { cmdargs = append(cmdargs, "--verbose") } if len(flags.ShallowSince) > 0 { cmdargs = append(cmdargs, "--shallow-since", flags.ShallowSince) } if len(flags.ShallowExclude) > 0 { cmdargs = append(cmdargs, "--shallow-exclude", flags.ShallowExclude) } if 
flags.ShallowSubmodules {
		cmdargs = append(cmdargs, "--shallow-submodules")
	}
	if flags.NoShallowSubmodules {
		cmdargs = append(cmdargs, "--no-shallow-submodules")
	}
	if flags.Jobs > -1 {
		cmdargs = append(cmdargs, "--jobs", strconv.FormatInt(flags.Jobs, 10))
	}

	// Now args
	cmdargs = append(cmdargs, args...)

	cmd, err := gitNoLFS(cmdargs...)
	if err != nil {
		return errors.New(tr.Tr.Get("failed to find `git clone`: %v", err))
	}

	// Assign all streams direct
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Stdin = os.Stdin

	err = cmd.Start()
	if err != nil {
		return errors.New(tr.Tr.Get("failed to start `git clone`: %v", err))
	}

	err = cmd.Wait()
	if err != nil {
		return errors.New(tr.Tr.Get("`git clone` failed: %v", err))
	}

	return nil
}

// Checkout performs an invocation of `git-checkout(1)` applying the given
// treeish, paths, and force option, if given.
//
// If any error was encountered, it will be returned immediately. Otherwise, the
// checkout has occurred successfully.
func Checkout(treeish string, paths []string, force bool) error {
	args := []string{"checkout"}
	if force {
		args = append(args, "--force")
	}

	if len(treeish) > 0 {
		args = append(args, treeish)
	}

	if len(paths) > 0 {
		args = append(args, append([]string{"--"}, paths...)...)
	}

	_, err := gitNoLFSSimple(args...)
	return err
}

// CachedRemoteRefs returns the list of branches & tags for a remote which are
// currently cached locally. No remote request is made to verify them.
func CachedRemoteRefs(remoteName string) ([]*Ref, error) {
	var ret []*Ref
	cmd, err := gitNoLFS("show-ref")
	if err != nil {
		return nil, errors.New(tr.Tr.Get("failed to find `git show-ref`: %v", err))
	}

	outp, err := cmd.StdoutPipe()
	if err != nil {
		return nil, errors.New(tr.Tr.Get("failed to call `git show-ref`: %v", err))
	}
	cmd.Start()
	scanner := bufio.NewScanner(outp)

	refPrefix := fmt.Sprintf("refs/remotes/%v/", remoteName)
	for scanner.Scan() {
		if sha, name, ok := parseShowRefLine(refPrefix, scanner.Text()); ok {
			// Don't match head
			if name == "HEAD" {
				continue
			}

			ret = append(ret, &Ref{name, RefTypeRemoteBranch, sha})
		}
	}
	return ret, cmd.Wait()
}

func parseShowRefLine(refPrefix, line string) (sha, name string, ok bool) {
	// line format: <sha> <ref>
	space := strings.IndexByte(line, ' ')
	if space < 0 {
		return "", "", false
	}
	ref := line[space+1:]
	if !strings.HasPrefix(ref, refPrefix) {
		return "", "", false
	}
	return line[:space], strings.TrimSpace(ref[len(refPrefix):]), true
}

// Fetch performs a fetch with no arguments against the given remotes.
func Fetch(remotes ...string) error {
	if len(remotes) == 0 {
		return nil
	}

	var args []string
	if len(remotes) > 1 {
		args = []string{"--multiple", "--"}
	}
	args = append(args, remotes...)

	_, err := gitNoLFSSimple(append([]string{"fetch"}, args...)...)
	return err
}

// RemoteRefs returns a list of branches and, optionally, tags for a remote
// by actually accessing the remote via git ls-remote.
func RemoteRefs(remoteName string, withTags bool) ([]*Ref, error) {
	var ret []*Ref
	args := []string{"ls-remote", "--heads", "-q"}
	if withTags {
		args = append(args, "--tags")
	}

	args = append(args, remoteName)
	cmd, err := gitNoLFS(args...)
	if err != nil {
		return nil, errors.New(tr.Tr.Get("failed to find `git ls-remote`: %v", err))
	}

	outp, err := cmd.StdoutPipe()
	if err != nil {
		return nil, errors.New(tr.Tr.Get("failed to call `git ls-remote`: %v", err))
	}
	cmd.Start()
	scanner := bufio.NewScanner(outp)

	for scanner.Scan() {
		if sha, ns, name, ok := parseLsRemoteLine(scanner.Text()); ok {
			// Don't match head
			if name == "HEAD" {
				continue
			}

			typ := RefTypeRemoteBranch
			if ns == "tags" {
				if !withTags {
					return nil, errors.New(tr.Tr.Get("unexpected tag returned by `git ls-remote --heads`: %s %s", name, sha))
				}
				typ = RefTypeRemoteTag
			}
			ret = append(ret, &Ref{name, typ, sha})
		}
	}
	return ret, cmd.Wait()
}

func parseLsRemoteLine(line string) (sha, ns, name string, ok bool) {
	const headPrefix = "refs/heads/"
	const tagPrefix = "refs/tags/"

	// line format: <sha>\t<ref>
	tab := strings.IndexByte(line, '\t')
	if tab < 0 {
		return "", "", "", false
	}
	ref := line[tab+1:]
	switch {
	case strings.HasPrefix(ref, headPrefix):
		ns = "heads"
		name = ref[len(headPrefix):]
	case strings.HasPrefix(ref, tagPrefix):
		ns = "tags"
		name = ref[len(tagPrefix):]
	default:
		return "", "", "", false
	}
	return line[:tab], ns, strings.TrimSpace(name), true
}

// AllRefs returns a slice of all references in a Git repository in the current
// working directory, or an error if those references could not be loaded.
func AllRefs() ([]*Ref, error) {
	return AllRefsIn("")
}

// AllRefsIn returns a slice of all references in a Git repository located in
// the given working directory "wd", or an error if those references could not
// be loaded.
func AllRefsIn(wd string) ([]*Ref, error) {
	cmd, err := gitNoLFS(
		"for-each-ref", "--format=%(objectname)%00%(refname)")
	if err != nil {
		return nil, lfserrors.Wrap(err, tr.Tr.Get("failed to find `git for-each-ref`: %v", err))
	}
	cmd.Dir = wd

	outp, err := cmd.StdoutPipe()
	if err != nil {
		return nil, lfserrors.Wrap(err, tr.Tr.Get("cannot open pipe"))
	}
	cmd.Start()

	refs := make([]*Ref, 0)

	scanner := bufio.NewScanner(outp)
	for scanner.Scan() {
		parts := strings.SplitN(scanner.Text(), "\x00", 2)
		if len(parts) != 2 {
			return nil, lfserrors.New(tr.Tr.Get(
				"invalid `git for-each-ref` line: %q", scanner.Text()))
		}

		sha := parts[0]
		typ, name := ParseRefToTypeAndName(parts[1])

		refs = append(refs, &Ref{
			Name: name,
			Type: typ,
			Sha:  sha,
		})
	}

	if err := scanner.Err(); err != nil {
		return nil, err
	}

	return refs, nil
}

// GetTrackedFiles returns a list of files which are tracked in Git which match
// the pattern specified (standard wildcard form). Both the pattern and the
// results are relative to the current working directory, not the root of the
// repository.
func GetTrackedFiles(pattern string) ([]string, error) {
	safePattern := sanitizePattern(pattern)
	rootWildcard := len(safePattern) < len(pattern) && strings.ContainsRune(safePattern, '*')

	var ret []string
	cmd, err := gitNoLFS(
		"ls-files",
		"--ignored",
		"--cached", // include things which are staged but not committed right now
		"-z",       // handle special chars in filenames
		"-x", safePattern)
	if err != nil {
		return nil, errors.New(tr.Tr.Get("failed to find `git ls-files`: %v", err))
	}

	outp, err := cmd.StdoutPipe()
	if err != nil {
		return nil, errors.New(tr.Tr.Get("failed to call `git ls-files`: %v", err))
	}
	cmd.Start()
	scanner := bufio.NewScanner(outp)
	scanner.Split(tools.SplitOnNul)
	for scanner.Scan() {
		line := scanner.Text()

		// If the given pattern is a root wildcard, skip all files which
		// are not direct descendants of the repository's root.
		//
		// This matches the behavior of how .gitattributes performs
		// filename matches.
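		// For example, the pattern "/*.txt" matches "a.txt" but not
		// "sub/b.txt", whereas "*.txt" matches both.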
		if rootWildcard && filepath.Dir(line) != "." {
			continue
		}

		ret = append(ret, strings.TrimSpace(line))
	}
	return ret, cmd.Wait()
}

func sanitizePattern(pattern string) string {
	if strings.HasPrefix(pattern, "/") {
		return pattern[1:]
	}

	return pattern
}

// GetFilesChanged returns a list of files which were changed, either between 2
// commits, or at a single commit if you only supply one argument and a blank
// string for the other
func GetFilesChanged(from, to string) ([]string, error) {
	var files []string
	args := []string{
		"-c", "core.quotepath=false", // handle special chars in filenames
		"diff-tree",
		"--no-commit-id",
		"--name-only",
		"-r",
	}

	if len(from) > 0 {
		args = append(args, from)
	}
	if len(to) > 0 {
		args = append(args, to)
	}
	args = append(args, "--") // no ambiguous patterns

	cmd, err := gitNoLFS(args...)
	if err != nil {
		return nil, errors.New(tr.Tr.Get("failed to find `git diff-tree`: %v", err))
	}

	outp, err := cmd.StdoutPipe()
	if err != nil {
		return nil, errors.New(tr.Tr.Get("failed to call `git diff-tree`: %v", err))
	}
	if err := cmd.Start(); err != nil {
		return nil, errors.New(tr.Tr.Get("failed to start `git diff-tree`: %v", err))
	}
	scanner := bufio.NewScanner(outp)
	for scanner.Scan() {
		files = append(files, strings.TrimSpace(scanner.Text()))
	}
	if err := cmd.Wait(); err != nil {
		return nil, errors.New(tr.Tr.Get("`git diff-tree` failed: %v", err))
	}

	return files, err
}

// IsFileModified returns whether the filepath specified is modified according
// to `git status`. A file is modified if it has uncommitted changes in the
// working copy or the index. This includes being untracked.
func IsFileModified(filepath string) (bool, error) {
	args := []string{
		"-c", "core.quotepath=false", // handle special chars in filenames
		"status",
		"--porcelain",
		"--", // separator in case filename ambiguous
		filepath,
	}
	cmd, err := git(args...)
	if err != nil {
		return false, lfserrors.Wrap(err, tr.Tr.Get("failed to find `git status`"))
	}

	outp, err := cmd.StdoutPipe()
	if err != nil {
		return false, lfserrors.Wrap(err, tr.Tr.Get("Failed to call `git status`"))
	}
	if err := cmd.Start(); err != nil {
		return false, lfserrors.Wrap(err, tr.Tr.Get("Failed to start `git status`"))
	}

	matched := false
	for scanner := bufio.NewScanner(outp); scanner.Scan(); {
		line := scanner.Text()
		// Porcelain format is "XY <filename>", where X is the index
		// status and Y is the working copy status.
		if len(line) > 3 {
			// Double-check even though it should be the only match
			if strings.TrimSpace(line[3:]) == filepath {
				matched = true
				// keep consuming output to exit cleanly
				// will typically fall straight through anyway due to 1 line output
			}
		}
	}

	if err := cmd.Wait(); err != nil {
		return false, lfserrors.Wrap(err, tr.Tr.Get("`git status` failed"))
	}

	return matched, nil
}

// IsWorkingCopyDirty returns true if and only if the working copy in which the
// command was executed is dirty as compared to the index.
//
// If the status of the working copy could not be determined, an error will be
// returned instead.
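//
// Note that a bare repository has no working copy, so this always reports a
// clean (non-dirty) state there.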
func IsWorkingCopyDirty() (bool, error) { bare, err := IsBare() if bare || err != nil { return false, err } out, err := gitSimple("status", "--porcelain") if err != nil { return false, err } return len(out) != 0, nil } func ObjectDatabase(osEnv, gitEnv Environment, gitdir, tempdir string) (*gitobj.ObjectDatabase, error) { var options []gitobj.Option objdir, ok := osEnv.Get("GIT_OBJECT_DIRECTORY") if !ok { objdir = filepath.Join(gitdir, "objects") } alternates, _ := osEnv.Get("GIT_ALTERNATE_OBJECT_DIRECTORIES") if alternates != "" { options = append(options, gitobj.Alternates(alternates)) } hashAlgo, _ := gitEnv.Get("extensions.objectformat") if hashAlgo != "" { options = append(options, gitobj.ObjectFormat(gitobj.ObjectFormatAlgorithm(hashAlgo))) } odb, err := gitobj.FromFilesystem(objdir, tempdir, options...) if err != nil { return nil, err } if odb.Hasher() == nil { return nil, errors.New(tr.Tr.Get("unsupported repository hash algorithm %q", hashAlgo)) } return odb, nil } func remotesForTreeish(treeish string) []string { var outp string var err error if treeish == "" { //Treeish is empty for sparse checkout tracerx.Printf("git: treeish: not provided") outp, err = gitNoLFSSimple("branch", "-r", "--contains", "HEAD") } else { tracerx.Printf("git: treeish: %q", treeish) outp, err = gitNoLFSSimple("branch", "-r", "--contains", treeish) } if err != nil || outp == "" { tracerx.Printf("git: symbolic name: can't resolve symbolic name for ref: %q", treeish) return []string{} } return strings.Split(outp, "\n") } // remoteForRef will try to determine the remote from the ref name. // This will return an empty string if any of the remote names have a slash // because slashes introduce ambiguity. Consider two refs: // // 1. upstream/main // 2. upstream/test/main // // Is the remote "upstream" or "upstream/test"? It could be either, or both. // We could use git for-each-ref with %(upstream:remotename) if there were a tracking branch, // but this is not guaranteed to exist either. func remoteForRef(refname string) string { tracerx.Printf("git: working ref: %s", refname) remotes, err := RemoteList() if err != nil { return "" } parts := strings.Split(refname, "/") if len(parts) < 2 { return "" } for _, name := range remotes { if strings.Contains(name, "/") { tracerx.Printf("git: ref remote: cannot determine remote for ref %s since remote %s contains a slash", refname, name) return "" } } remote := parts[0] tracerx.Printf("git: working remote %s", remote) return remote } func getValidRemote(refs []string) string { for _, ref := range refs { if ref != "" { return ref } } return "" } // FirstRemoteForTreeish returns the first remote found which contains the treeish. func FirstRemoteForTreeish(treeish string) string { name := getValidRemote(remotesForTreeish(treeish)) if name == "" { tracerx.Printf("git: remote treeish: no valid remote refs parsed for %q", treeish) return "" } return remoteForRef(name) } git-lfs-3.6.1/git/git_test.go000066400000000000000000000536321472372047300160440ustar00rootroot00000000000000package git_test // to avoid import cycles import ( "os" "path/filepath" "sort" "testing" "time" . 
"github.com/git-lfs/git-lfs/v3/git" test "github.com/git-lfs/git-lfs/v3/t/cmd/util" "github.com/git-lfs/git-lfs/v3/tools" "github.com/stretchr/testify/assert" ) func TestRefString(t *testing.T) { const sha = "0000000000000000000000000000000000000000" for s, r := range map[string]*Ref{ "refs/heads/master": { Name: "master", Type: RefTypeLocalBranch, Sha: sha, }, "refs/remotes/origin/master": { Name: "origin/master", Type: RefTypeRemoteBranch, Sha: sha, }, "refs/tags/v1.0.0": { Name: "v1.0.0", Type: RefTypeLocalTag, Sha: sha, }, "HEAD": { Name: "HEAD", Type: RefTypeHEAD, Sha: sha, }, "other": { Name: "other", Type: RefTypeOther, Sha: sha, }, } { assert.Equal(t, s, r.Refspec()) } } func TestParseRefs(t *testing.T) { tests := map[string]RefType{ "refs/heads": RefTypeLocalBranch, "refs/tags": RefTypeLocalTag, "refs/remotes": RefTypeRemoteBranch, } for prefix, expectedType := range tests { r := ParseRef(prefix+"/branch", "abc123") assert.Equal(t, "abc123", r.Sha, "prefix: "+prefix) assert.Equal(t, "branch", r.Name, "prefix: "+prefix) assert.Equal(t, expectedType, r.Type, "prefix: "+prefix) } r := ParseRef("refs/foo/branch", "abc123") assert.Equal(t, "abc123", r.Sha, "prefix: refs/foo") assert.Equal(t, "refs/foo/branch", r.Name, "prefix: refs/foo") assert.Equal(t, RefTypeOther, r.Type, "prefix: refs/foo") r = ParseRef("HEAD", "abc123") assert.Equal(t, "abc123", r.Sha, "prefix: HEAD") assert.Equal(t, "HEAD", r.Name, "prefix: HEAD") assert.Equal(t, RefTypeHEAD, r.Type, "prefix: HEAD") } func TestCurrentRefAndCurrentRemoteRef(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() // test commits; we'll just modify the same file each time since we're // only interested in branches inputs := []*test.CommitInput{ { // 0 Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, }, }, { // 1 NewBranch: "branch2", Files: []*test.FileInput{ {Filename: "file1.txt", Size: 25}, }, }, { // 2 ParentBranches: []string{"master"}, // back on master Files: []*test.FileInput{ {Filename: "file1.txt", Size: 30}, }, }, { // 3 NewBranch: "branch3", Files: []*test.FileInput{ {Filename: "file1.txt", Size: 32}, }, }, } outputs := repo.AddCommits(inputs) // last commit was on branch3 gitConf := repo.GitConfig() ref, err := CurrentRef() assert.Nil(t, err) assert.Equal(t, &Ref{ Name: "branch3", Type: RefTypeLocalBranch, Sha: outputs[3].Sha, }, ref) test.RunGitCommand(t, true, "checkout", "master") ref, err = CurrentRef() assert.Nil(t, err) assert.Equal(t, &Ref{ Name: "master", Type: RefTypeLocalBranch, Sha: outputs[2].Sha, }, ref) // Check remote repo.AddRemote("origin") test.RunGitCommand(t, true, "push", "-u", "origin", "master:someremotebranch") ref, err = gitConf.CurrentRemoteRef() assert.Nil(t, err) assert.Equal(t, &Ref{ Name: "origin/someremotebranch", Type: RefTypeRemoteBranch, Sha: outputs[2].Sha, }, ref) refname, err := gitConf.RemoteRefNameForCurrentBranch() assert.Nil(t, err) assert.Equal(t, "refs/remotes/origin/someremotebranch", refname) ref, err = ResolveRef(outputs[2].Sha) assert.Nil(t, err) assert.Equal(t, &Ref{ Name: outputs[2].Sha, Type: RefTypeOther, Sha: outputs[2].Sha, }, ref) } func TestRecentBranches(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() now := time.Now() // test commits; we'll just modify the same file each time since we're // only interested in branches & dates inputs := []*test.CommitInput{ { // 0 CommitDate: now.AddDate(0, 0, -20), Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, 
}, }, { // 1 CommitDate: now.AddDate(0, 0, -15), NewBranch: "excluded_branch", // new branch & tag but too old Tags: []string{"excluded_tag"}, Files: []*test.FileInput{ {Filename: "file1.txt", Size: 25}, }, }, { // 2 CommitDate: now.AddDate(0, 0, -12), ParentBranches: []string{"master"}, // back on master Files: []*test.FileInput{ {Filename: "file1.txt", Size: 30}, }, }, { // 3 CommitDate: now.AddDate(0, 0, -6), NewBranch: "included_branch", // new branch within 7 day limit Files: []*test.FileInput{ {Filename: "file1.txt", Size: 32}, }, }, { // 4 CommitDate: now.AddDate(0, 0, -3), NewBranch: "included_branch_2", // new branch within 7 day limit Files: []*test.FileInput{ {Filename: "file1.txt", Size: 36}, }, }, { // 5 // Final commit, current date/time ParentBranches: []string{"master"}, // back on master Files: []*test.FileInput{ {Filename: "file1.txt", Size: 21}, }, }, } outputs := repo.AddCommits(inputs) // Add a couple of remotes and push some branches repo.AddRemote("origin") repo.AddRemote("upstream") test.RunGitCommand(t, true, "push", "origin", "master") test.RunGitCommand(t, true, "push", "origin", "excluded_branch") test.RunGitCommand(t, true, "push", "origin", "included_branch") test.RunGitCommand(t, true, "push", "upstream", "master") test.RunGitCommand(t, true, "push", "upstream", "included_branch_2") // Recent, local only refs, err := RecentBranches(now.AddDate(0, 0, -7), false, "") assert.Equal(t, nil, err) expectedRefs := []*Ref{ { Name: "master", Type: RefTypeLocalBranch, Sha: outputs[5].Sha, }, { Name: "included_branch_2", Type: RefTypeLocalBranch, Sha: outputs[4].Sha, }, { Name: "included_branch", Type: RefTypeLocalBranch, Sha: outputs[3].Sha, }, } assert.Equal(t, expectedRefs, refs, "Refs should be correct") // Recent, remotes too (all of them) refs, err = RecentBranches(now.AddDate(0, 0, -7), true, "") assert.Equal(t, nil, err) expectedRefs = []*Ref{ { Name: "master", Type: RefTypeLocalBranch, Sha: outputs[5].Sha, }, { Name: "included_branch_2", Type: RefTypeLocalBranch, Sha: outputs[4].Sha, }, { Name: "included_branch", Type: RefTypeLocalBranch, Sha: outputs[3].Sha, }, { Name: "upstream/master", Type: RefTypeRemoteBranch, Sha: outputs[5].Sha, }, { Name: "upstream/included_branch_2", Type: RefTypeRemoteBranch, Sha: outputs[4].Sha, }, { Name: "origin/master", Type: RefTypeRemoteBranch, Sha: outputs[5].Sha, }, { Name: "origin/included_branch", Type: RefTypeRemoteBranch, Sha: outputs[3].Sha, }, } // Need to sort for consistent comparison sort.Sort(test.RefsByName(expectedRefs)) sort.Sort(test.RefsByName(refs)) assert.Equal(t, expectedRefs, refs, "Refs should be correct") // Recent, only single remote refs, err = RecentBranches(now.AddDate(0, 0, -7), true, "origin") assert.Equal(t, nil, err) expectedRefs = []*Ref{ { Name: "master", Type: RefTypeLocalBranch, Sha: outputs[5].Sha, }, { Name: "origin/master", Type: RefTypeRemoteBranch, Sha: outputs[5].Sha, }, { Name: "included_branch_2", Type: RefTypeLocalBranch, Sha: outputs[4].Sha, }, { Name: "included_branch", Type: RefTypeLocalBranch, Sha: outputs[3].Sha, }, { Name: "origin/included_branch", Type: RefTypeRemoteBranch, Sha: outputs[3].Sha, }, } // Need to sort for consistent comparison sort.Sort(test.RefsByName(expectedRefs)) sort.Sort(test.RefsByName(refs)) assert.Equal(t, expectedRefs, refs, "Refs should be correct") } func TestResolveEmptyCurrentRef(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() _, err := CurrentRef() assert.NotEqual(t, nil, err) } func TestWorktrees(t 
*testing.T) { // Only git 2.5+ if !IsGitVersionAtLeast("2.5.0") { return } repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() repoDir, _ := tools.CanonicalizePath(repo.Path, true) // test commits; we'll just modify the same file each time since we're // only interested in branches & dates inputs := []*test.CommitInput{ { // 0 Files: []*test.FileInput{ {Filename: "file1.txt", Size: 10}, }, Tags: []string{"tag1"}, }, { // 1 Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, }, }, { // 2 NewBranch: "branch2", Files: []*test.FileInput{ {Filename: "file1.txt", Size: 25}, }, }, { // 3 NewBranch: "branch3", ParentBranches: []string{"master"}, // back on master Files: []*test.FileInput{ {Filename: "file1.txt", Size: 30}, }, }, { // 4 NewBranch: "branch4", ParentBranches: []string{"master"}, // back on master Files: []*test.FileInput{ {Filename: "file1.txt", Size: 40}, }, }, { // 5 NewBranch: "branch5", ParentBranches: []string{"master"}, // back on master Files: []*test.FileInput{ {Filename: "file1.txt", Size: 50}, }, }, } outputs := repo.AddCommits(inputs) // Checkout master again otherwise can't create a worktree from branch4 if we're on it here test.RunGitCommand(t, true, "checkout", "master") // We can create worktrees as subfolders for convenience // Each one is checked out to a different branch // Note that we *won't* create one for branch3 test.RunGitCommand(t, true, "worktree", "add", "tag1_wt", "tag1") test.RunGitCommand(t, true, "worktree", "add", "branch2_wt", "branch2") test.RunGitCommand(t, true, "worktree", "add", "branch4_wt", "branch4") test.RunGitCommand(t, true, "worktree", "add", "branch5_wt", "branch5") os.RemoveAll(filepath.Join(repoDir, "branch5_wt")) worktrees, err := GetAllWorktrees(filepath.Join(repo.Path, ".git")) assert.NoError(t, err) expectedWorktrees := []*Worktree{ { Ref: Ref{ Name: outputs[0].Sha, Type: RefTypeOther, Sha: outputs[0].Sha, }, Dir: filepath.Join(repoDir, "tag1_wt"), Prunable: false, }, { Ref: Ref{ Name: "master", Type: RefTypeLocalBranch, Sha: outputs[1].Sha, }, Dir: repoDir, Prunable: false, }, { Ref: Ref{ Name: "branch2", Type: RefTypeLocalBranch, Sha: outputs[2].Sha, }, Dir: filepath.Join(repoDir, "branch2_wt"), Prunable: false, }, { Ref: Ref{ Name: "branch4", Type: RefTypeLocalBranch, Sha: outputs[4].Sha, }, Dir: filepath.Join(repoDir, "branch4_wt"), Prunable: false, }, { Ref: Ref{ Name: "branch5", Type: RefTypeLocalBranch, Sha: outputs[5].Sha, }, Dir: filepath.Join(repoDir, "branch5_wt"), Prunable: true, }, } // Need to sort for consistent comparison sort.Sort(test.WorktreesByName(expectedWorktrees)) sort.Sort(test.WorktreesByName(worktrees)) assert.Equal(t, expectedWorktrees, worktrees, "Worktrees should be correct") } func TestWorktreesBareRepo(t *testing.T) { // Only git 2.5+ if !IsGitVersionAtLeast("2.5.0") { return } repo := test.NewBareRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() worktrees, err := GetAllWorktrees(repo.Path) assert.NoError(t, err) assert.Nil(t, worktrees) } func TestVersionCompare(t *testing.T) { assert.True(t, IsVersionAtLeast("2.6.0", "2.6.0")) assert.True(t, IsVersionAtLeast("2.6.0", "2.6")) assert.True(t, IsVersionAtLeast("2.6.0", "2")) assert.True(t, IsVersionAtLeast("2.6.10", "2.6.5")) assert.True(t, IsVersionAtLeast("2.8.1", "2.7.2")) assert.False(t, IsVersionAtLeast("1.6.0", "2")) assert.False(t, IsVersionAtLeast("2.5.0", "2.6")) assert.False(t, IsVersionAtLeast("2.5.0", "2.5.1")) assert.False(t, IsVersionAtLeast("2.5.2", "2.5.10")) } func 
TestGitAndRootDirs(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() git, root, err := GitAndRootDirs() if err != nil { t.Fatal(err) } expected, err := os.Stat(git) if err != nil { t.Fatal(err) } actual, err := os.Stat(filepath.Join(root, ".git")) if err != nil { t.Fatal(err) } assert.True(t, os.SameFile(expected, actual)) } func TestGetTrackedFiles(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() // test commits; we'll just modify the same file each time since we're // only interested in branches inputs := []*test.CommitInput{ { // 0 Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, {Filename: "file2.txt", Size: 20}, {Filename: "folder1/file10.txt", Size: 20}, {Filename: "folder1/anotherfile.txt", Size: 20}, }, }, { // 1 Files: []*test.FileInput{ {Filename: "file3.txt", Size: 20}, {Filename: "file4.txt", Size: 20}, {Filename: "folder2/something.txt", Size: 20}, {Filename: "folder2/folder3/deep.txt", Size: 20}, }, }, } repo.AddCommits(inputs) tracked, err := GetTrackedFiles("*.txt") assert.Nil(t, err) sort.Strings(tracked) // for direct comparison fulllist := []string{"file1.txt", "file2.txt", "file3.txt", "file4.txt", "folder1/anotherfile.txt", "folder1/file10.txt", "folder2/folder3/deep.txt", "folder2/something.txt"} assert.Equal(t, fulllist, tracked) tracked, err = GetTrackedFiles("*file*.txt") assert.Nil(t, err) sort.Strings(tracked) sublist := []string{"file1.txt", "file2.txt", "file3.txt", "file4.txt", "folder1/anotherfile.txt", "folder1/file10.txt"} assert.Equal(t, sublist, tracked) tracked, err = GetTrackedFiles("folder1/*") assert.Nil(t, err) sort.Strings(tracked) sublist = []string{"folder1/anotherfile.txt", "folder1/file10.txt"} assert.Equal(t, sublist, tracked) tracked, err = GetTrackedFiles("folder2/*") assert.Nil(t, err) sort.Strings(tracked) sublist = []string{"folder2/folder3/deep.txt", "folder2/something.txt"} assert.Equal(t, sublist, tracked) // relative dir os.Chdir("folder1") tracked, err = GetTrackedFiles("*.txt") assert.Nil(t, err) sort.Strings(tracked) sublist = []string{"anotherfile.txt", "file10.txt"} assert.Equal(t, sublist, tracked) os.Chdir("..") // absolute paths only includes matches in repo root tracked, err = GetTrackedFiles("/*.txt") assert.Nil(t, err) sort.Strings(tracked) assert.Equal(t, []string{"file1.txt", "file2.txt", "file3.txt", "file4.txt"}, tracked) // Test includes staged but uncommitted files os.WriteFile("z_newfile.txt", []byte("Hello world"), 0644) test.RunGitCommand(t, true, "add", "z_newfile.txt") tracked, err = GetTrackedFiles("*.txt") assert.Nil(t, err) sort.Strings(tracked) fulllist = append(fulllist, "z_newfile.txt") assert.Equal(t, fulllist, tracked) // Test includes modified files (not staged) os.WriteFile("file1.txt", []byte("Modifications"), 0644) tracked, err = GetTrackedFiles("*.txt") assert.Nil(t, err) sort.Strings(tracked) assert.Equal(t, fulllist, tracked) // Test includes modified files (staged) test.RunGitCommand(t, true, "add", "file1.txt") tracked, err = GetTrackedFiles("*.txt") assert.Nil(t, err) sort.Strings(tracked) assert.Equal(t, fulllist, tracked) // Test excludes deleted files (not committed) test.RunGitCommand(t, true, "rm", "file2.txt") tracked, err = GetTrackedFiles("*.txt") assert.Nil(t, err) sort.Strings(tracked) deletedlist := []string{"file1.txt", "file3.txt", "file4.txt", "folder1/anotherfile.txt", "folder1/file10.txt", "folder2/folder3/deep.txt", "folder2/something.txt", "z_newfile.txt"} 
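	// A summary of the GetTrackedFiles pattern semantics exercised above
	// (a sketch of the behavior this test demonstrates, not additional API):
	//
	//	GetTrackedFiles("*.txt")     // recursive match from the current directory
	//	GetTrackedFiles("folder1/*") // everything under folder1/
	//	GetTrackedFiles("/*.txt")    // a leading "/" anchors matches to the repo root
	//
	// Staged-but-uncommitted and modified files are included; files removed
	// with "git rm" are excluded, as the final assertion confirms.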
assert.Equal(t, deletedlist, tracked) } func TestLocalRefs(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() repo.AddCommits([]*test.CommitInput{ { Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, }, }, { NewBranch: "branch", ParentBranches: []string{"master"}, Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, }, }, }) test.RunGitCommand(t, true, "tag", "v1") refs, err := LocalRefs() if err != nil { t.Fatal(err) } actual := make(map[string]bool) for _, r := range refs { t.Logf("REF: %s", r.Name) switch r.Type { case RefTypeHEAD: t.Errorf("Local HEAD ref: %v", r) case RefTypeOther: t.Errorf("Stash or unknown ref: %v", r) default: actual[r.Name] = true } } expected := []string{"master", "branch", "v1"} found := 0 for _, refname := range expected { if actual[refname] { found += 1 } else { t.Errorf("could not find ref %q", refname) } } if found != len(expected) { t.Errorf("Unexpected local refs: %v", actual) } } func TestGetFilesChanges(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() commits := repo.AddCommits([]*test.CommitInput{ { Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, }, }, { Files: []*test.FileInput{ {Filename: "file1.txt", Size: 25}, {Filename: "file2.txt", Size: 20}, {Filename: "folder/file3.txt", Size: 10}, }, Tags: []string{"tag1"}, }, { NewBranch: "abranch", ParentBranches: []string{"master"}, Files: []*test.FileInput{ {Filename: "file1.txt", Size: 30}, {Filename: "file4.txt", Size: 40}, }, }, }) expected0to1 := []string{"file1.txt", "file2.txt", "folder/file3.txt"} expected1to2 := []string{"file1.txt", "file4.txt"} expected0to2 := []string{"file1.txt", "file2.txt", "file4.txt", "folder/file3.txt"} // Test 2 SHAs changes, err := GetFilesChanged(commits[0].Sha, commits[1].Sha) assert.Nil(t, err) assert.Equal(t, expected0to1, changes) // Test SHA & tag changes, err = GetFilesChanged(commits[0].Sha, "tag1") assert.Nil(t, err) assert.Equal(t, expected0to1, changes) // Test SHA & branch changes, err = GetFilesChanged(commits[0].Sha, "abranch") assert.Nil(t, err) assert.Equal(t, expected0to2, changes) // Test tag & branch changes, err = GetFilesChanged("tag1", "abranch") assert.Nil(t, err) assert.Equal(t, expected1to2, changes) // Test fail _, err = GetFilesChanged("tag1", "nonexisting") assert.NotNil(t, err) _, err = GetFilesChanged("nonexisting", "tag1") assert.NotNil(t, err) // Test Single arg version changes, err = GetFilesChanged(commits[1].Sha, "") assert.Nil(t, err) assert.Equal(t, expected0to1, changes) changes, err = GetFilesChanged("abranch", "") assert.Nil(t, err) assert.Equal(t, expected1to2, changes) } func TestValidateRemoteURL(t *testing.T) { assert.Nil(t, ValidateRemoteURL("https://github.com/git-lfs/git-lfs")) assert.Nil(t, ValidateRemoteURL("http://github.com/git-lfs/git-lfs")) assert.Nil(t, ValidateRemoteURL("git://github.com/git-lfs/git-lfs")) assert.Nil(t, ValidateRemoteURL("ssh://git@github.com/git-lfs/git-lfs")) assert.Nil(t, ValidateRemoteURL("ssh://git@github.com:22/git-lfs/git-lfs")) assert.Nil(t, ValidateRemoteURL("git@github.com:git-lfs/git-lfs")) assert.Nil(t, ValidateRemoteURL("git@server:/absolute/path.git")) assert.NotNil(t, ValidateRemoteURL("ftp://git@github.com/git-lfs/git-lfs")) } func TestRefTypeKnownPrefixes(t *testing.T) { for typ, expected := range map[RefType]struct { Prefix string Ok bool }{ RefTypeLocalBranch: {"refs/heads", true}, RefTypeRemoteBranch: {"refs/remotes", true}, RefTypeLocalTag: {"refs/tags", 
true}, RefTypeHEAD: {"", false}, RefTypeOther: {"", false}, } { prefix, ok := typ.Prefix() assert.Equal(t, expected.Prefix, prefix) assert.Equal(t, expected.Ok, ok) } } func TestRemoteURLs(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() cfg := repo.GitConfig() cfg.SetLocal("remote.foo.url", "https://github.com/git-lfs/git-lfs.git") cfg.SetLocal("remote.bar.url", "https://github.com/git-lfs/wildmatch.git") cfg.SetLocal("remote.bar.pushurl", "https://github.com/git-lfs/pktline.git") expected := make(map[string][]string) expected["foo"] = []string{"https://github.com/git-lfs/git-lfs.git"} expected["bar"] = []string{"https://github.com/git-lfs/wildmatch.git"} actual, err := RemoteURLs(false) assert.Nil(t, err) assert.Equal(t, expected, actual) expected["bar"] = []string{"https://github.com/git-lfs/pktline.git"} actual, err = RemoteURLs(true) assert.Nil(t, err) assert.Equal(t, expected, actual) } func TestMapRemoteURL(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() cfg := repo.GitConfig() cfg.SetLocal("remote.foo.url", "https://github.com/git-lfs/git-lfs.git") cfg.SetLocal("remote.bar.url", "https://github.com/git-lfs/wildmatch.git") cfg.SetLocal("remote.bar.pushurl", "https://github.com/git-lfs/pktline.git") tests := []struct { url string push bool match bool val string }{ { "https://github.com/git-lfs/git-lfs.git", false, true, "foo", }, { "https://github.com/git-lfs/git-lfs.git", true, true, "foo", }, { "https://github.com/git-lfs/wildmatch.git", false, true, "bar", }, { "https://github.com/git-lfs/pktline.git", true, true, "bar", }, { "https://github.com/git-lfs/pktline.git", false, false, "https://github.com/git-lfs/pktline.git", }, { "https://github.com/git/git.git", true, false, "https://github.com/git/git.git", }, } for _, test := range tests { val, ok := MapRemoteURL(test.url, test.push) assert.Equal(t, ok, test.match) assert.Equal(t, val, test.val) } } func TestIsValidObjectIDLength(t *testing.T) { // Lengths are 40, 64, 39, and 12. 
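	// Background on these lengths (general Git knowledge, not from this
	// file): 40 hexadecimal characters is the length of a SHA-1 object ID
	// and 64 is the length of a SHA-256 object ID, the two hash algorithms
	// Git supports; 39 and 12 are near-misses that must be rejected.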
	assert.Equal(t, HasValidObjectIDLength("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), true)
	assert.Equal(t, HasValidObjectIDLength("2222222222222222222222222222222222222222222222222222222222222222"), true)
	assert.Equal(t, HasValidObjectIDLength("555555555555555555555555555555555555555"), false)
	assert.Equal(t, HasValidObjectIDLength("0123456789ab"), false)
}

func TestIsZeroObjectID(t *testing.T) {
	assert.Equal(t, IsZeroObjectID("0000000000000000000000000000000000000000"), true)
	assert.Equal(t, IsZeroObjectID("0000000000000000000000000000000000000000000000000000000000000000"), true)
	assert.Equal(t, IsZeroObjectID("000000000000000000000000000000000000000"), false)
	assert.Equal(t, IsZeroObjectID("e69de29bb2d1d6434b8b29ae775ad8c2e48c5391"), false)
	assert.Equal(t, IsZeroObjectID("473a0f4c3be8a93681a267e3b1e9a7dcda1185436fe141f7749120a303721813"), false)
}
git-lfs-3.6.1/git/gitattr/000077500000000000000000000000001472372047300153405ustar00rootroot00000000000000git-lfs-3.6.1/git/gitattr/attr.go000066400000000000000000000124411472372047300166430ustar00rootroot00000000000000
package gitattr

import (
	"bufio"
	"bytes"
	"io"
	"strconv"
	"strings"

	"github.com/git-lfs/git-lfs/v3/errors"
	"github.com/git-lfs/git-lfs/v3/tr"
	"github.com/git-lfs/wildmatch/v2"
)

const attrPrefix = "[attr]"

type Line interface {
	Attrs() []*Attr
}

type PatternLine interface {
	Pattern() *wildmatch.Wildmatch
	Line
}

type MacroLine interface {
	Macro() string
	Line
}

type lineAttrs struct {
	// attrs is the list of attributes defined in a .gitattributes line.
	//
	// It is populated in-order as it was written in the .gitattributes
	// file being read, from left to right.
	attrs []*Attr
}

func (l *lineAttrs) Attrs() []*Attr {
	return l.attrs
}

type patternLine struct {
	// pattern is a wildmatch pattern that, when matched, indicates that
	// all of the attributes below should be applied to that tree entry.
	//
	// pattern is relative to the tree from which the .gitattributes was
	// read. For example, /.gitattributes affects all blobs in the
	// repository, while /path/to/.gitattributes affects all blobs that
	// are direct or indirect children of /path/to.
	pattern *wildmatch.Wildmatch

	// lineAttrs holds the list of attributes to be applied when the
	// above pattern matches a given filename.
	lineAttrs
}

func (pl *patternLine) Pattern() *wildmatch.Wildmatch {
	return pl.pattern
}

type macroLine struct {
	// macro is the name of a macro that, when matched, indicates that
	// all of the attributes below should be applied to that tree entry.
	macro string

	// lineAttrs holds the list of attributes to be applied when the
	// above macro name is matched for a given filename.
	lineAttrs
}

func (ml *macroLine) Macro() string {
	return ml.macro
}

// Attr is a single attribute that may be applied to a file.
type Attr struct {
	// K is the name of the attribute. It is commonly "filter", "diff",
	// "merge", or "text".
	//
	// It will never contain the special "false" shorthand ("-"), or the
	// unspecified declarative ("!").
	K string

	// V is the value held by that attribute. It is commonly "lfs", or
	// "false", indicating the special value given by a "-"-prefixed name.
	V string

	// Unspecified indicates whether or not this attribute was explicitly
	// unset by prefixing the keyname with "!".
	Unspecified bool
}

// ParseLines parses the given io.Reader "r" line-wise as if it were the
// contents of a .gitattributes file, returning the parsed lines and the
// dominant line ending of the input.
//
// If an error was encountered, it will be returned and the []Line should be
// considered unusable.
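//
// A minimal usage sketch (hypothetical input; callers that do not care about
// line endings can discard the second return value):
//
//	lines, _, err := ParseLines(strings.NewReader("*.dat filter=lfs -text"))
//	if err != nil {
//		// handle the parse error
//	}
//	for _, line := range lines {
//		if pl, ok := line.(PatternLine); ok {
//			// prints: *.dat filter lfs
//			fmt.Println(pl.Pattern().String(), pl.Attrs()[0].K, pl.Attrs()[0].V)
//		}
//	}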
func ParseLines(r io.Reader) ([]Line, string, error) { var lines []Line splitter := &lineEndingSplitter{} scanner := bufio.NewScanner(r) scanner.Split(splitter.ScanLines) for scanner.Scan() { text := strings.TrimSpace(scanner.Text()) if len(text) == 0 { continue } var pattern string var applied string var macro string switch text[0] { case '#': continue case '"': var err error last := strings.LastIndex(text, "\"") if last == 0 { return nil, "", errors.New(tr.Tr.Get("unbalanced quote: %s", text)) } pattern, err = strconv.Unquote(text[:last+1]) if err != nil { return nil, "", errors.Wrap(err, tr.Tr.Get("unable to unquote: %s", text[:last+1])) } applied = strings.TrimSpace(text[last+1:]) default: splits := strings.SplitN(text, " ", 2) if strings.HasPrefix(splits[0], attrPrefix) { macro = splits[0][len(attrPrefix):] } else { pattern = splits[0] } if len(splits) == 2 { applied = splits[1] } } var lineAttrs lineAttrs for _, s := range strings.Split(applied, " ") { if s == "" { continue } var attr Attr if strings.HasPrefix(s, "-") { attr.K = strings.TrimPrefix(s, "-") attr.V = "false" } else if strings.HasPrefix(s, "!") { attr.K = strings.TrimPrefix(s, "!") attr.Unspecified = true } else if eq := strings.Index(s, "="); eq > -1 { attr.K = s[:eq] attr.V = s[eq+1:] } else { attr.K = s attr.V = "true" } lineAttrs.attrs = append(lineAttrs.attrs, &attr) } var line Line if pattern != "" { matchPattern := wildmatch.NewWildmatch(pattern, wildmatch.Basename, wildmatch.SystemCase, wildmatch.GitAttributes, ) line = &patternLine{matchPattern, lineAttrs} } else { line = ¯oLine{macro, lineAttrs} } lines = append(lines, line) } if err := scanner.Err(); err != nil { return nil, "", err } return lines, splitter.LineEnding(), nil } // copies bufio.ScanLines(), counting LF vs CRLF in a file type lineEndingSplitter struct { LFCount int CRLFCount int } func (s *lineEndingSplitter) LineEnding() string { if s.CRLFCount > s.LFCount { return "\r\n" } else if s.LFCount == 0 { return "" } return "\n" } func (s *lineEndingSplitter) ScanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { if atEOF && len(data) == 0 { return 0, nil, nil } if i := bytes.IndexByte(data, '\n'); i >= 0 { // We have a full newline-terminated line. return i + 1, s.dropCR(data[0:i]), nil } // If we're at EOF, we have a final, non-terminated line. Return it. if atEOF { return len(data), data, nil } // Request more data. return 0, nil, nil } // dropCR drops a terminal \r from the data. 
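// Each dropped "\r" increments CRLFCount and each bare "\n" increments
// LFCount, which is how LineEnding above chooses between endings: a CRLF
// majority yields "\r\n", input without any newlines yields "", and
// everything else (including ties) yields "\n".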
func (s *lineEndingSplitter) dropCR(data []byte) []byte { if len(data) > 0 && data[len(data)-1] == '\r' { s.CRLFCount++ return data[0 : len(data)-1] } s.LFCount++ return data } git-lfs-3.6.1/git/gitattr/attr_test.go000066400000000000000000000136231472372047300177050ustar00rootroot00000000000000package gitattr import ( "fmt" "strings" "testing" "github.com/stretchr/testify/assert" ) func TestParseLines(t *testing.T) { lines, _, err := ParseLines(strings.NewReader("*.dat filter=lfs")) assert.NoError(t, err) assert.Len(t, lines, 1) assert.Implements(t, (*PatternLine)(nil), lines[0]) assert.Equal(t, lines[0].(PatternLine).Pattern().String(), "*.dat") assert.Equal(t, lines[0].Attrs()[0], &Attr{ K: "filter", V: "lfs", }) } func TestParseLinesManyAttrs(t *testing.T) { lines, _, err := ParseLines(strings.NewReader( "*.dat filter=lfs diff=lfs merge=lfs -text crlf")) assert.NoError(t, err) assert.Len(t, lines, 1) assert.Implements(t, (*PatternLine)(nil), lines[0]) assert.Equal(t, lines[0].(PatternLine).Pattern().String(), "*.dat") assert.Len(t, lines[0].Attrs(), 5) assert.Equal(t, lines[0].Attrs()[0], &Attr{K: "filter", V: "lfs"}) assert.Equal(t, lines[0].Attrs()[1], &Attr{K: "diff", V: "lfs"}) assert.Equal(t, lines[0].Attrs()[2], &Attr{K: "merge", V: "lfs"}) assert.Equal(t, lines[0].Attrs()[3], &Attr{K: "text", V: "false"}) assert.Equal(t, lines[0].Attrs()[4], &Attr{K: "crlf", V: "true"}) } func TestParseLinesManyLines(t *testing.T) { lines, _, err := ParseLines(strings.NewReader(strings.Join([]string{ "*.dat filter=lfs diff=lfs merge=lfs -text", "*.jpg filter=lfs diff=lfs merge=lfs -text", "# *.pdf filter=lfs diff=lfs merge=lfs -text", "*.png filter=lfs diff=lfs merge=lfs -text", "*.txt text"}, "\n"))) assert.NoError(t, err) assert.Len(t, lines, 4) assert.Implements(t, (*PatternLine)(nil), lines[0]) assert.Implements(t, (*PatternLine)(nil), lines[1]) assert.Implements(t, (*PatternLine)(nil), lines[2]) assert.Implements(t, (*PatternLine)(nil), lines[3]) assert.Equal(t, lines[0].(PatternLine).Pattern().String(), "*.dat") assert.Equal(t, lines[1].(PatternLine).Pattern().String(), "*.jpg") assert.Equal(t, lines[2].(PatternLine).Pattern().String(), "*.png") assert.Equal(t, lines[3].(PatternLine).Pattern().String(), "*.txt") assert.Len(t, lines[0].Attrs(), 4) assert.Equal(t, lines[0].Attrs()[0], &Attr{K: "filter", V: "lfs"}) assert.Equal(t, lines[0].Attrs()[1], &Attr{K: "diff", V: "lfs"}) assert.Equal(t, lines[0].Attrs()[2], &Attr{K: "merge", V: "lfs"}) assert.Equal(t, lines[0].Attrs()[3], &Attr{K: "text", V: "false"}) assert.Len(t, lines[1].Attrs(), 4) assert.Equal(t, lines[1].Attrs()[0], &Attr{K: "filter", V: "lfs"}) assert.Equal(t, lines[1].Attrs()[1], &Attr{K: "diff", V: "lfs"}) assert.Equal(t, lines[1].Attrs()[2], &Attr{K: "merge", V: "lfs"}) assert.Equal(t, lines[1].Attrs()[3], &Attr{K: "text", V: "false"}) assert.Len(t, lines[2].Attrs(), 4) assert.Equal(t, lines[2].Attrs()[0], &Attr{K: "filter", V: "lfs"}) assert.Equal(t, lines[2].Attrs()[1], &Attr{K: "diff", V: "lfs"}) assert.Equal(t, lines[2].Attrs()[2], &Attr{K: "merge", V: "lfs"}) assert.Equal(t, lines[2].Attrs()[3], &Attr{K: "text", V: "false"}) assert.Len(t, lines[3].Attrs(), 1) assert.Equal(t, lines[3].Attrs()[0], &Attr{K: "text", V: "true"}) } func TestParseLinesUnset(t *testing.T) { lines, _, err := ParseLines(strings.NewReader("*.dat -filter")) assert.NoError(t, err) assert.Len(t, lines, 1) assert.Implements(t, (*PatternLine)(nil), lines[0]) assert.Equal(t, lines[0].(PatternLine).Pattern().String(), "*.dat") assert.Equal(t, 
lines[0].Attrs()[0], &Attr{ K: "filter", V: "false", }) } func TestParseLinesUnspecified(t *testing.T) { lines, _, err := ParseLines(strings.NewReader("*.dat !filter")) assert.NoError(t, err) assert.Len(t, lines, 1) assert.Implements(t, (*PatternLine)(nil), lines[0]) assert.Equal(t, lines[0].(PatternLine).Pattern().String(), "*.dat") assert.Equal(t, lines[0].Attrs()[0], &Attr{ K: "filter", Unspecified: true, }) } func TestParseLinesQuotedPattern(t *testing.T) { lines, _, err := ParseLines(strings.NewReader( "\"space *.dat\" filter=lfs")) assert.NoError(t, err) assert.Len(t, lines, 1) assert.Implements(t, (*PatternLine)(nil), lines[0]) assert.Equal(t, lines[0].(PatternLine).Pattern().String(), "space *.dat") assert.Equal(t, lines[0].Attrs()[0], &Attr{ K: "filter", V: "lfs", }) } func TestParseLinesCommented(t *testing.T) { lines, _, err := ParseLines(strings.NewReader( "# \"space *.dat\" filter=lfs")) assert.NoError(t, err) assert.Len(t, lines, 0) } func TestParseLinesUnbalancedQuotes(t *testing.T) { const text = "\"space *.dat filter=lfs" lines, _, err := ParseLines(strings.NewReader(text)) assert.Empty(t, lines) assert.EqualError(t, err, fmt.Sprintf( "unbalanced quote: %s", text)) } func TestParseLinesWithNoAttributes(t *testing.T) { lines, _, err := ParseLines(strings.NewReader("*.dat")) assert.Len(t, lines, 1) assert.NoError(t, err) assert.Implements(t, (*PatternLine)(nil), lines[0]) assert.Equal(t, lines[0].(PatternLine).Pattern().String(), "*.dat") assert.Empty(t, lines[0].Attrs()) } func TestParseLinesWithMacros(t *testing.T) { lines, _, err := ParseLines(strings.NewReader(strings.Join([]string{ "[attr]lfs filter=lfs diff=lfs merge=lfs -text", "*.dat lfs", "*.txt text"}, "\n"))) assert.Len(t, lines, 3) assert.NoError(t, err) assert.Implements(t, (*MacroLine)(nil), lines[0]) assert.Implements(t, (*PatternLine)(nil), lines[1]) assert.Implements(t, (*PatternLine)(nil), lines[2]) assert.Equal(t, lines[0].(MacroLine).Macro(), "lfs") assert.Len(t, lines[0].Attrs(), 4) assert.Equal(t, lines[0].Attrs()[0], &Attr{K: "filter", V: "lfs"}) assert.Equal(t, lines[0].Attrs()[1], &Attr{K: "diff", V: "lfs"}) assert.Equal(t, lines[0].Attrs()[2], &Attr{K: "merge", V: "lfs"}) assert.Equal(t, lines[0].Attrs()[3], &Attr{K: "text", V: "false"}) assert.Equal(t, lines[1].(PatternLine).Pattern().String(), "*.dat") assert.Len(t, lines[1].Attrs(), 1) assert.Equal(t, lines[1].Attrs()[0], &Attr{K: "lfs", V: "true"}) assert.Equal(t, lines[2].(PatternLine).Pattern().String(), "*.txt") assert.Len(t, lines[2].Attrs(), 1) assert.Equal(t, lines[2].Attrs()[0], &Attr{K: "text", V: "true"}) } git-lfs-3.6.1/git/gitattr/macro.go000066400000000000000000000032341472372047300167720ustar00rootroot00000000000000package gitattr type MacroProcessor struct { macros map[string][]*Attr } // NewMacroProcessor returns a new MacroProcessor object for parsing macros. func NewMacroProcessor() *MacroProcessor { macros := make(map[string][]*Attr) // This is built into Git. macros["binary"] = []*Attr{ &Attr{K: "diff", V: "false"}, &Attr{K: "merge", V: "false"}, &Attr{K: "text", V: "false"}, } return &MacroProcessor{ macros: macros, } } // ProcessLines reads the specified lines, returning a new set of lines which // all have a valid pattern. If readMacros is true, it additionally loads any // macro lines as it reads them. 
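//
// A minimal usage sketch (hypothetical input):
//
//	mp := NewMacroProcessor()
//	lines, _, _ := ParseLines(strings.NewReader(
//		"[attr]lfs filter=lfs -text\n*.dat lfs"))
//	expanded := mp.ProcessLines(lines, true)
//	// expanded[0] matches "*.dat" with filter=lfs, text=false, and
//	// lfs=true: the macro is expanded and, as in Git, the macro name
//	// itself is copied through as an attribute.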
func (mp *MacroProcessor) ProcessLines(lines []Line, readMacros bool) []PatternLine { result := make([]PatternLine, 0, len(lines)) for _, line := range lines { switch l := line.(type) { case PatternLine: var lineAttrs lineAttrs lineAttrs.attrs = make([]*Attr, 0, len(l.Attrs())) resultLine := &patternLine{l.Pattern(), lineAttrs} for _, attr := range l.Attrs() { macros := mp.macros[attr.K] if attr.V == "true" && macros != nil { resultLine.attrs = append( resultLine.attrs, macros..., ) } else if attr.Unspecified && macros != nil { for _, m := range macros { resultLine.attrs = append( resultLine.attrs, &Attr{ K: m.K, Unspecified: true, }, ) } } // Git copies through aliases as well as // expanding them. resultLine.attrs = append( resultLine.attrs, attr, ) } result = append(result, resultLine) case MacroLine: if readMacros { mp.macros[l.Macro()] = l.Attrs() } } } return result } git-lfs-3.6.1/git/gitattr/macro_test.go000066400000000000000000000151241472372047300200320ustar00rootroot00000000000000package gitattr import ( "strings" "testing" "github.com/stretchr/testify/assert" ) func TestProcessLinesWithMacros(t *testing.T) { lines, _, err := ParseLines(strings.NewReader(strings.Join([]string{ "[attr]lfs filter=lfs diff=lfs merge=lfs -text", "*.dat lfs", "*.txt text"}, "\n"))) assert.Len(t, lines, 3) assert.NoError(t, err) assert.Implements(t, (*MacroLine)(nil), lines[0]) assert.Implements(t, (*PatternLine)(nil), lines[1]) assert.Implements(t, (*PatternLine)(nil), lines[2]) mp := NewMacroProcessor() patternLines := mp.ProcessLines(lines, true) assert.Len(t, patternLines, 2) assert.Implements(t, (*PatternLine)(nil), patternLines[0]) assert.Implements(t, (*PatternLine)(nil), patternLines[1]) assert.Equal(t, patternLines[0].Pattern().String(), "*.dat") assert.Len(t, patternLines[0].Attrs(), 5) assert.Equal(t, patternLines[0].Attrs()[0], &Attr{K: "filter", V: "lfs"}) assert.Equal(t, patternLines[0].Attrs()[1], &Attr{K: "diff", V: "lfs"}) assert.Equal(t, patternLines[0].Attrs()[2], &Attr{K: "merge", V: "lfs"}) assert.Equal(t, patternLines[0].Attrs()[3], &Attr{K: "text", V: "false"}) assert.Equal(t, patternLines[0].Attrs()[4], &Attr{K: "lfs", V: "true"}) assert.Equal(t, patternLines[1].Pattern().String(), "*.txt") assert.Len(t, patternLines[1].Attrs(), 1) assert.Equal(t, patternLines[1].Attrs()[0], &Attr{K: "text", V: "true"}) } func TestProcessLinesWithMacrosDisabled(t *testing.T) { lines, _, err := ParseLines(strings.NewReader(strings.Join([]string{ "[attr]lfs filter=lfs diff=lfs merge=lfs -text", "*.dat lfs", "*.txt text"}, "\n"))) assert.Len(t, lines, 3) assert.NoError(t, err) assert.Implements(t, (*MacroLine)(nil), lines[0]) assert.Implements(t, (*PatternLine)(nil), lines[1]) assert.Implements(t, (*PatternLine)(nil), lines[2]) mp := NewMacroProcessor() patternLines := mp.ProcessLines(lines, false) assert.Len(t, patternLines, 2) assert.Implements(t, (*PatternLine)(nil), patternLines[0]) assert.Implements(t, (*PatternLine)(nil), patternLines[1]) assert.Equal(t, patternLines[0].Pattern().String(), "*.dat") assert.Len(t, patternLines[0].Attrs(), 1) assert.Equal(t, patternLines[0].Attrs()[0], &Attr{K: "lfs", V: "true"}) assert.Equal(t, patternLines[1].Pattern().String(), "*.txt") assert.Len(t, patternLines[1].Attrs(), 1) assert.Equal(t, patternLines[1].Attrs()[0], &Attr{K: "text", V: "true"}) } func TestProcessLinesWithUnspecifiedMacros(t *testing.T) { lines, _, err := ParseLines(strings.NewReader(strings.Join([]string{ "[attr]lfs filter=lfs diff=lfs merge=lfs -text", "*.dat lfs", "*.dat !lfs"}, 
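		// Summary of the behavior under test: per
		// MacroProcessor.ProcessLines above, "!lfs" expands to an
		// Unspecified entry for each attribute the macro defines,
		// plus the "lfs" alias itself, which the assertions on
		// patternLines[1] below verify.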
"\n"))) assert.Len(t, lines, 3) assert.NoError(t, err) assert.Implements(t, (*MacroLine)(nil), lines[0]) assert.Implements(t, (*PatternLine)(nil), lines[1]) assert.Implements(t, (*PatternLine)(nil), lines[2]) mp := NewMacroProcessor() patternLines := mp.ProcessLines(lines, true) assert.Len(t, patternLines, 2) assert.Implements(t, (*PatternLine)(nil), patternLines[0]) assert.Implements(t, (*PatternLine)(nil), patternLines[1]) assert.Equal(t, patternLines[0].Pattern().String(), "*.dat") assert.Len(t, patternLines[0].Attrs(), 5) assert.Equal(t, patternLines[0].Attrs()[0], &Attr{K: "filter", V: "lfs"}) assert.Equal(t, patternLines[0].Attrs()[1], &Attr{K: "diff", V: "lfs"}) assert.Equal(t, patternLines[0].Attrs()[2], &Attr{K: "merge", V: "lfs"}) assert.Equal(t, patternLines[0].Attrs()[3], &Attr{K: "text", V: "false"}) assert.Equal(t, patternLines[0].Attrs()[4], &Attr{K: "lfs", V: "true"}) assert.Equal(t, patternLines[1].Pattern().String(), "*.dat") assert.Len(t, patternLines[1].Attrs(), 5) assert.Equal(t, patternLines[1].Attrs()[0], &Attr{K: "filter", V: "", Unspecified: true}) assert.Equal(t, patternLines[1].Attrs()[1], &Attr{K: "diff", V: "", Unspecified: true}) assert.Equal(t, patternLines[1].Attrs()[2], &Attr{K: "merge", V: "", Unspecified: true}) assert.Equal(t, patternLines[1].Attrs()[3], &Attr{K: "text", V: "", Unspecified: true}) assert.Equal(t, patternLines[1].Attrs()[4], &Attr{K: "lfs", V: "", Unspecified: true}) } func TestProcessLinesWithBinaryMacros(t *testing.T) { lines, _, err := ParseLines(strings.NewReader(strings.Join([]string{ "*.dat binary", "*.txt text"}, "\n"))) assert.Len(t, lines, 2) assert.NoError(t, err) assert.Implements(t, (*PatternLine)(nil), lines[0]) assert.Implements(t, (*PatternLine)(nil), lines[1]) mp := NewMacroProcessor() patternLines := mp.ProcessLines(lines, true) assert.Len(t, patternLines, 2) assert.Implements(t, (*PatternLine)(nil), patternLines[0]) assert.Implements(t, (*PatternLine)(nil), patternLines[1]) assert.Equal(t, patternLines[0].Pattern().String(), "*.dat") assert.Len(t, patternLines[0].Attrs(), 4) assert.Equal(t, patternLines[0].Attrs()[0], &Attr{K: "diff", V: "false"}) assert.Equal(t, patternLines[0].Attrs()[1], &Attr{K: "merge", V: "false"}) assert.Equal(t, patternLines[0].Attrs()[2], &Attr{K: "text", V: "false"}) assert.Equal(t, patternLines[0].Attrs()[3], &Attr{K: "binary", V: "true"}) assert.Equal(t, patternLines[1].Pattern().String(), "*.txt") assert.Len(t, patternLines[1].Attrs(), 1) assert.Equal(t, patternLines[1].Attrs()[0], &Attr{K: "text", V: "true"}) } func TestProcessLinesIsStateful(t *testing.T) { lines, _, err := ParseLines(strings.NewReader(strings.Join([]string{ "[attr]lfs filter=lfs diff=lfs merge=lfs -text", "*.txt text"}, "\n"))) assert.Len(t, lines, 2) assert.NoError(t, err) assert.Implements(t, (*MacroLine)(nil), lines[0]) assert.Implements(t, (*PatternLine)(nil), lines[1]) mp := NewMacroProcessor() patternLines := mp.ProcessLines(lines, true) assert.Len(t, patternLines, 1) assert.Implements(t, (*PatternLine)(nil), patternLines[0]) assert.Equal(t, patternLines[0].Pattern().String(), "*.txt") assert.Len(t, patternLines[0].Attrs(), 1) assert.Equal(t, patternLines[0].Attrs()[0], &Attr{K: "text", V: "true"}) lines2, _, err := ParseLines(strings.NewReader("*.dat lfs\n")) assert.Len(t, lines2, 1) assert.NoError(t, err) assert.Implements(t, (*PatternLine)(nil), lines2[0]) patternLines2 := mp.ProcessLines(lines2, false) assert.Len(t, patternLines2, 1) assert.Implements(t, (*PatternLine)(nil), patternLines2[0]) assert.Equal(t, 
	patternLines2[0].Pattern().String(), "*.dat")
	assert.Len(t, patternLines2[0].Attrs(), 5)
	assert.Equal(t, patternLines2[0].Attrs()[0], &Attr{K: "filter", V: "lfs"})
	assert.Equal(t, patternLines2[0].Attrs()[1], &Attr{K: "diff", V: "lfs"})
	assert.Equal(t, patternLines2[0].Attrs()[2], &Attr{K: "merge", V: "lfs"})
	assert.Equal(t, patternLines2[0].Attrs()[3], &Attr{K: "text", V: "false"})
	assert.Equal(t, patternLines2[0].Attrs()[4], &Attr{K: "lfs", V: "true"})
}
git-lfs-3.6.1/git/gitattr/tree.go000066400000000000000000000053501472372047300166310ustar00rootroot00000000000000
package gitattr

import (
	"strings"

	"github.com/git-lfs/git-lfs/v3/errors"
	"github.com/git-lfs/git-lfs/v3/tr"
	"github.com/git-lfs/gitobj/v2"
)

// Tree represents the .gitattributes file at one layer of the tree in a Git
// repository.
type Tree struct {
	// Lines are the lines of the .gitattributes at this level of the tree.
	Lines []Line
	// Children are the named child directories in the repository.
	Children map[string]*Tree
}

// New constructs a *Tree starting at the given tree "t" and reading objects
// from the given ObjectDatabase. If a tree could not be read, the error is
// propagated up accordingly.
func New(db *gitobj.ObjectDatabase, t *gitobj.Tree) (*Tree, error) {
	children := make(map[string]*Tree)
	lines, _, err := linesInTree(db, t)
	if err != nil {
		return nil, err
	}

	for _, entry := range t.Entries {
		if entry.Type() != gitobj.TreeObjectType {
			continue
		}

		// For every entry in the current tree, parse its sub-trees to
		// see if they might contain a .gitattributes.
		t, err := db.Tree(entry.Oid)
		if err != nil {
			return nil, err
		}

		at, err := New(db, t)
		if err != nil {
			return nil, err
		}

		if len(at.Children) > 0 || len(at.Lines) > 0 {
			// Only include entries that have either (1) a
			// .gitattributes in their tree, or (2) a
			// .gitattributes in a sub-tree.
			children[entry.Name] = at
		}
	}

	return &Tree{
		Lines:    lines,
		Children: children,
	}, nil
}

// linesInTree parses a given tree's .gitattributes and returns a slice of
// lines in that .gitattributes, or an error. If no .gitattributes blob was
// found, it returns nil.
func linesInTree(db *gitobj.ObjectDatabase, t *gitobj.Tree) ([]Line, string, error) {
	var at int = -1
	for i, e := range t.Entries {
		if e.Name == ".gitattributes" {
			if e.IsLink() {
				return nil, "", errors.Errorf("migrate: %s", tr.Tr.Get("expected '.gitattributes' to be a file, got a symbolic link"))
			}
			at = i
			break
		}
	}

	if at < 0 {
		return nil, "", nil
	}

	blob, err := db.Blob(t.Entries[at].Oid)
	if err != nil {
		return nil, "", err
	}
	defer blob.Close()

	return ParseLines(blob.Contents)
}

// Applied returns a slice of attributes applied to the given path, relative
// to the receiving tree. It traverses through sub-trees in a topological
// ordering, if there are relevant .gitattributes matching that path.
func (t *Tree) Applied(to string) []*Attr {
	var attrs []*Attr
	for _, line := range t.Lines {
		if l, ok := line.(PatternLine); ok {
			if l.Pattern().Match(to) {
				attrs = append(attrs, line.Attrs()...)
			}
		}
	}

	splits := strings.SplitN(to, "/", 2)
	if len(splits) == 2 {
		car, cdr := splits[0], splits[1]
		if child, ok := t.Children[car]; ok {
			attrs = append(attrs, child.Applied(cdr)...)
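			// Worked example (hypothetical layout, mirroring the
			// tests for this package): with a root .gitattributes
			// of "*.dat filter=lfs" and a subdir/.gitattributes of
			// "*.dat subdir=yes", Applied("subdir/a.dat") collects
			// filter=lfs from the root lines above, then recurses
			// here into the "subdir" child with the remainder
			// "a.dat", appending subdir=yes last so that parent
			// attributes stay first in the result.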
} } return attrs } git-lfs-3.6.1/git/gitattr/tree_test.go000066400000000000000000000133461472372047300176740ustar00rootroot00000000000000package gitattr import ( "testing" "github.com/git-lfs/gitobj/v2" "github.com/git-lfs/wildmatch/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var ( dat = wildmatch.NewWildmatch("*.dat", wildmatch.Basename, wildmatch.SystemCase) example = &Tree{ Lines: []Line{ &patternLine{ pattern: dat, lineAttrs: lineAttrs{ attrs: []*Attr{ { K: "filter", V: "lfs", }, { K: "diff", V: "lfs", }, { K: "merge", V: "lfs", }, { K: "text", V: "false", }, }, }, }, }, Children: map[string]*Tree{ "subdir": &Tree{ Lines: []Line{ &patternLine{ pattern: dat, lineAttrs: lineAttrs{ attrs: []*Attr{ { K: "subdir", V: "yes", }, }, }, }, }, }, }, } ) func TestTreeAppliedInRoot(t *testing.T) { attrs := example.Applied("a.dat") assert.Len(t, attrs, 4) assert.Equal(t, attrs[0], &Attr{K: "filter", V: "lfs"}) assert.Equal(t, attrs[1], &Attr{K: "diff", V: "lfs"}) assert.Equal(t, attrs[2], &Attr{K: "merge", V: "lfs"}) assert.Equal(t, attrs[3], &Attr{K: "text", V: "false"}) } func TestTreeAppliedInSubtreeRelevant(t *testing.T) { attrs := example.Applied("subdir/a.dat") assert.Len(t, attrs, 5) assert.Equal(t, attrs[0], &Attr{K: "filter", V: "lfs"}) assert.Equal(t, attrs[1], &Attr{K: "diff", V: "lfs"}) assert.Equal(t, attrs[2], &Attr{K: "merge", V: "lfs"}) assert.Equal(t, attrs[3], &Attr{K: "text", V: "false"}) assert.Equal(t, attrs[4], &Attr{K: "subdir", V: "yes"}) } func TestTreeAppliedInSubtreeIrrelevant(t *testing.T) { attrs := example.Applied("subdir/a.txt") assert.Empty(t, attrs) } func TestTreeAppliedInIrrelevantSubtree(t *testing.T) { attrs := example.Applied("other/subdir/a.dat") assert.Len(t, attrs, 4) assert.Equal(t, attrs[0], &Attr{K: "filter", V: "lfs"}) assert.Equal(t, attrs[1], &Attr{K: "diff", V: "lfs"}) assert.Equal(t, attrs[2], &Attr{K: "merge", V: "lfs"}) assert.Equal(t, attrs[3], &Attr{K: "text", V: "false"}) } func TestNewDiscoversSimpleTrees(t *testing.T) { tmp := t.TempDir() db, err := gitobj.FromFilesystem(tmp, "") require.NoError(t, err) defer db.Close() blob, err := db.WriteBlob(gitobj.NewBlobFromBytes([]byte(` *.dat filter=lfs diff=lfs merge=lfs -text `))) require.NoError(t, err) tree, err := New(db, &gitobj.Tree{Entries: []*gitobj.TreeEntry{ { Name: ".gitattributes", Oid: blob, Filemode: 0100644, }, }}) require.NoError(t, err) attrs := tree.Applied("foo.dat") assert.Len(t, attrs, 4) assert.Equal(t, attrs[0], &Attr{K: "filter", V: "lfs"}) assert.Equal(t, attrs[1], &Attr{K: "diff", V: "lfs"}) assert.Equal(t, attrs[2], &Attr{K: "merge", V: "lfs"}) assert.Equal(t, attrs[3], &Attr{K: "text", V: "false"}) } func TestNewDiscoversSimpleChildrenTrees(t *testing.T) { tmp := t.TempDir() db, err := gitobj.FromFilesystem(tmp, "") require.NoError(t, err) defer db.Close() blob, err := db.WriteBlob(gitobj.NewBlobFromBytes([]byte(` *.dat filter=lfs diff=lfs merge=lfs -text `))) require.NoError(t, err) child, err := db.WriteTree(&gitobj.Tree{Entries: []*gitobj.TreeEntry{ { Name: ".gitattributes", Oid: blob, Filemode: 0100644, }, }}) require.NoError(t, err) tree, err := New(db, &gitobj.Tree{Entries: []*gitobj.TreeEntry{ { Name: "child", Oid: child, Filemode: 040000, }, }}) require.NoError(t, err) assert.Empty(t, tree.Lines) assert.Len(t, tree.Children, 1) attrs := tree.Applied("child/foo.dat") assert.Len(t, attrs, 4) assert.Equal(t, attrs[0], &Attr{K: "filter", V: "lfs"}) assert.Equal(t, attrs[1], &Attr{K: "diff", V: "lfs"}) assert.Equal(t, attrs[2], 
&Attr{K: "merge", V: "lfs"}) assert.Equal(t, attrs[3], &Attr{K: "text", V: "false"}) } func TestNewDiscoversIndirectChildrenTrees(t *testing.T) { tmp := t.TempDir() db, err := gitobj.FromFilesystem(tmp, "") require.NoError(t, err) defer db.Close() blob, err := db.WriteBlob(gitobj.NewBlobFromBytes([]byte(` *.dat filter=lfs diff=lfs merge=lfs -text `))) require.NoError(t, err) indirect, err := db.WriteTree(&gitobj.Tree{Entries: []*gitobj.TreeEntry{ { Name: ".gitattributes", Oid: blob, Filemode: 0100644, }, }}) require.NoError(t, err) child, err := db.WriteTree(&gitobj.Tree{Entries: []*gitobj.TreeEntry{ { Name: "indirect", Oid: indirect, Filemode: 040000, }, }}) require.NoError(t, err) tree, err := New(db, &gitobj.Tree{Entries: []*gitobj.TreeEntry{ { Name: "child", Oid: child, Filemode: 040000, }, }}) require.NoError(t, err) assert.Empty(t, tree.Lines) assert.Len(t, tree.Children, 1) attrs := tree.Applied("child/indirect/foo.dat") assert.Len(t, attrs, 4) assert.Equal(t, attrs[0], &Attr{K: "filter", V: "lfs"}) assert.Equal(t, attrs[1], &Attr{K: "diff", V: "lfs"}) assert.Equal(t, attrs[2], &Attr{K: "merge", V: "lfs"}) assert.Equal(t, attrs[3], &Attr{K: "text", V: "false"}) } func TestNewIgnoresChildrenAppropriately(t *testing.T) { tmp := t.TempDir() db, err := gitobj.FromFilesystem(tmp, "") require.NoError(t, err) defer db.Close() blob, err := db.WriteBlob(gitobj.NewBlobFromBytes([]byte(` *.dat filter=lfs diff=lfs merge=lfs -text `))) require.NoError(t, err) child, err := db.WriteTree(&gitobj.Tree{Entries: []*gitobj.TreeEntry{ { Name: "README.md", Oid: []byte("00000000000000000000"), Filemode: 0100644, }, }}) require.NoError(t, err) tree, err := New(db, &gitobj.Tree{Entries: []*gitobj.TreeEntry{ { Name: ".gitattributes", Oid: blob, Filemode: 0100644, }, { Name: "child", Oid: child, Filemode: 040000, }, }}) require.NoError(t, err) assert.NotContains(t, tree.Children, "child") } git-lfs-3.6.1/git/githistory/000077500000000000000000000000001472372047300160675ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/000077500000000000000000000000001472372047300177405ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/000077500000000000000000000000001472372047300235555ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/HEAD000066400000000000000000000000271472372047300242000ustar00rootroot00000000000000ref: refs/heads/master git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/config000066400000000000000000000002111472372047300247370ustar00rootroot00000000000000[core] repositoryformatversion = 0 filemode = true bare = false logallrefupdates = true ignorecase = true precomposeunicode = true git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/index000066400000000000000000000003211472372047300246030ustar00rootroot00000000000000DIRCY1Y13la X@.w7o&ia.txtY1Y13la X@.w7o&ib.txtTREE2 0 +E+Y2gltj~*h.#7uUgit-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/logs/000077500000000000000000000000001472372047300245215ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/logs/HEAD000066400000000000000000000002421472372047300251430ustar00rootroot000000000000000000000000000000000000000000000000000000 42723ad796caa500ddf4e3f6ad37600ed5a65491 Taylor Blau 1496440063 -0600 commit (initial): initial commit 
git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/logs/refs/000077500000000000000000000000001472372047300254605ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/logs/refs/heads/000077500000000000000000000000001472372047300265445ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/logs/refs/heads/master000066400000000000000000000002421472372047300277600ustar00rootroot000000000000000000000000000000000000000000000000000000 42723ad796caa500ddf4e3f6ad37600ed5a65491 Taylor Blau 1496440063 -0600 commit (initial): initial commit git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/objects/000077500000000000000000000000001472372047300252065ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/objects/42/000077500000000000000000000000001472372047300254335ustar00rootroot00000000000000723ad796caa500ddf4e3f6ad37600ed5a65491000066400000000000000000000001771472372047300325670ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/objects/42x] B!F{v¿;"DDkhN$h}hI{0d2CBŦ-懳`L>8 B)vy wz[Ν"_1Oy {Qju'wJh8git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/objects/94/000077500000000000000000000000001472372047300254425ustar00rootroot00000000000000f3610c08588440112ed977376f26a8fba169b0000066400000000000000000000000271472372047300322220ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/objects/94xKOR`/LK(Mgit-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/objects/b6/000077500000000000000000000000001472372047300255155ustar00rootroot000000000000002b45ed2b59cf32dd676ca47497e76a1dab9c7e000066400000000000000000000000671472372047300330320ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/objects/b6x+)JMU03c040031QH+(a9#APfyڊ 37@$TDZgit-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/refs/000077500000000000000000000000001472372047300245145ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/refs/heads/000077500000000000000000000000001472372047300256005ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/identical-blobs.git/refs/heads/master000066400000000000000000000000511472372047300270120ustar00rootroot0000000000000042723ad796caa500ddf4e3f6ad37600ed5a65491 git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/000077500000000000000000000000001472372047300273535ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/HEAD000066400000000000000000000000271472372047300277760ustar00rootroot00000000000000ref: refs/heads/master git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/config000066400000000000000000000002111472372047300305350ustar00rootroot00000000000000[core] repositoryformatversion = 0 filemode = true bare = false logallrefupdates = true ignorecase = true precomposeunicode = true git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/index000066400000000000000000000002111472372047300303770ustar00rootroot00000000000000DIRCZ/w6eZ/w6w0l$4+|^՛CGhsome.txtTREE1 0 >ti_ 
iN.5SL-3F*H~git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/logs/000077500000000000000000000000001472372047300303175ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/logs/HEAD000066400000000000000000000007111472372047300307420ustar00rootroot000000000000000000000000000000000000000000000000000000 91b85be6928569390e937479509b80a1d0dccb0c Taylor Blau 1496954196 -0600 commit (initial): some.txt: a 91b85be6928569390e937479509b80a1d0dccb0c 228afe30855933151f7a88e70d9d88314fd2f191 Taylor Blau 1496954207 -0600 commit: some.txt: b 228afe30855933151f7a88e70d9d88314fd2f191 d941e4756add6b06f5bee766fcf669f55419f13f Taylor Blau 1496954214 -0600 commit: some.txt: c git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/logs/refs/000077500000000000000000000000001472372047300312565ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/logs/refs/heads/000077500000000000000000000000001472372047300323425ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/logs/refs/heads/master000066400000000000000000000007111472372047300335570ustar00rootroot000000000000000000000000000000000000000000000000000000 91b85be6928569390e937479509b80a1d0dccb0c Taylor Blau 1496954196 -0600 commit (initial): some.txt: a 91b85be6928569390e937479509b80a1d0dccb0c 228afe30855933151f7a88e70d9d88314fd2f191 Taylor Blau 1496954207 -0600 commit: some.txt: b 228afe30855933151f7a88e70d9d88314fd2f191 d941e4756add6b06f5bee766fcf669f55419f13f Taylor Blau 1496954214 -0600 commit: some.txt: c git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/objects/000077500000000000000000000000001472372047300310045ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/objects/05/000077500000000000000000000000001472372047300312305ustar00rootroot00000000000000797a38b05f910e6efe40dc1a5c0a046a9403e8000066400000000000000000000001751472372047300363010ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/objects/05x-A 0=e7 oٔJC%(ކK3a{fR&`Hg8qlyqh @FI=c&Pa h-z, CJ6#*Zں>>tU+E~OՃFǠ3f5'^$o餾jDgit-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/refs/000077500000000000000000000000001472372047300303125ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/refs/heads/000077500000000000000000000000001472372047300313765ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/refs/heads/master000066400000000000000000000000511472372047300326100ustar00rootroot00000000000000d941e4756add6b06f5bee766fcf669f55419f13f git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/refs/tags/000077500000000000000000000000001472372047300312505ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-annotated-tags.git/refs/tags/middle000066400000000000000000000000511472372047300324250ustar00rootroot0000000000000005797a38b05f910e6efe40dc1a5c0a046a9403e8 git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/000077500000000000000000000000001472372047300254005ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/HEAD000066400000000000000000000000271472372047300260230ustar00rootroot00000000000000ref: refs/heads/master 
git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/config000066400000000000000000000002111472372047300265620ustar00rootroot00000000000000[core] repositoryformatversion = 0 filemode = true bare = false logallrefupdates = true ignorecase = true precomposeunicode = true git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/index000066400000000000000000000002111472372047300264240ustar00rootroot00000000000000DIRCY9eY9eg(4+|^՛CGhsome.txtTREE1 0 >ti_ iN.tꐨF#_Kϛ8vgit-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/logs/000077500000000000000000000000001472372047300263445ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/logs/HEAD000066400000000000000000000007111472372047300267670ustar00rootroot000000000000000000000000000000000000000000000000000000 91b85be6928569390e937479509b80a1d0dccb0c Taylor Blau 1496954196 -0600 commit (initial): some.txt: a 91b85be6928569390e937479509b80a1d0dccb0c 228afe30855933151f7a88e70d9d88314fd2f191 Taylor Blau 1496954207 -0600 commit: some.txt: b 228afe30855933151f7a88e70d9d88314fd2f191 d941e4756add6b06f5bee766fcf669f55419f13f Taylor Blau 1496954214 -0600 commit: some.txt: c git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/logs/refs/000077500000000000000000000000001472372047300273035ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/logs/refs/heads/000077500000000000000000000000001472372047300303675ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/logs/refs/heads/master000066400000000000000000000007111472372047300316040ustar00rootroot000000000000000000000000000000000000000000000000000000 91b85be6928569390e937479509b80a1d0dccb0c Taylor Blau 1496954196 -0600 commit (initial): some.txt: a 91b85be6928569390e937479509b80a1d0dccb0c 228afe30855933151f7a88e70d9d88314fd2f191 Taylor Blau 1496954207 -0600 commit: some.txt: b 228afe30855933151f7a88e70d9d88314fd2f191 d941e4756add6b06f5bee766fcf669f55419f13f Taylor Blau 1496954214 -0600 commit: some.txt: c git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/000077500000000000000000000000001472372047300270315ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/20/000077500000000000000000000000001472372047300272525ustar00rootroot00000000000000ecedad3e74a113695fe5f00ab003694e2e1e9c000066400000000000000000000000651472372047300345360ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/20x+)JMU06c040031Q(M+(a0`^Vwuvs=|'g*git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/22/000077500000000000000000000000001472372047300272545ustar00rootroot000000000000008afe30855933151f7a88e70d9d88314fd2f191000066400000000000000000000002341472372047300341350ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/22x B1NicE{A7؁_9vcU N\lΖP]!d3hG@V<ӠKT9Lut  6PKPDZڇ<}tO7:2XJo,5!-x1w(^o,Cjgit-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/2e/000077500000000000000000000000001472372047300273375ustar00rootroot0000000000000065efe2a145dda7ee51d1741299f848e5bf752e000066400000000000000000000000201472372047300345060ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/2exKOR0dH 
Rgit-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/34/000077500000000000000000000000001472372047300272575ustar00rootroot0000000000000010062ba67c5ed59b854387a8bc0ec012479368000066400000000000000000000000201472372047300341050ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/34xKOR0dH Tgit-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/3c/000077500000000000000000000000001472372047300273365ustar00rootroot00000000000000b3201d7942353fff5f45e03d114e8e7a061f87000066400000000000000000000000651472372047300342550ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/3cx+)JMU06c040031Q(M+(aK}h/tVOgit-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/52/000077500000000000000000000000001472372047300272575ustar00rootroot00000000000000a8963f48d54c7d352695a278ca4b025e130cb4000066400000000000000000000000641472372047300341750ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/52x+)JMU06c040031Q(M+(aHq r^ [r.}=5git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/63/000077500000000000000000000000001472372047300272615ustar00rootroot00000000000000d8dbd40c23542e740659a7168a0ce3138ea748000066400000000000000000000000201472372047300341650ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/63xKOR0dH Sgit-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/91/000077500000000000000000000000001472372047300272625ustar00rootroot00000000000000b85be6928569390e937479509b80a1d0dccb0c000066400000000000000000000001771472372047300342220ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/91x[ 0))@L7(4 aR-%@OҘAV̨9l,^ O#:ʓWqgmp."?цTd PppF;C9dt&6}git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/d9/000077500000000000000000000000001472372047300273455ustar00rootroot0000000000000041e4756add6b06f5bee766fcf669f55419f13f000066400000000000000000000002341472372047300345320ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/objects/d9x 1}Nic7?w5lP0Fh;i>fR&`Hg8qlyqh @FI=c&Pa h-z, CJ6#*Zں>>tU+E~OՃFǠ3f5'^$o餾jDgit-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/refs/000077500000000000000000000000001472372047300263375ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/refs/heads/000077500000000000000000000000001472372047300274235ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/refs/heads/master000066400000000000000000000000511472372047300306350ustar00rootroot00000000000000d941e4756add6b06f5bee766fcf669f55419f13f git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/refs/tags/000077500000000000000000000000001472372047300272755ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history-with-tags.git/refs/tags/middle000066400000000000000000000000511472372047300304520ustar00rootroot00000000000000228afe30855933151f7a88e70d9d88314fd2f191 git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/000077500000000000000000000000001472372047300234735ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/HEAD000066400000000000000000000000271472372047300241160ustar00rootroot00000000000000ref: refs/heads/master 
git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/config000066400000000000000000000002111472372047300246550ustar00rootroot00000000000000[core] repositoryformatversion = 0 filemode = true bare = false logallrefupdates = true ignorecase = true precomposeunicode = true git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/index000066400000000000000000000002111472372047300245170ustar00rootroot00000000000000DIRCY0sY0s2 D@BXiew.ha+S hello.txtTREE1 0 n1pĭsH85il~iNT7[cgit-lfs-3.6.1/git/githistory/fixtures/linear-history.git/logs/000077500000000000000000000000001472372047300244375ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/logs/HEAD000066400000000000000000000007141472372047300250650ustar00rootroot000000000000000000000000000000000000000000000000000000 62811b8f930323895033b3b338c35f51c0b7268b Taylor Blau 1496347620 -0600 commit (initial): hello.txt: 1 62811b8f930323895033b3b338c35f51c0b7268b efeab7a9b61312fa56fc74eee1e0f5a714abfb70 Taylor Blau 1496347630 -0600 commit: hello.txt: 2 efeab7a9b61312fa56fc74eee1e0f5a714abfb70 e669b63f829bfb0b91fc52a5bcea53dd7977a0ee Taylor Blau 1496347641 -0600 commit: hello.txt: 3 git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/logs/refs/000077500000000000000000000000001472372047300253765ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/logs/refs/heads/000077500000000000000000000000001472372047300264625ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/logs/refs/heads/master000066400000000000000000000007141472372047300277020ustar00rootroot000000000000000000000000000000000000000000000000000000 62811b8f930323895033b3b338c35f51c0b7268b Taylor Blau 1496347620 -0600 commit (initial): hello.txt: 1 62811b8f930323895033b3b338c35f51c0b7268b efeab7a9b61312fa56fc74eee1e0f5a714abfb70 Taylor Blau 1496347630 -0600 commit: hello.txt: 2 efeab7a9b61312fa56fc74eee1e0f5a714abfb70 e669b63f829bfb0b91fc52a5bcea53dd7977a0ee Taylor Blau 1496347641 -0600 commit: hello.txt: 3 git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/000077500000000000000000000000001472372047300251245ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/56/000077500000000000000000000000001472372047300253565ustar00rootroot00000000000000a6051ca2b02b04ef92d5150c9ef600403cb1de000066400000000000000000000000201472372047300324360ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/56xKOR0d0 "git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/62/000077500000000000000000000000001472372047300253535ustar00rootroot00000000000000811b8f930323895033b3b338c35f51c0b7268b000066400000000000000000000001751472372047300320450ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/62x 1}NۀǮ#"b 6]X8!!pvo)D9ئJQS"j0YX}'>uQӵ'v#9} 
'#f7ؙUk퓽B^7git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/6e/000077500000000000000000000000001472372047300254365ustar00rootroot0000000000000007bd31cb70c4add2c973481ad4fa38b235ca69000066400000000000000000000000661472372047300326370ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/6ex+)JMU06g040031QH+(axSDfnHԖbgit-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/71/000077500000000000000000000000001472372047300253535ustar00rootroot00000000000000a488ec1804ee97ea651b094aa9181ca85aab0a000066400000000000000000000000661472372047300325600ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/71x+)JMU06g040031QH+(a[*h6IWEy}cpx?Igit-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/c5/000077500000000000000000000000001472372047300254335ustar00rootroot00000000000000decfe1fcf39b8c489f4a0bf3b3823676339f80000066400000000000000000000000661472372047300326750ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/c5x+)JMU06g040031QH+(afuʥl޿Ń-O(git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/d8/000077500000000000000000000000001472372047300254375ustar00rootroot00000000000000263ee9860594d2806b0dfd1bfd17528b0ba2a4000066400000000000000000000000201472372047300324740ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/d8xKOR0d0 #git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/e4/000077500000000000000000000000001472372047300254345ustar00rootroot0000000000000040e5c842586965a7fb77deda2eca68612b1f53000066400000000000000000000000201472372047300325140ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/e4xKOR0d0 $git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/e6/000077500000000000000000000000001472372047300254365ustar00rootroot0000000000000069b63f829bfb0b91fc52a5bcea53dd7977a0ee000066400000000000000000000002361472372047300330250ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/e6xQ !Evn <N}:p\nj`!IGl EŜu 0ۂf ]/dI0z с]pv%yKD@b,cKk:pFWSjE X,ȣrJa=CRm7_ Fgit-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/ef/000077500000000000000000000000001472372047300255165ustar00rootroot00000000000000eab7a9b61312fa56fc74eee1e0f5a714abfb70000066400000000000000000000002361472372047300331370ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/objects/efxQ B!Evnoh m@M) upڃV~#g-UI}d H}!0ӤTP#y 8bUjGQ}k6ylcI2ޡ ĤEqϵ|Z|Cgit-lfs-3.6.1/git/githistory/fixtures/linear-history.git/refs/000077500000000000000000000000001472372047300244325ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/refs/heads/000077500000000000000000000000001472372047300255165ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/linear-history.git/refs/heads/master000066400000000000000000000000511472372047300267300ustar00rootroot00000000000000e669b63f829bfb0b91fc52a5bcea53dd7977a0ee git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/000077500000000000000000000000001472372047300247355ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/HEAD000066400000000000000000000000271472372047300253600ustar00rootroot00000000000000ref: refs/heads/master git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/config000066400000000000000000000002111472372047300261170ustar00rootroot00000000000000[core] 
repositoryformatversion = 0 filemode = true bare = false logallrefupdates = true ignorecase = true precomposeunicode = true git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/index000066400000000000000000000003701472372047300257670ustar00rootroot00000000000000DIRCY5Y54փh4#a.txtY5Y54݁ q[‚^N` subdir/b.txtTREE82 1 =.>Wzu{vsubdir1 0 ~uHb1 }Nz9%BHH2git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/logs/000077500000000000000000000000001472372047300257015ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/logs/HEAD000066400000000000000000000005171472372047300263300ustar00rootroot000000000000000000000000000000000000000000000000000000 37f99c7f2706d317b3bf7ff13d574eef33d8788a Taylor Blau 1496686519 -0600 commit (initial): a.txt: initial commit 37f99c7f2706d317b3bf7ff13d574eef33d8788a bc63077ac5e575ccc9dbbd93dc882f1e10600ea7 Taylor Blau 1496686541 -0600 commit: subdir/b.txt: initial commit git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/logs/refs/000077500000000000000000000000001472372047300266405ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/logs/refs/heads/000077500000000000000000000000001472372047300277245ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/logs/refs/heads/master000066400000000000000000000005171472372047300311450ustar00rootroot000000000000000000000000000000000000000000000000000000 37f99c7f2706d317b3bf7ff13d574eef33d8788a Taylor Blau 1496686519 -0600 commit (initial): a.txt: initial commit 37f99c7f2706d317b3bf7ff13d574eef33d8788a bc63077ac5e575ccc9dbbd93dc882f1e10600ea7 Taylor Blau 1496686541 -0600 commit: subdir/b.txt: initial commit git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/objects/000077500000000000000000000000001472372047300263665ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/objects/07/000077500000000000000000000000001472372047300266145ustar00rootroot00000000000000bd7fbfc41b7d36135bcffe7c465490f4aca32d000066400000000000000000000000621472372047300342430ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/objects/07x+)JMU06f040031QH+(a9yk2S$f0Pgit-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/objects/12/000077500000000000000000000000001472372047300266105ustar00rootroot000000000000007ececad475cde6da0048051d62121cabd23194000066400000000000000000000000621472372047300337730ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/objects/12x+)JMU06f040031QH+(a\sq5?jKhqgit-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/objects/19/000077500000000000000000000000001472372047300266175ustar00rootroot00000000000000acdd81ab0abc15c771fe005bf1c2825e4e6080000066400000000000000000000000241472372047300340550ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/objects/19xKOR0eH+(git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/objects/37/000077500000000000000000000000001472372047300266175ustar00rootroot00000000000000f99c7f2706d317b3bf7ff13d574eef33d8788a000066400000000000000000000002051472372047300340070ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/objects/37x 1Nļ6""`7/ $wA7h~ 9V4Rn}砄Q!9'0=A,Ma@.X\b,mGb)R|Rw+ iTRQ"H1Moko~;fUWS^ q 
!?:8`~OߡylNOhg3o.Kgit-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/refs/000077500000000000000000000000001472372047300256745ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/refs/heads/000077500000000000000000000000001472372047300267605ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/non-repeated-subtrees.git/refs/heads/master000066400000000000000000000000511472372047300301720ustar00rootroot00000000000000bc63077ac5e575ccc9dbbd93dc882f1e10600ea7 git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/000077500000000000000000000000001472372047300233135ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/HEAD000066400000000000000000000000271472372047300237360ustar00rootroot00000000000000ref: refs/heads/master git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/ORIG_HEAD000066400000000000000000000000511472372047300245530ustar00rootroot000000000000008be6d64cddab01f53381e9feafe50d95ca5e6629 git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/config000066400000000000000000000002111472372047300244750ustar00rootroot00000000000000[core] repositoryformatversion = 0 filemode = true bare = false logallrefupdates = true ignorecase = true precomposeunicode = true git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/index000066400000000000000000000004311472372047300243430ustar00rootroot00000000000000DIRCY1Y12.eEݧQtHu.a.txtY1Y12c #T.tY Hb.txtY1nY1n2Lb g_:\0] hello.txtTREE3 0 /:ɝ[EAg^Ru9|^1q3~u@!8[git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/logs/000077500000000000000000000000001472372047300242575ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/logs/HEAD000066400000000000000000000024721472372047300247100ustar00rootroot000000000000000000000000000000000000000000000000000000 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496421999 -0600 commit (initial): initial commit 8be6d64cddab01f53381e9feafe50d95ca5e6629 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496422012 -0600 checkout: moving from master to branch-a 8be6d64cddab01f53381e9feafe50d95ca5e6629 251e6b3461a3b5adc6bab694d5ae1abc878edf85 Taylor Blau 1496422020 -0600 commit: a.txt: initial 251e6b3461a3b5adc6bab694d5ae1abc878edf85 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496422026 -0600 checkout: moving from branch-a to master 8be6d64cddab01f53381e9feafe50d95ca5e6629 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496422029 -0600 checkout: moving from master to branch-b 8be6d64cddab01f53381e9feafe50d95ca5e6629 15805fe2044dc1a0508853e93d1a230bd94636be Taylor Blau 1496422035 -0600 commit: b.txt: initial 15805fe2044dc1a0508853e93d1a230bd94636be 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496422041 -0600 checkout: moving from branch-b to master 8be6d64cddab01f53381e9feafe50d95ca5e6629 6c9ccaeb45446e3fa88cd5848a940fd34c18192b Taylor Blau 1496422044 -0600 merge branch-a branch-b: Merge made by the 'octopus' strategy. 
git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/logs/refs/000077500000000000000000000000001472372047300252165ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/logs/refs/heads/000077500000000000000000000000001472372047300263025ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/logs/refs/heads/branch-a000066400000000000000000000004631472372047300277030ustar00rootroot000000000000000000000000000000000000000000000000000000 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496422012 -0600 branch: Created from HEAD 8be6d64cddab01f53381e9feafe50d95ca5e6629 251e6b3461a3b5adc6bab694d5ae1abc878edf85 Taylor Blau 1496422020 -0600 commit: a.txt: initial git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/logs/refs/heads/branch-b000066400000000000000000000004631472372047300277040ustar00rootroot000000000000000000000000000000000000000000000000000000 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496422029 -0600 branch: Created from HEAD 8be6d64cddab01f53381e9feafe50d95ca5e6629 15805fe2044dc1a0508853e93d1a230bd94636be Taylor Blau 1496422035 -0600 commit: b.txt: initial git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/logs/refs/heads/master000066400000000000000000000005421472372047300275210ustar00rootroot000000000000000000000000000000000000000000000000000000 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496421999 -0600 commit (initial): initial commit 8be6d64cddab01f53381e9feafe50d95ca5e6629 6c9ccaeb45446e3fa88cd5848a940fd34c18192b Taylor Blau 1496422044 -0600 merge branch-a branch-b: Merge made by the 'octopus' strategy. git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/objects/000077500000000000000000000000001472372047300247445ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/objects/04/000077500000000000000000000000001472372047300251675ustar00rootroot00000000000000df07b08ca746b3167d0f1d1514e2f39a52c16c000066400000000000000000000000661472372047300322270ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/objects/04x+)JMU06g040031QH+(a';fT!Uc.Kwgit-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/objects/15/000077500000000000000000000000001472372047300251715ustar00rootroot00000000000000805fe2044dc1a0508853e93d1a230bd94636be000066400000000000000000000002371472372047300320730ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/objects/15xA 0=li""v@Jق< /UEr1ҎX3i(w%2zΙF v]Pt9!J m\Vϴ>f/Mn?xiWzס=whƓl\dFgit-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/objects/25/000077500000000000000000000000001472372047300251725ustar00rootroot000000000000001e6b3461a3b5adc6bab694d5ae1abc878edf85000066400000000000000000000002401472372047300326170ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/objects/25x 1ENieMƍ5$``fA7؁_M۲T֚4fq.Kb X 9%`(ZAgRsrgx[ao}135OBp{{b^ll~a'lO>[1git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/objects/8b/000077500000000000000000000000001472372047300252555ustar00rootroot00000000000000e6d64cddab01f53381e9feafe50d95ca5e6629000066400000000000000000000002001472372047300326330ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/objects/8bxK 1]};$D3x|08zޠ'pUPPŭ*@d hSA9n5|BYxё\Tsw7M+)n 
dBpDwU}U෠>1I84git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/objects/b6/000077500000000000000000000000001472372047300252535ustar00rootroot00000000000000fc4c620b67d95f953a5c1c1230aaab5db5a1b0000066400000000000000000000000241472372047300325060ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/objects/b6xKOR0eH git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/objects/e9/000077500000000000000000000000001472372047300252615ustar00rootroot000000000000004edfabfb7605f7cb959b4ce8fb6652b509fe03000066400000000000000000000001201472372047300326450ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/objects/e9x+)JMU07`040031QH+(aK}h/tTAFjNN>XѶ?>I7ZZu6git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/refs/000077500000000000000000000000001472372047300242525ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/refs/heads/000077500000000000000000000000001472372047300253365ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/refs/heads/branch-a000066400000000000000000000000511472372047300267300ustar00rootroot00000000000000251e6b3461a3b5adc6bab694d5ae1abc878edf85 git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/refs/heads/branch-b000066400000000000000000000000511472372047300267310ustar00rootroot0000000000000015805fe2044dc1a0508853e93d1a230bd94636be git-lfs-3.6.1/git/githistory/fixtures/octopus-merge.git/refs/heads/master000066400000000000000000000000511472372047300265500ustar00rootroot000000000000006c9ccaeb45446e3fa88cd5848a940fd34c18192b git-lfs-3.6.1/git/githistory/fixtures/packed-objects.git/000077500000000000000000000000001472372047300234005ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/packed-objects.git/HEAD000066400000000000000000000000271472372047300240230ustar00rootroot00000000000000ref: refs/heads/master git-lfs-3.6.1/git/githistory/fixtures/packed-objects.git/config000066400000000000000000000002101472372047300245610ustar00rootroot00000000000000[core] repositoryformatversion = 0 filemode = true bare = true logallrefupdates = true ignorecase = true precomposeunicode = true git-lfs-3.6.1/git/githistory/fixtures/packed-objects.git/index000066400000000000000000000002111472372047300244240ustar00rootroot00000000000000DIRCY Y 'yV&˂=, 1504643527 -0400 commit (initial): *: initial commit git-lfs-3.6.1/git/githistory/fixtures/packed-objects.git/logs/refs/000077500000000000000000000000001472372047300253035ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/packed-objects.git/logs/refs/heads/000077500000000000000000000000001472372047300263675ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/packed-objects.git/logs/refs/heads/master000066400000000000000000000002451472372047300276060ustar00rootroot000000000000000000000000000000000000000000000000000000 749f1b43e00eeb98194fedb7827b3cfb43b42b0e Taylor Blau 1504643527 -0400 commit (initial): *: initial commit git-lfs-3.6.1/git/githistory/fixtures/packed-objects.git/objects/000077500000000000000000000000001472372047300250315ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/packed-objects.git/objects/info/000077500000000000000000000000001472372047300257645ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/packed-objects.git/objects/info/packs000066400000000000000000000000661472372047300270120ustar00rootroot00000000000000P pack-ac516ce2d006668dc5e001e8dda0aa1c7198500f.pack 
git-lfs-3.6.1/git/githistory/fixtures/packed-objects.git/objects/pack/000077500000000000000000000000001472372047300257475ustar00rootroot00000000000000pack-ac516ce2d006668dc5e001e8dda0aa1c7198500f.idx000066400000000000000000000022041472372047300350010ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/packed-objects.git/objects/packtOcGk 1ԅtCO{xHQ(/IQ$x340031QH+(aXeMlбq{* jQlfݠqPgit-lfs-3.6.1/git/githistory/fixtures/packed-objects.git/refs/000077500000000000000000000000001472372047300243375ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/packed-objects.git/refs/heads/000077500000000000000000000000001472372047300254235ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/packed-objects.git/refs/heads/master000066400000000000000000000000511472372047300266350ustar00rootroot00000000000000749f1b43e00eeb98194fedb7827b3cfb43b42b0e git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/000077500000000000000000000000001472372047300241455ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/HEAD000066400000000000000000000000271472372047300245700ustar00rootroot00000000000000ref: refs/heads/master git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/config000066400000000000000000000002111472372047300253270ustar00rootroot00000000000000[core] repositoryformatversion = 0 filemode = true bare = false logallrefupdates = true ignorecase = true precomposeunicode = true git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/index000066400000000000000000000003701472372047300251770ustar00rootroot00000000000000DIRCY0/Y0/2l#=!6e3=G'Ca.txtY0Y02uc #T.tY H subdir/b.txtTREE82 1 G"j% #IS^(subdir1 0 M6s МϫpǏ)؉9;¹ggit-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/logs/000077500000000000000000000000001472372047300251115ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/logs/HEAD000066400000000000000000000004721472372047300255400ustar00rootroot000000000000000000000000000000000000000000000000000000 0b4747509ab885114690ff291f8f108045b1d749 Taylor Blau 1496362788 -0600 commit (initial): initial commit 0b4747509ab885114690ff291f8f108045b1d749 b9621d5d84b3174de020ad2c869f43b2f61f337f Taylor Blau 1496362801 -0600 commit: a.txt: changes git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/logs/refs/000077500000000000000000000000001472372047300260505ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/logs/refs/heads/000077500000000000000000000000001472372047300271345ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/logs/refs/heads/master000066400000000000000000000004721472372047300303550ustar00rootroot000000000000000000000000000000000000000000000000000000 0b4747509ab885114690ff291f8f108045b1d749 Taylor Blau 1496362788 -0600 commit (initial): initial commit 0b4747509ab885114690ff291f8f108045b1d749 b9621d5d84b3174de020ad2c869f43b2f61f337f Taylor Blau 1496362801 -0600 commit: a.txt: changes git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/objects/000077500000000000000000000000001472372047300255765ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/objects/0b/000077500000000000000000000000001472372047300260775ustar00rootroot000000000000004747509ab885114690ff291f8f108045b1d749000066400000000000000000000002001472372047300325240ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/objects/0bxA EsF z^`Ld+ zO,`=IgqYr6!hx(jOj);,WC 
zfo5gVÎ;CY2-/9igit-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/objects/12/000077500000000000000000000000001472372047300260205ustar00rootroot00000000000000b98c239e8f933d213617a1b965333d478b2743000066400000000000000000000000211472372047300325240ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/objects/12xKOR0bH4git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/objects/2e/000077500000000000000000000000001472372047300261045ustar00rootroot0000000000000065efe2a145dda7ee51d1741299f848e5bf752e000066400000000000000000000000201472372047300332530ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/objects/2exKOR0dH Rgit-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/objects/47/000077500000000000000000000000001472372047300260305ustar00rootroot00000000000000d4d71022adc7ec6a14250d23491e535ec228f4000066400000000000000000000001231472372047300330000ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/objects/47x+)JMU03c040031QH+(a٣<ƶ&@P\YİWqf׏^9yau=ygit-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/objects/5e/000077500000000000000000000000001472372047300261075ustar00rootroot00000000000000497ceceb14ad3c43bac781ed5c804bc67e8f3b000066400000000000000000000001231472372047300335400ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/objects/5ex+)JMU03c040031QH+(aK}h/t(&d1l\tYWq^) git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/objects/63/000077500000000000000000000000001472372047300260265ustar00rootroot00000000000000d8dbd40c23542e740659a7168a0ce3138ea748000066400000000000000000000000201472372047300327320ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/objects/63xKOR0dH Sgit-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/objects/b9/000077500000000000000000000000001472372047300261105ustar00rootroot00000000000000621d5d84b3174de020ad2c869f43b2f61f337f000066400000000000000000000002421472372047300330770ustar00rootroot00000000000000git-lfs-3.6.1/git/githistory/fixtures/repeated-subtrees.git/objects/b9x[j@ HJ9C. jBG1Am?lS@$&Q)@ jVȦw0qȔs@Xw*sGa–8yڼn* // []string{"a", "b", "c.txt"}). parts := strings.Split(path, "/") for i := 0; i < len(parts)-1; i++ { part := parts[i] // Load the subtree given by that name. var subtree *gitobj.Tree for _, entry := range root.Entries { if entry.Name != part { continue } subtree, err = db.Tree(entry.Oid) if err != nil { t.Fatalf("gitobj: cannot load subtree %s: %s", filepath.Join(parts[:i]...), err) } break } if subtree == nil { t.Fatalf("gitobj: subtree %s does not exist", path) } // And re-assign it to root, creating a sort of pseudo-recursion. root = subtree } filename := parts[len(parts)-1] // Find the blob given by the last entry in parts (the filename). var blob *gitobj.Blob for _, entry := range root.Entries { if entry.Name == filename { blob, err = db.Blob(entry.Oid) if err != nil { t.Fatalf("gitobj: cannot load blob %x: %s", entry.Oid, err) } } } // If we couldn't find the blob, fail immediately. if blob == nil { t.Fatalf("gitobj: blob at %s in %s does not exist", path, tree) } // Perform an assertion on the blob's contents. got, err := io.ReadAll(blob.Contents) if err != nil { t.Fatalf("gitobj: cannot read contents from blob %s: %s", path, err) } assert.Equal(t, contents, string(got)) } // AssertCommitParent asserts that the given commit has a parent equivalent to // the one provided. 
func AssertCommitParent(t *testing.T, db *gitobj.ObjectDatabase, sha, parent string) { commit, err := db.Commit(HexDecode(t, sha)) if err != nil { t.Fatalf("gitobj: expected to read commit: %s, couldn't: %v", sha, err) } decoded, err := hex.DecodeString(parent) if err != nil { t.Fatalf("gitobj: expected to decode parent SHA: %s, couldn't: %v", parent, err) } assert.Contains(t, commit.ParentIDs, decoded, "gitobj: expected parents of commit: %s to contain: %s", sha, parent) } // AssertCommitTree asserts that the given commit has a tree equivalent to the // one provided. func AssertCommitTree(t *testing.T, db *gitobj.ObjectDatabase, sha, tree string) { commit, err := db.Commit(HexDecode(t, sha)) if err != nil { t.Fatalf("gitobj: expected to read commit: %s, couldn't: %v", sha, err) } decoded, err := hex.DecodeString(tree) if err != nil { t.Fatalf("gitobj: expected to decode tree SHA: %s, couldn't: %v", tree, err) } assert.Equal(t, decoded, commit.TreeID, "gitobj: expected tree ID: %s (got: %x)", tree, commit.TreeID) } // AssertRef asserts that a given refname points at the expected commit. func AssertRef(t *testing.T, db *gitobj.ObjectDatabase, ref string, expected []byte) { root, ok := db.Root() assert.True(t, ok, "gitobj: expected *odb.ObjectDatabase to have Root()") cmd := exec.Command("git", "rev-parse", ref) cmd.Dir = root out, err := cmd.Output() assert.Nil(t, err) assert.Equal(t, hex.EncodeToString(expected), strings.TrimSpace(string(out))) } // HexDecode decodes the given ASCII hex-encoded string into []byte's, or fails // the test immediately if the given "sha" wasn't a valid hex-encoded sequence. func HexDecode(t *testing.T, sha string) []byte { b, err := hex.DecodeString(sha) if err != nil { t.Fatalf("gitobj: could not decode string: %q, %v", sha, err) } return b } // copyToTmp copies the given fixture to a temporary directory. func copyToTmp(fixture string) (string, error) { p, err := os.MkdirTemp("", fmt.Sprintf("git-lfs-fixture-%s", filepath.Dir(fixture))) if err != nil { return "", err } if err = copyDir(fixture, p); err != nil { return "", err } return p, nil } // copyDir copies a directory (and recursively all files and subdirectories) // from "from" to "to", preserving permissions. func copyDir(from, to string) error { stat, err := os.Stat(from) if err != nil { return err } if err := os.MkdirAll(to, stat.Mode()); err != nil { return err } entries, err := os.ReadDir(from) if err != nil { return err } for _, entry := range entries { sp := filepath.Join(from, entry.Name()) dp := filepath.Join(to, entry.Name()) if entry.IsDir() { err = copyDir(sp, dp) } else { err = copyFile(sp, dp) } if err != nil { return err } } return nil } // copyFile copies a file from "from" to "to", preserving permissions. 
func copyFile(from, to string) error { src, err := os.Open(from) if err != nil { return err } defer src.Close() dst, err := os.Create(to) if err != nil { return err } defer dst.Close() if _, err = io.Copy(dst, src); err != nil { return err } stat, err := os.Stat(from) if err != nil { return err } return os.Chmod(to, stat.Mode()) } git-lfs-3.6.1/git/githistory/ref_updater.go000066400000000000000000000073501472372047300207230ustar00rootroot00000000000000package githistory import ( "encoding/hex" "fmt" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/tasklog" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/git-lfs/gitobj/v2" ) // refUpdater is a type responsible for moving references from one point in the // Git object graph to another. type refUpdater struct { // CacheFn is a function that returns the SHA1 transformation from an // original hash to a new one. It specifies a "bool" return value // signaling whether or not the given "old" SHA1 was migrated. CacheFn func(old []byte) ([]byte, bool) // Logger logs the progress of reference updating. Logger *tasklog.Logger // Refs is a set of *git.Ref's to migrate. Refs []*git.Ref // Root is the given directory on disk in which the repository is // located. Root string db *gitobj.ObjectDatabase } // UpdateRefs performs the reference update(s) from existing locations (see: // Refs) to their respective new locations in the graph (see CacheFn). // // It creates reflog entries as well as stderr log entries as it progresses // through the reference updates. // // It returns any error encountered, or nil if the reference update(s) was/were // successful. func (r *refUpdater) UpdateRefs() error { list := r.Logger.List(fmt.Sprintf("migrate: %s", tr.Tr.Get("Updating refs"))) defer list.Complete() var maxNameLen int for _, ref := range r.Refs { maxNameLen = tools.MaxInt(maxNameLen, len(ref.Name)) } seen := make(map[string]struct{}) for _, ref := range r.Refs { if err := r.updateOneRef(list, maxNameLen, seen, ref); err != nil { return err } } return nil } func (r *refUpdater) updateOneTag(tag *gitobj.Tag, toObj []byte) ([]byte, error) { newTag, err := r.db.WriteTag(&gitobj.Tag{ Object: toObj, ObjectType: tag.ObjectType, Name: tag.Name, Tagger: tag.Tagger, Message: tag.Message, }) if err != nil { return nil, errors.Wrap(err, tr.Tr.Get("could not rewrite tag: %s", tag.Name)) } return newTag, nil } func (r *refUpdater) updateOneRef(list *tasklog.ListTask, maxNameLen int, seen map[string]struct{}, ref *git.Ref) error { sha1, err := hex.DecodeString(ref.Sha) if err != nil { return errors.Wrap(err, tr.Tr.Get("could not decode: %q", ref.Sha)) } refspec := ref.Refspec() if _, ok := seen[refspec]; ok { return nil } seen[refspec] = struct{}{} to, ok := r.CacheFn(sha1) if ref.Type == git.RefTypeLocalTag { tag, _ := r.db.Tag(sha1) if tag != nil && tag.ObjectType == gitobj.TagObjectType { innerTag, _ := r.db.Tag(tag.Object) name := fmt.Sprintf("refs/tags/%s", innerTag.Name) if _, ok := seen[name]; !ok { old, err := git.ResolveRef(name) if err != nil { return err } err = r.updateOneRef(list, maxNameLen, seen, old) if err != nil { return err } } updated, err := git.ResolveRef(name) if err != nil { return err } updatedSha, err := hex.DecodeString(updated.Sha) if err != nil { return errors.Wrap(err, tr.Tr.Get("could not decode: %q", ref.Sha)) } newTag, err := r.updateOneTag(tag, updatedSha) if newTag == nil { return err } to = newTag ok = true } else if tag != nil && 
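// (The condition continued below handles an annotated tag that points
// directly at a commit: the rewritten commit is looked up via CacheFn and,
// if it was migrated, a new tag object is written to point at it. The
// branch above handles a nested annotated tag by first resolving and
// updating the inner tag's ref.)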
tag.ObjectType == gitobj.CommitObjectType { toObj, okObj := r.CacheFn(tag.Object) if !okObj { return nil } newTag, err := r.updateOneTag(tag, toObj) if newTag == nil { return err } to = newTag ok = true } } if !ok { return nil } if err := git.UpdateRefIn(r.Root, ref, to, ""); err != nil { return err } namePadding := tools.MaxInt(maxNameLen-len(ref.Name), 0) list.Entry(fmt.Sprintf(" %s%s\t%s -> %x", ref.Name, strings.Repeat(" ", namePadding), ref.Sha, to)) return nil } git-lfs-3.6.1/git/githistory/ref_updater_test.go000066400000000000000000000042451472372047300217620ustar00rootroot00000000000000package githistory import ( "testing" "github.com/git-lfs/git-lfs/v3/git" "github.com/stretchr/testify/assert" ) func TestRefUpdaterMovesRefs(t *testing.T) { db := DatabaseFromFixture(t, "linear-history-with-tags.git") root, _ := db.Root() AssertRef(t, db, "refs/tags/middle", HexDecode(t, "228afe30855933151f7a88e70d9d88314fd2f191")) updater := &refUpdater{ CacheFn: func(old []byte) ([]byte, bool) { return HexDecode(t, "d941e4756add6b06f5bee766fcf669f55419f13f"), true }, Refs: []*git.Ref{ { Name: "middle", Sha: "228afe30855933151f7a88e70d9d88314fd2f191", Type: git.RefTypeLocalTag, }, }, Root: root, db: db, } err := updater.UpdateRefs() assert.NoError(t, err) AssertRef(t, db, "refs/tags/middle", HexDecode(t, "d941e4756add6b06f5bee766fcf669f55419f13f")) } func TestRefUpdaterMovesRefsWithAnnotatedTags(t *testing.T) { db := DatabaseFromFixture(t, "linear-history-with-annotated-tags.git") root, _ := db.Root() AssertRef(t, db, "refs/tags/middle", HexDecode(t, "05797a38b05f910e6efe40dc1a5c0a046a9403e8")) updater := &refUpdater{ CacheFn: func(old []byte) ([]byte, bool) { return HexDecode(t, "d941e4756add6b06f5bee766fcf669f55419f13f"), true }, Refs: []*git.Ref{ { Name: "middle", Sha: "05797a38b05f910e6efe40dc1a5c0a046a9403e8", Type: git.RefTypeLocalTag, }, }, Root: root, db: db, } err := updater.UpdateRefs() assert.NoError(t, err) AssertRef(t, db, "refs/tags/middle", HexDecode(t, "9a3c2b4823ad6b300ef25197f0435b267d4f0ad8")) } func TestRefUpdaterIgnoresUnmovedRefs(t *testing.T) { db := DatabaseFromFixture(t, "linear-history-with-tags.git") root, _ := db.Root() AssertRef(t, db, "refs/tags/middle", HexDecode(t, "228afe30855933151f7a88e70d9d88314fd2f191")) updater := &refUpdater{ CacheFn: func(old []byte) ([]byte, bool) { return nil, false }, Refs: []*git.Ref{ { Name: "middle", Sha: "228afe30855933151f7a88e70d9d88314fd2f191", Type: git.RefTypeLocalTag, }, }, Root: root, db: db, } err := updater.UpdateRefs() assert.NoError(t, err) AssertRef(t, db, "refs/tags/middle", HexDecode(t, "228afe30855933151f7a88e70d9d88314fd2f191")) } git-lfs-3.6.1/git/githistory/rewriter.go000066400000000000000000000450701472372047300202670ustar00rootroot00000000000000package githistory import ( "encoding/hex" "fmt" "os" "strings" "sync" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/tasklog" "github.com/git-lfs/git-lfs/v3/tr" "github.com/git-lfs/gitobj/v2" ) // Rewriter allows rewriting topologically equivalent Git histories // between two revisions. type Rewriter struct { // mu guards entries and commits (see below) mu *sync.Mutex // entries is a mapping of old tree entries to new (rewritten) ones. // Since TreeEntry contains a []byte (and is therefore not a key-able // type), a unique TreeEntry -> string function is used for map keys. 
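// (For example, with the entryKey scheme defined later in this file, an
// entry at the illustrative path "foo/bar.txt" whose OID hex-encodes to
// "abc123" would be stored under the map key "foo/bar.txt:abc123".)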
entries map[string]*gitobj.TreeEntry // commits is a mapping of old commit SHAs to new ones, where the ASCII // hex encodings of the SHA1 values are used as map keys. commits map[string][]byte // filter is an optional value used to specify which tree entries // (blobs, subtrees) are modifiable given a BlobFn. If non-nil, this // filter will cull out any unmodifiable subtrees and blobs. filter *filepathfilter.Filter // db is the *ObjectDatabase from which blobs, commits, and trees are // loaded. db *gitobj.ObjectDatabase // l is the *tasklog.Logger to which updates are written. l *tasklog.Logger } // RewriteOptions is an options type given to the Rewrite() function. type RewriteOptions struct { // Include is the list of refs whose reachable commits // will be included. Include []string // Exclude is the list of refs whose reachable commits // will be excluded. Exclude []string // UpdateRefs specifies whether the Rewriter should move refs from the // original graph onto the migrated one. If true, the refs will be // moved, and a reflog entry will be created. UpdateRefs bool // Verbose mode prints migrated objects. Verbose bool // ObjectMapFilePath is the path to the map of old sha1 to new sha1 // commits. ObjectMapFilePath string // BlobFn specifies a function to rewrite blobs. // // It is called once per unique, unchanged path. That is to say, if // /a/foo and /a/bar contain identical contents, the BlobFn will be // called twice: once for /a/foo and once for /a/bar, but no more on // each blob for subsequent revisions, so long as each entry remains // unchanged. BlobFn BlobRewriteFn // TreePreCallbackFn specifies a function to be called before opening a // tree for rewriting. It will be called on all trees throughout history // in topological ordering through the tree, starting at the root. TreePreCallbackFn TreePreCallbackFn // TreeCallbackFn specifies a function to rewrite trees after they have // been reassembled by calling the above BlobFn on all existing tree // entries. TreeCallbackFn TreeCallbackFn } // blobFn returns a usable BlobRewriteFn, either the one that was given in the // *RewriteOptions, or a noopBlobFn. func (r *RewriteOptions) blobFn() BlobRewriteFn { if r.BlobFn == nil { return noopBlobFn } return r.BlobFn } // treePreFn returns a usable TreePreCallbackFn, either the one that was given // in the // *RewriteOptions, or a noopTreePreFn. func (r *RewriteOptions) treePreFn() TreePreCallbackFn { if r.TreePreCallbackFn == nil { return noopTreePreFn } return r.TreePreCallbackFn } // treeFn returns a usable TreeCallbackFn, either the one that was given in the // *RewriteOptions, or a noopTreeFn. func (r *RewriteOptions) treeFn() TreeCallbackFn { if r.TreeCallbackFn == nil { return noopTreeFn } return r.TreeCallbackFn } // BlobRewriteFn is a mapping function that takes a given blob and returns a // new, modified blob. If it returns an error, the new blob will not be written // and instead the error will be returned from the Rewrite() function. // // Invocations of an instance of BlobRewriteFn are not expected to store the // returned blobs in the *git/gitobj.ObjectDatabase. // // The path argument is given to be an absolute path to the tree entry being // rewritten, where the repository root is the root of the path given. For // instance, a file "b.txt" in directory "dir" would be given as "/dir/b.txt", // whereas a file "a.txt" in the root would be given as "/a.txt". 
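// (As an illustrative sketch only: a BlobRewriteFn that upper-cased text
// blobs might read b.Contents, transform the bytes, and return a new
// *gitobj.Blob whose Contents and Size describe the transformed data. The
// tests in rewriter_test.go build their rewritten blobs in exactly this
// read-transform-return shape.)
//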
// // As above, the path separators are OS specific, and equivalent to the result // of filepath.Join(...) or os.PathSeparator. type BlobRewriteFn func(path string, b *gitobj.Blob) (*gitobj.Blob, error) // TreePreCallbackFn specifies a function to call upon opening a new tree for // rewriting. // // Unlike its sibling TreeCallbackFn, TreePreCallbackFn may not modify the given // tree. // // TreePreCallbackFn can be nil, and will therefore exhibit behavior equivalent // to only calling the BlobFn on existing tree entries. // // If the TreePreCallbackFn returns an error, it will be returned from the // Rewrite() invocation. type TreePreCallbackFn func(path string, t *gitobj.Tree) error // TreeCallbackFn specifies a function to call before writing a re-written tree // to the object database. The TreeCallbackFn can return a modified tree to be // written to the object database instead of one generated from calling BlobFn // on all of the tree entries. // // TreeCallbackFn can be nil, and will therefore exhibit behavior equivalent to // only calling the BlobFn on existing tree entries. // // If the TreeCallbackFn returns an error, it will be returned from the // Rewrite() invocation. type TreeCallbackFn func(path string, t *gitobj.Tree) (*gitobj.Tree, error) type rewriterOption func(*Rewriter) var ( // WithFilter is an optional argument given to the NewRewriter // constructor function to limit invocations of the BlobRewriteFn to // only pathspecs that match the given *filepathfilter.Filter. WithFilter = func(filter *filepathfilter.Filter) rewriterOption { return func(r *Rewriter) { r.filter = filter } } // WithLogger causes updates from the *git/githistory.Rewriter to be // logged to the provided logger, "l". WithLogger = func(l *tasklog.Logger) rewriterOption { return func(r *Rewriter) { r.l = l } } // noopBlobFn is a no-op implementation of the BlobRewriteFn. It returns // the blob that it was given, and returns no error. noopBlobFn = func(path string, b *gitobj.Blob) (*gitobj.Blob, error) { return b, nil } // noopTreePreFn is a no-op implementation of the TreePreCallbackFn. It // performs no action on the tree that it is given, and returns no error. noopTreePreFn = func(path string, t *gitobj.Tree) error { return nil } // noopTreeFn is a no-op implementation of the TreeCallbackFn. It returns // the tree that it was given, and returns no error. noopTreeFn = func(path string, t *gitobj.Tree) (*gitobj.Tree, error) { return t, nil } ) // NewRewriter constructs a *Rewriter from the given *ObjectDatabase instance. func NewRewriter(db *gitobj.ObjectDatabase, opts ...rewriterOption) *Rewriter { rewriter := &Rewriter{ mu: new(sync.Mutex), entries: make(map[string]*gitobj.TreeEntry), commits: make(map[string][]byte), db: db, } for _, opt := range opts { opt(rewriter) } return rewriter } // Rewrite rewrites the range of commits given by // *RewriteOptions.{Include,Exclude} using the BlobRewriteFn to rewrite // the individual blobs. func (r *Rewriter) Rewrite(opt *RewriteOptions) ([]byte, error) { // First, obtain a list of commits to rewrite. 
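// (commitsToMigrate yields these in reverse topological order, parents
// before children; see scannerOpts, which requests a reversed
// git-rev-list topo ordering. The parent re-mapping further below relies
// on that guarantee.)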
commits, err := r.commitsToMigrate(opt) if err != nil { return nil, err } var perc *tasklog.PercentageTask if opt.UpdateRefs { perc = r.l.Percentage(fmt.Sprintf("migrate: %s", tr.Tr.Get("Rewriting commits")), uint64(len(commits))) } else { perc = r.l.Percentage(fmt.Sprintf("migrate: %s", tr.Tr.Get("Examining commits")), uint64(len(commits))) } defer perc.Complete() var vPerc *tasklog.PercentageTask if opt.Verbose { vPerc = perc } var objectMapFile *os.File if len(opt.ObjectMapFilePath) > 0 { objectMapFile, err = os.OpenFile(opt.ObjectMapFilePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) if err != nil { return nil, errors.New(tr.Tr.Get("could not create object map file: %v", err)) } defer objectMapFile.Close() } // Keep track of the last commit that we rewrote. Callers often want // this so that they can perform a git-update-ref(1). var tip []byte for _, oid := range commits { // Load the original commit to access the data necessary in // order to rewrite it. original, err := r.db.Commit(oid) if err != nil { return nil, err } // Rewrite the tree given at that commit. rewrittenTree, err := r.rewriteTree(oid, original.TreeID, "", opt.blobFn(), opt.treePreFn(), opt.treeFn(), vPerc) if err != nil { return nil, err } // Create a new list of parents from the original commit to // point at the rewritten parents in order to create a // topologically equivalent DAG. // // This operation is safe since we are visiting the commits in // reverse topological order and therefore have seen all parents // before children (in other words, r.uncacheCommit(...) will // always return a value, if the prospective parent is a part of // the migration). rewrittenParents := make([][]byte, 0, len(original.ParentIDs)) for _, originalParent := range original.ParentIDs { rewrittenParent, ok := r.uncacheCommit(originalParent) if !ok { // If we haven't seen the parent before, this // means that we're doing a partial migration // and the parent that we're looking for isn't // included. // // Use the original parent to properly link // history across the migration boundary. rewrittenParent = originalParent } rewrittenParents = append(rewrittenParents, rewrittenParent) } // Construct a new commit using the original header information, // but the rewritten set of parents as well as root tree. rewrittenCommit := &gitobj.Commit{ Author: original.Author, Committer: original.Committer, ExtraHeaders: original.ExtraHeaders, Message: original.Message, ParentIDs: rewrittenParents, TreeID: rewrittenTree, } var newSha []byte if original.Equal(rewrittenCommit) { newSha = make([]byte, len(oid)) copy(newSha, oid) } else { newSha, err = r.db.WriteCommit(rewrittenCommit) if err != nil { return nil, err } if objectMapFile != nil { if _, err := fmt.Fprintf(objectMapFile, "%x,%x\n", oid, newSha); err != nil { return nil, err } } } // Cache that commit so that we can reassign children of this // commit. r.cacheCommit(oid, newSha) // Increment the percentage displayed in the terminal. perc.Count(1) // Move the tip forward. tip = newSha } if opt.UpdateRefs { refs, err := r.refsToMigrate() if err != nil { return nil, errors.Wrap(err, tr.Tr.Get("could not find refs to update")) } root, _ := r.db.Root() updater := &refUpdater{ CacheFn: r.uncacheCommit, Logger: r.l, Refs: refs, Root: root, db: r.db, } if err := updater.UpdateRefs(); err != nil { return nil, errors.Wrap(err, tr.Tr.Get("could not update refs")) } } return tip, err } // rewriteTree is a recursive function which rewrites a tree given by the ID // "sha" and path "path". 
It uses the given BlobRewriteFn to rewrite all blobs // within the tree, either calling that function or recursing down into subtrees // by re-assigning the SHA. // // Once it is done assembling the entries in a given subtree, it then calls the // TreeCallbackFn, "tfn", to perform a final traversal of the subtree before // saving it to the object database. // // It returns the new SHA of the rewritten tree, or an error if the tree was // unable to be rewritten. func (r *Rewriter) rewriteTree(commitOID []byte, treeOID []byte, path string, fn BlobRewriteFn, tpfn TreePreCallbackFn, tfn TreeCallbackFn, perc *tasklog.PercentageTask) ([]byte, error) { tree, err := r.db.Tree(treeOID) if err != nil { return nil, err } if err := tpfn("/"+path, tree); err != nil { return nil, err } entries := make([]*gitobj.TreeEntry, 0, len(tree.Entries)) for _, entry := range tree.Entries { var fullpath string if len(path) > 0 { fullpath = strings.Join([]string{path, entry.Name}, "/") } else { fullpath = entry.Name } if !r.allows(entry.Type(), fullpath) { entries = append(entries, copyEntry(entry)) continue } // If this is a symlink, skip it if entry.Filemode == 0120000 { entries = append(entries, copyEntry(entry)) continue } if cached := r.uncacheEntry(fullpath, entry); cached != nil { entries = append(entries, copyEntryMode(cached, entry.Filemode)) continue } var oid []byte switch entry.Type() { case gitobj.BlobObjectType: oid, err = r.rewriteBlob(commitOID, entry.Oid, fullpath, fn, perc) case gitobj.TreeObjectType: oid, err = r.rewriteTree(commitOID, entry.Oid, fullpath, fn, tpfn, tfn, perc) default: oid = entry.Oid } if err != nil { return nil, err } entries = append(entries, r.cacheEntry(fullpath, entry, &gitobj.TreeEntry{ Filemode: entry.Filemode, Name: entry.Name, Oid: oid, })) } rewritten, err := tfn("/"+path, &gitobj.Tree{Entries: entries}) if err != nil { return nil, err } if tree.Equal(rewritten) { return treeOID, nil } return r.db.WriteTree(rewritten) } func copyEntry(e *gitobj.TreeEntry) *gitobj.TreeEntry { if e == nil { return nil } oid := make([]byte, len(e.Oid)) copy(oid, e.Oid) return &gitobj.TreeEntry{ Filemode: e.Filemode, Name: e.Name, Oid: oid, } } func copyEntryMode(e *gitobj.TreeEntry, mode int32) *gitobj.TreeEntry { copied := copyEntry(e) copied.Filemode = mode return copied } func (r *Rewriter) allows(typ gitobj.ObjectType, abs string) bool { switch typ { case gitobj.BlobObjectType: return r.Filter().Allows(strings.TrimPrefix(abs, "/")) case gitobj.CommitObjectType, gitobj.TreeObjectType: return true default: panic(fmt.Sprintf("git/githistory: %s", tr.Tr.Get("unknown entry type: %s", typ))) } } // rewriteBlob calls the given BlobRewriteFn "fn" on a blob given in the object // database by the SHA1 "from" []byte. It writes and returns the new blob SHA, // or an error if either the BlobRewriteFn returned one, or if the object could // not be loaded/saved. func (r *Rewriter) rewriteBlob(commitOID, from []byte, path string, fn BlobRewriteFn, perc *tasklog.PercentageTask) ([]byte, error) { blob, err := r.db.Blob(from) if err != nil { return nil, err } b, err := fn(path, blob) if err != nil { return nil, err } if !blob.Equal(b) { sha, err := r.db.WriteBlob(b) if err != nil { return nil, err } // Close the source blob, so long as it is not equal to the // rewritten blob. If the two are equal, as in the check above // this comment, calling r.db.WriteBlob(b) will have already // closed both "b" and "blob" since they are the same. 
// // Closing an *os.File twice causes an `os.ErrInvalid` to be // returned. if err = blob.Close(); err != nil { return nil, err } if perc != nil { perc.Entry(fmt.Sprintf("migrate: %s", tr.Tr.Get("commit %s: %s", hex.EncodeToString(commitOID), path))) } return sha, nil } // Close the source blob, since it is identical to the rewritten blob, // but neither was written. if err := blob.Close(); err != nil { return nil, err } return from, nil } // commitsToMigrate returns an in-memory copy of a list of commits according to // the output of git-rev-list(1) (given the *RewriteOptions), where each // outputted commit is 20 bytes of raw SHA1. // // If any error was encountered, it will be returned. func (r *Rewriter) commitsToMigrate(opt *RewriteOptions) ([][]byte, error) { waiter := r.l.Waiter(fmt.Sprintf("migrate: %s", tr.Tr.Get("Sorting commits"))) defer waiter.Complete() scanner, err := git.NewRevListScanner( opt.Include, opt.Exclude, r.scannerOpts()) if err != nil { return nil, err } var commits [][]byte for scanner.Scan() { commits = append(commits, scanner.OID()) } if err = scanner.Err(); err != nil { return nil, err } if err = scanner.Close(); err != nil { return nil, err } return commits, nil } // refsToMigrate returns a list of references to migrate, or an error if loading // those references failed. func (r *Rewriter) refsToMigrate() ([]*git.Ref, error) { var refs []*git.Ref var err error if root, ok := r.db.Root(); ok { refs, err = git.AllRefsIn(root) } else { refs, err = git.AllRefs() } if err != nil { return nil, err } var local []*git.Ref for _, ref := range refs { if ref.Type == git.RefTypeRemoteBranch { continue } local = append(local, ref) } return local, nil } // scannerOpts returns a *git.ScanRefsOptions instance to be given to the // *git.RevListScanner. // // If the database this *Rewriter is operating on has a given root (i.e., it is // not in memory), it re-assigns the working directory to be there. func (r *Rewriter) scannerOpts() *git.ScanRefsOptions { opts := &git.ScanRefsOptions{ Mode: git.ScanRefsMode, Order: git.TopoRevListOrder, Reverse: true, CommitsOnly: true, SkippedRefs: make([]string, 0), Mutex: new(sync.Mutex), Names: make(map[string]string), } if root, ok := r.db.Root(); ok { opts.WorkingDir = root } return opts } // Filter returns the filter used by this *Rewriter to filter subtrees, blobs // (see above). func (r *Rewriter) Filter() *filepathfilter.Filter { return r.filter } // cacheEntry caches the given "from" entry so that it is always rewritten as // a *TreeEntry equivalent to "to". func (r *Rewriter) cacheEntry(path string, from, to *gitobj.TreeEntry) *gitobj.TreeEntry { r.mu.Lock() defer r.mu.Unlock() r.entries[r.entryKey(path, from)] = to return to } // uncacheEntry returns a *TreeEntry that is cached from the given *TreeEntry // "from". That is to say, it returns the *TreeEntry that "from" should be // rewritten to, or nil if none could be found. func (r *Rewriter) uncacheEntry(path string, from *gitobj.TreeEntry) *gitobj.TreeEntry { r.mu.Lock() defer r.mu.Unlock() return r.entries[r.entryKey(path, from)] } // entryKey returns a unique key for a given *TreeEntry "e". func (r *Rewriter) entryKey(path string, e *gitobj.TreeEntry) string { return fmt.Sprintf("%s:%x", path, e.Oid) } // cacheCommit caches the given "from" commit SHA so that it is always // rewritten to the new commit SHA "to". 
func (r *Rewriter) cacheCommit(from, to []byte) { r.mu.Lock() defer r.mu.Unlock() r.commits[hex.EncodeToString(from)] = to } // uncacheCommit returns the new commit SHA that is cached for the given // original commit SHA "from". That is to say, it returns the SHA that // "from" should be rewritten to and true, or nil and false if none could be // found. func (r *Rewriter) uncacheCommit(from []byte) ([]byte, bool) { r.mu.Lock() defer r.mu.Unlock() c, ok := r.commits[hex.EncodeToString(from)] return c, ok } git-lfs-3.6.1/git/githistory/rewriter_test.go000066400000000000000000000352121472372047300213230ustar00rootroot00000000000000package githistory import ( "bytes" "encoding/hex" "io" "reflect" "strconv" "strings" "testing" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/gitobj/v2" "github.com/stretchr/testify/assert" ) func TestRewriterRewritesHistory(t *testing.T) { db := DatabaseFromFixture(t, "linear-history.git") r := NewRewriter(db) tip, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) { contents, err := io.ReadAll(b.Contents) if err != nil { return nil, err } n, err := strconv.Atoi(string(contents)) if err != nil { return nil, err } rewritten := strconv.Itoa(n + 1) return &gitobj.Blob{ Contents: strings.NewReader(rewritten), Size: int64(len(rewritten)), }, nil }, }) assert.Nil(t, err) tree1 := "ad0aebd16e34cf047820994ea7538a6d4a111082" tree2 := "6e07bd31cb70c4add2c973481ad4fa38b235ca69" tree3 := "c5decfe1fcf39b8c489f4a0bf3b3823676339f80" // After rewriting, the HEAD state of the repository should contain a // tree identical to: // // 100644 blob bf0d87ab1b2b0ec1a11a3973d2845b42413d9767 hello.txt AssertCommitTree(t, db, hex.EncodeToString(tip), tree1) AssertBlobContents(t, db, tree1, "hello.txt", "4") // After rewriting, the HEAD~1 state of the repository should contain a // tree identical to: // // 100644 blob e440e5c842586965a7fb77deda2eca68612b1f53 hello.txt AssertCommitParent(t, db, hex.EncodeToString(tip), "4aaa3f49ffeabbb874250fe13ffeb8c683aba650") AssertCommitTree(t, db, "4aaa3f49ffeabbb874250fe13ffeb8c683aba650", tree2) AssertBlobContents(t, db, tree2, "hello.txt", "3") // After rewriting, the HEAD~2 state of the repository should contain a // tree identical to: // // 100644 blob d8263ee9860594d2806b0dfd1bfd17528b0ba2a4 hello.txt AssertCommitParent(t, db, "4aaa3f49ffeabbb874250fe13ffeb8c683aba650", "24a341e1ff75addc22e336a8d87f82ba56b86fcf") AssertCommitTree(t, db, "24a341e1ff75addc22e336a8d87f82ba56b86fcf", tree3) AssertBlobContents(t, db, tree3, "hello.txt", "2") } func TestRewriterRewritesOctopusMerges(t *testing.T) { db := DatabaseFromFixture(t, "octopus-merge.git") r := NewRewriter(db) tip, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) { return &gitobj.Blob{ Contents: io.MultiReader(b.Contents, strings.NewReader("_new")), Size: b.Size + int64(len("_new")), }, nil }, }) assert.Nil(t, err) tree := "8a56716daa78325c3d0433cc163890969810b0da" // After rewriting, the HEAD state of the repository should contain a // tree identical to: // // 100644 blob 309f7fc2bfd9ae77b4131cf9cbcc3b548c42ca57 a.txt // 100644 blob 70470dc26cb3eef54fe3dcba53066f7ca7c495c0 b.txt // 100644 blob f2557f74fd5b60f959baf77091782089761e2dc3 hello.txt AssertCommitTree(t, db, hex.EncodeToString(tip), tree) AssertBlobContents(t, db, tree, "a.txt", "a_new") 
AssertBlobContents(t, db, tree, "b.txt", "b_new") AssertBlobContents(t, db, tree, "hello.txt", "hello_new") // And should contain the following parents: // // parent 1fe2b9577d5610e8d8fb2c3030534036fb648393 // parent ca447959bdcd20253d69b227bcc7c2e1d3126d5c AssertCommitParent(t, db, hex.EncodeToString(tip), "1fe2b9577d5610e8d8fb2c3030534036fb648393") AssertCommitParent(t, db, hex.EncodeToString(tip), "ca447959bdcd20253d69b227bcc7c2e1d3126d5c") // And each of those parents should contain the root commit as their own // parent: AssertCommitParent(t, db, "1fe2b9577d5610e8d8fb2c3030534036fb648393", "9237567f379b3c83ddf53ad9a2ae3755afb62a09") AssertCommitParent(t, db, "ca447959bdcd20253d69b227bcc7c2e1d3126d5c", "9237567f379b3c83ddf53ad9a2ae3755afb62a09") } func TestRewriterVisitsPackedObjects(t *testing.T) { db := DatabaseFromFixture(t, "packed-objects.git") r := NewRewriter(db) var contents []byte _, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) { var err error contents, err = io.ReadAll(b.Contents) if err != nil { return nil, err } return &gitobj.Blob{ Contents: bytes.NewReader(contents), Size: int64(len(contents)), }, nil }, }) assert.NoError(t, err) assert.Equal(t, string(contents), "Hello, world!\n") } func TestRewriterDoesntVisitUnchangedSubtrees(t *testing.T) { db := DatabaseFromFixture(t, "repeated-subtrees.git") r := NewRewriter(db) seen := make(map[string]int) _, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) { seen[path] = seen[path] + 1 return b, nil }, }) assert.Nil(t, err) assert.Equal(t, 2, seen["a.txt"]) assert.Equal(t, 1, seen["subdir/b.txt"]) } func TestRewriterVisitsUniqueEntriesWithIdenticalContents(t *testing.T) { db := DatabaseFromFixture(t, "identical-blobs.git") r := NewRewriter(db) tip, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) { if path == "b.txt" { return b, nil } return &gitobj.Blob{ Contents: strings.NewReader("changed"), Size: int64(len("changed")), }, nil }, }) assert.Nil(t, err) tree := "bbbe0a7676523ae02234bfe874784ca2380c2d4b" AssertCommitTree(t, db, hex.EncodeToString(tip), tree) // After rewriting, the HEAD state of the repository should contain a // tree identical to: // // 100644 blob 21fb1eca31e64cd3914025058b21992ab76edcf9 a.txt // 100644 blob 94f3610c08588440112ed977376f26a8fba169b0 b.txt AssertBlobContents(t, db, tree, "a.txt", "changed") AssertBlobContents(t, db, tree, "b.txt", "original") } func TestRewriterIgnoresPathsThatDontMatchFilter(t *testing.T) { include := []string{"*.txt"} exclude := []string{"subdir/*.txt"} filter := filepathfilter.New(include, exclude, filepathfilter.GitIgnore) db := DatabaseFromFixture(t, "non-repeated-subtrees.git") r := NewRewriter(db, WithFilter(filter)) seen := make(map[string]int) _, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) { seen[path] = seen[path] + 1 return b, nil }, }) assert.Nil(t, err) assert.Equal(t, 1, seen["a.txt"]) assert.Equal(t, 0, seen["subdir/b.txt"]) } func TestRewriterAllowsAdditionalTreeEntries(t *testing.T) { db := DatabaseFromFixture(t, "linear-history.git") r := NewRewriter(db) extra, err := db.WriteBlob(&gitobj.Blob{ Contents: strings.NewReader("extra\n"), Size: int64(len("extra\n")), }) assert.Nil(t, err) tip, err := 
r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) { return b, nil }, TreeCallbackFn: func(path string, tr *gitobj.Tree) (*gitobj.Tree, error) { return &gitobj.Tree{ Entries: append(tr.Entries, &gitobj.TreeEntry{ Name: "extra.txt", Filemode: 0100644, Oid: extra, }), }, nil }, }) assert.Nil(t, err) tree1 := "40c2eb627a3b8e84b82a47a973d32960f3898b6a" tree2 := "d7a5bcb69f2cd2652a014663a948952ea603c2c0" tree3 := "45b752554d128f85bf23d7c3ddf48c47cbc345c8" // After rewriting, the HEAD state of the repository should contain a // tree identical to: // // 100644 blob e440e5c842586965a7fb77deda2eca68612b1f53 hello.txt // 100644 blob 0f2287157f7cb0dd40498c7a92f74b6975fa2d57 extra.txt AssertCommitTree(t, db, hex.EncodeToString(tip), tree1) AssertBlobContents(t, db, tree1, "hello.txt", "3") AssertBlobContents(t, db, tree1, "extra.txt", "extra\n") // After rewriting, the HEAD~1 state of the repository should contain a // tree identical to: // // 100644 blob d8263ee9860594d2806b0dfd1bfd17528b0ba2a4 hello.txt // 100644 blob 0f2287157f7cb0dd40498c7a92f74b6975fa2d57 extra.txt AssertCommitParent(t, db, hex.EncodeToString(tip), "45af5deb9a25bc4069b15c1f5bdccb0340978707") AssertCommitTree(t, db, "45af5deb9a25bc4069b15c1f5bdccb0340978707", tree2) AssertBlobContents(t, db, tree2, "hello.txt", "2") AssertBlobContents(t, db, tree2, "extra.txt", "extra\n") // After rewriting, the HEAD~2 state of the repository should contain a // tree identical to: // // 100644 blob 56a6051ca2b02b04ef92d5150c9ef600403cb1de hello.txt // 100644 blob 0f2287157f7cb0dd40498c7a92f74b6975fa2d57 extra.txt AssertCommitParent(t, db, "45af5deb9a25bc4069b15c1f5bdccb0340978707", "99f6bd7cd69b45494afed95b026f3e450de8304f") AssertCommitTree(t, db, "99f6bd7cd69b45494afed95b026f3e450de8304f", tree3) AssertBlobContents(t, db, tree3, "hello.txt", "1") AssertBlobContents(t, db, tree3, "extra.txt", "extra\n") } // CallbackCall is a structure recording information pertinent to when a // *githistory.Rewrite called either BlobFn, TreePreCallbackFn, or // TreeCallbackFn. type CallbackCall struct { Type string Path string } var ( // collectCalls is a function that returns a *RewriteOptions that // updates a pointer to a slice of `*CallbackCall`'s with each call that // is received. 
collectCalls = func(calls *[]*CallbackCall) *RewriteOptions { return &RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) { *calls = append(*calls, &CallbackCall{ Type: "blob", Path: path, }) return b, nil }, TreePreCallbackFn: func(path string, t *gitobj.Tree) error { *calls = append(*calls, &CallbackCall{ Type: "tree-pre", Path: path, }) return nil }, TreeCallbackFn: func(path string, t *gitobj.Tree) (*gitobj.Tree, error) { *calls = append(*calls, &CallbackCall{ Type: "tree-post", Path: path, }) return t, nil }, } } ) func TestHistoryRewriterCallbacks(t *testing.T) { var calls []*CallbackCall db := DatabaseFromFixture(t, "linear-history.git") r := NewRewriter(db) _, err := r.Rewrite(collectCalls(&calls)) assert.Nil(t, err) assert.Len(t, calls, 9) assert.Equal(t, calls[0], &CallbackCall{Type: "tree-pre", Path: "/"}) assert.Equal(t, calls[1], &CallbackCall{Type: "blob", Path: "hello.txt"}) assert.Equal(t, calls[2], &CallbackCall{Type: "tree-post", Path: "/"}) assert.Equal(t, calls[3], &CallbackCall{Type: "tree-pre", Path: "/"}) assert.Equal(t, calls[4], &CallbackCall{Type: "blob", Path: "hello.txt"}) assert.Equal(t, calls[5], &CallbackCall{Type: "tree-post", Path: "/"}) assert.Equal(t, calls[6], &CallbackCall{Type: "tree-pre", Path: "/"}) assert.Equal(t, calls[7], &CallbackCall{Type: "blob", Path: "hello.txt"}) assert.Equal(t, calls[8], &CallbackCall{Type: "tree-post", Path: "/"}) } func TestHistoryRewriterCallbacksSubtrees(t *testing.T) { var calls []*CallbackCall db := DatabaseFromFixture(t, "non-repeated-subtrees.git") r := NewRewriter(db) _, err := r.Rewrite(collectCalls(&calls)) assert.Nil(t, err) assert.Len(t, calls, 8) assert.Equal(t, calls[0], &CallbackCall{Type: "tree-pre", Path: "/"}) assert.Equal(t, calls[1], &CallbackCall{Type: "blob", Path: "a.txt"}) assert.Equal(t, calls[2], &CallbackCall{Type: "tree-post", Path: "/"}) assert.Equal(t, calls[3], &CallbackCall{Type: "tree-pre", Path: "/"}) assert.Equal(t, calls[4], &CallbackCall{Type: "tree-pre", Path: "/subdir"}) assert.Equal(t, calls[5], &CallbackCall{Type: "blob", Path: "subdir/b.txt"}) assert.Equal(t, calls[6], &CallbackCall{Type: "tree-post", Path: "/subdir"}) assert.Equal(t, calls[7], &CallbackCall{Type: "tree-post", Path: "/"}) } func TestHistoryRewriterTreePreCallbackPropagatesErrors(t *testing.T) { expected := errors.Errorf("my error") db := DatabaseFromFixture(t, "linear-history.git") r := NewRewriter(db) _, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) { return b, nil }, TreePreCallbackFn: func(path string, t *gitobj.Tree) error { return expected }, }) assert.Equal(t, err, expected) } func TestHistoryRewriterUseOriginalParentsForPartialMigration(t *testing.T) { db := DatabaseFromFixture(t, "linear-history-with-tags.git") r := NewRewriter(db) tip, err := r.Rewrite(&RewriteOptions{ Include: []string{"refs/heads/master"}, Exclude: []string{"refs/tags/middle"}, BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) { return b, nil }, }) // After rewriting, the rewriter should have only modified the latest // commit (HEAD), and excluded the first two, both reachable by // refs/tags/middle. 
// // This should modify one commit, and appropriately link the parent as // follows: // // tree 20ecedad3e74a113695fe5f00ab003694e2e1e9c // parent 228afe30855933151f7a88e70d9d88314fd2f191 // author Taylor Blau 1496954214 -0600 // committer Taylor Blau 1496954214 -0600 // // some.txt: c expectedParent := "228afe30855933151f7a88e70d9d88314fd2f191" assert.NoError(t, err) AssertCommitParent(t, db, hex.EncodeToString(tip), expectedParent) } func TestHistoryRewriterUpdatesRefs(t *testing.T) { db := DatabaseFromFixture(t, "linear-history.git") r := NewRewriter(db) AssertRef(t, db, "refs/heads/master", HexDecode(t, "e669b63f829bfb0b91fc52a5bcea53dd7977a0ee")) tip, err := r.Rewrite(&RewriteOptions{ Include: []string{"refs/heads/master"}, UpdateRefs: true, BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) { suffix := strings.NewReader("_suffix") return &gitobj.Blob{ Contents: io.MultiReader(b.Contents, suffix), Size: b.Size + int64(suffix.Len()), }, nil }, }) assert.Nil(t, err) c1 := hex.EncodeToString(tip) c2 := "66561fe3ae68651658e18e48053dcfe66a2e9da1" c3 := "8268d8486c48024a871fa42fc487dbeabd6e3d86" AssertRef(t, db, "refs/heads/master", tip) AssertCommitParent(t, db, c1, c2) AssertCommitParent(t, db, c2, c3) } func TestHistoryRewriterReturnsFilter(t *testing.T) { f := filepathfilter.New([]string{"a"}, []string{"b"}, filepathfilter.GitIgnore) r := NewRewriter(nil, WithFilter(f)) expected := reflect.ValueOf(f).Elem().Addr().Pointer() got := reflect.ValueOf(r.Filter()).Elem().Addr().Pointer() assert.Equal(t, expected, got, "git/githistory: expected Rewriter.Filter() to return same *filepathfilter.Filter instance") } // debug is meant to be called from a defer statement to aid in debugging a // test failure among any in this file. // // Callers are expected to call it immediately after calling the Rewrite() // function. func debug(t *testing.T, db *gitobj.ObjectDatabase, tip []byte, err error) { root, ok := db.Root() t.Log(strings.Repeat("*", 80)) t.Logf("* root=%s, ok=%t\n", root, ok) t.Logf("* tip=%x\n", tip) t.Logf("* err=%s\n", err) t.Log(strings.Repeat("*", 80)) } git-lfs-3.6.1/git/ls_files.go000066400000000000000000000041161472372047300160130ustar00rootroot00000000000000package git import ( "bufio" "io" "path" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) type lsFileInfo struct { BaseName string FullPath string } type LsFiles struct { Files map[string]*lsFileInfo FilesByName map[string][]*lsFileInfo } func NewLsFiles(workingDir string, standardExclude bool, untracked bool) (*LsFiles, error) { args := []string{ "ls-files", "-z", // Use a NUL separator. This also disables the escaping of special characters. "--cached", } if IsGitVersionAtLeast("2.35.0") { args = append(args, "--sparse") } if standardExclude { args = append(args, "--exclude-standard") } if untracked { args = append(args, "--others") } cmd, err := gitNoLFS(args...)
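// (Illustrative aside.) Depending on the parameters to NewLsFiles and the
// installed Git version, the argument list assembled above produces
// invocations such as the following ("dir" is a hypothetical working
// directory):
//
//	NewLsFiles(dir, true, false) // git ls-files -z --cached [--sparse] --exclude-standard
//	NewLsFiles(dir, false, true) // git ls-files -z --cached [--sparse] --others
//
// Any error from the invocation above is checked immediately below: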
if err != nil { return nil, err } cmd.Dir = workingDir tracerx.Printf("NewLsFiles: running in %s git %s", workingDir, strings.Join(args, " ")) // Capture stdout and stderr stdout, err := cmd.StdoutPipe() if err != nil { return nil, err } stderr, err := cmd.StderrPipe() if err != nil { return nil, err } scanner := bufio.NewScanner(stdout) scanner.Split(tools.SplitOnNul) if err := cmd.Start(); err != nil { return nil, err } rv := &LsFiles{ Files: make(map[string]*lsFileInfo), FilesByName: make(map[string][]*lsFileInfo), } // Setup a goroutine to drain stderr as large amounts of error output may cause // the subprocess to block. errorMessages := make(chan []byte) go func() { msg, _ := io.ReadAll(stderr) errorMessages <- msg }() // Read all files for scanner.Scan() { base := path.Base(scanner.Text()) finfo := &lsFileInfo{ BaseName: base, FullPath: scanner.Text(), } rv.Files[scanner.Text()] = finfo rv.FilesByName[base] = append(rv.FilesByName[base], finfo) } // Check the output of the subprocess, output stderr if the command failed. msg := <-errorMessages if err := cmd.Wait(); err != nil { return nil, errors.New(tr.Tr.Get("Error in `git %s`: %v %s", strings.Join(args, " "), err, msg)) } return rv, nil } git-lfs-3.6.1/git/ls_tree_scanner.go000066400000000000000000000033201472372047300173550ustar00rootroot00000000000000package git import ( "bufio" "bytes" "io" "strconv" "strings" ) // An entry from ls-tree or rev-list including a blob sha and tree path type TreeBlob struct { Oid string Size int64 Mode int32 Filename string } type LsTreeScanner struct { s *bufio.Scanner tree *TreeBlob } func NewLsTreeScanner(r io.Reader) *LsTreeScanner { s := bufio.NewScanner(r) s.Split(scanNullLines) return &LsTreeScanner{s: s} } func (s *LsTreeScanner) TreeBlob() *TreeBlob { return s.tree } func (s *LsTreeScanner) Err() error { return nil } func (s *LsTreeScanner) Scan() bool { t, hasNext := s.next() s.tree = t return hasNext } func (s *LsTreeScanner) next() (*TreeBlob, bool) { hasNext := s.s.Scan() line := s.s.Text() parts := strings.SplitN(line, "\t", 2) if len(parts) < 2 { return nil, hasNext } attrs := strings.SplitN(parts[0], " ", 4) if len(attrs) < 4 { return nil, hasNext } mode, err := strconv.ParseInt(strings.TrimSpace(attrs[0]), 8, 32) if err != nil { return nil, hasNext } if attrs[1] != "blob" { return nil, hasNext } sz, err := strconv.ParseInt(strings.TrimSpace(attrs[3]), 10, 64) if err != nil { return nil, hasNext } oid := attrs[2] filename := parts[1] return &TreeBlob{Oid: oid, Size: sz, Mode: int32(mode), Filename: filename}, hasNext } func scanNullLines(data []byte, atEOF bool) (advance int, token []byte, err error) { if atEOF && len(data) == 0 { return 0, nil, nil } if i := bytes.IndexByte(data, '\000'); i >= 0 { // We have a full null-terminated line. return i + 1, data[0:i], nil } // If we're at EOF, we have a final, non-terminated line. Return it. if atEOF { return len(data), data, nil } // Request more data. 
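// (Illustrative aside.) scanNullLines follows the bufio.SplitFunc contract,
// so it can be plugged into any bufio.Scanner; a minimal usage sketch,
// assuming only "bufio", "fmt", and "strings":
//
//	scanner := bufio.NewScanner(strings.NewReader("a\x00b\x00c"))
//	scanner.Split(scanNullLines)
//	for scanner.Scan() {
//		fmt.Println(scanner.Text()) // "a", "b", then the final unterminated "c"
//	}
//
// Per that contract, returning a zero advance with a nil token (below) asks
// the Scanner to read additional input: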
return 0, nil, nil } git-lfs-3.6.1/git/ls_tree_scanner_test.go000066400000000000000000000027361472372047300204260ustar00rootroot00000000000000package git import ( "strings" "testing" "github.com/stretchr/testify/assert" ) type genericScanner interface { Err() error Scan() bool } func assertNextScan(t *testing.T, scanner genericScanner) { assert.True(t, scanner.Scan()) assert.Nil(t, scanner.Err()) } func assertScannerDone(t *testing.T, scanner genericScanner) { assert.False(t, scanner.Scan()) assert.Nil(t, scanner.Err()) } func TestLsTreeParser(t *testing.T) { stdout := "100644 blob d899f6551a51cf19763c5955c7a06a2726f018e9 42 .gitattributes\000100644 blob 4d343e022e11a8618db494dc3c501e80c7e18197 126 PB SCN 16 Odhrán.wav" scanner := NewLsTreeScanner(strings.NewReader(stdout)) assertNextTreeBlob(t, scanner, "d899f6551a51cf19763c5955c7a06a2726f018e9", ".gitattributes") assertNextTreeBlob(t, scanner, "4d343e022e11a8618db494dc3c501e80c7e18197", "PB SCN 16 Odhrán.wav") assertScannerDone(t, scanner) } func assertNextTreeBlob(t *testing.T, scanner *LsTreeScanner, oid, filename string) { assertNextScan(t, scanner) b := scanner.TreeBlob() assert.NotNil(t, b) assert.Equal(t, oid, b.Oid) assert.Equal(t, filename, b.Filename) } func BenchmarkLsTreeParser(b *testing.B) { stdout := "100644 blob d899f6551a51cf19763c5955c7a06a2726f018e9 42 .gitattributes\000100644 blob 4d343e022e11a8618db494dc3c501e80c7e18197 126 PB SCN 16 Odhrán.wav" // run the ls-tree parser b.N times for n := 0; n < b.N; n++ { scanner := NewLsTreeScanner(strings.NewReader(stdout)) for scanner.Scan() { } } } git-lfs-3.6.1/git/object_scanner.go000066400000000000000000000112421472372047300171700ustar00rootroot00000000000000package git import ( "encoding/hex" "io" "github.com/git-lfs/git-lfs/v3/tr" "github.com/git-lfs/gitobj/v2" "github.com/git-lfs/gitobj/v2/errors" ) // object represents a generic Git object of any type. type object struct { // Contents reads Git's internal object representation. Contents io.Reader // Oid is the ID of the object. Oid string // Size is the size in bytes of the object. Size int64 // Type is the type of the object being held. Type string // object is the gitobj object being handled. object gitobj.Object } // ObjectScanner is a scanner type that scans for Git objects reference-able in // Git's object database by their unique OID. type ObjectScanner struct { // object is the object that the ObjectScanner last scanned, or nil. object *object // err is the error (if any) that the ObjectScanner encountered during // its last scan, or nil. err error gitobj *gitobj.ObjectDatabase } // NewObjectScanner constructs a new instance of the `*ObjectScanner` type and // returns it. It backs the ObjectScanner with an ObjectDatabase from the // github.com/git-lfs/gitobj/v2 package. // If any errors are encountered while creating the ObjectDatabase, // they will be returned immediately. // Otherwise, an `*ObjectScanner` is returned with no error. func NewObjectScanner(gitEnv, osEnv Environment) (*ObjectScanner, error) { gitdir, err := GitCommonDir() if err != nil { return nil, err } gitobj, err := ObjectDatabase(osEnv, gitEnv, gitdir, "") if err != nil { return nil, err } return NewObjectScannerFrom(gitobj), nil } // NewObjectScannerFrom returns a new `*ObjectScanner` backed by the given // `*gitobj.ObjectDatabase`, "db".
func NewObjectScannerFrom(db *gitobj.ObjectDatabase) *ObjectScanner { return &ObjectScanner{gitobj: db} } // Scan scans for a particular object given by the "oid" parameter. Once the // scan is complete, the Contents(), Sha1(), Size() and Type() functions may be // called and will return data corresponding to the given OID. // // Scan() returns whether the scan was successful, or in other words, whether or // not the scanner can continue to progress. func (s *ObjectScanner) Scan(oid string) bool { if err := s.reset(); err != nil { s.err = err return false } obj, err := s.scan(oid) s.object = obj if err != nil { if err != io.EOF { s.err = err } return false } return true } // Close closes and frees any resources owned by the *ObjectScanner that it is // called upon. If there were any errors in freeing that (those) resource(s), it // will be returned, otherwise nil. func (s *ObjectScanner) Close() error { if s == nil { return nil } s.reset() s.gitobj.Close() return nil } // Contents returns an io.Reader which reads Git's representation of the object // that was last scanned for. func (s *ObjectScanner) Contents() io.Reader { return s.object.Contents } // Sha1 returns the SHA1 object ID of the object that was last scanned for. func (s *ObjectScanner) Sha1() string { return s.object.Oid } // Size returns the size in bytes of the object that was last scanned for. func (s *ObjectScanner) Size() int64 { return s.object.Size } // Type returns the type of the object that was last scanned for. func (s *ObjectScanner) Type() string { return s.object.Type } // Err returns the error (if any) that was encountered during the last Scan() // operation. func (s *ObjectScanner) Err() error { return s.err } func (s *ObjectScanner) reset() error { if s.object != nil { if c, ok := s.object.object.(interface { Close() error }); ok && c != nil { if err := c.Close(); err != nil { return err } } } s.object, s.err = nil, nil return nil } type missingErr struct { oid string } func (m *missingErr) Error() string { return tr.Tr.Get("missing object: %s", m.oid) } func IsMissingObject(err error) bool { _, ok := err.(*missingErr) return ok } func mustDecode(oid string) []byte { x, _ := hex.DecodeString(oid) return x } func (s *ObjectScanner) scan(oid string) (*object, error) { var ( obj gitobj.Object size int64 contents io.Reader ) obj, err := s.gitobj.Object(mustDecode(oid)) if err != nil { if errors.IsNoSuchObject(err) { return nil, &missingErr{oid: oid} } return nil, err } // Currently, we're only interested in the size and contents of blobs, // and gitobj only exposes the size easily for us for blobs anyway.
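// (Illustrative aside.) A caller might drive the scanner roughly as follows,
// where "db" is assumed to come from an earlier ObjectDatabase call and the
// OID is a placeholder:
//
//	scanner := NewObjectScannerFrom(db)
//	if scanner.Scan("decafdecafdecafdecafdecafdecafdecafdecaf") {
//		fmt.Println(scanner.Type(), scanner.Size())
//	} else if err := scanner.Err(); err != nil {
//		// IsMissingObject(err) reports whether the object was absent.
//	}
//
// As noted above, only blobs expose their size and contents easily, hence
// the type check that follows: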
if obj.Type() == gitobj.BlobObjectType { blob := obj.(*gitobj.Blob) size = blob.Size contents = blob.Contents } return &object{ Contents: contents, Oid: oid, Size: size, Type: obj.Type().String(), object: obj, }, nil } git-lfs-3.6.1/git/refs.go000066400000000000000000000043221472372047300151510ustar00rootroot00000000000000package git import ( "fmt" "github.com/rubyist/tracerx" ) type RefUpdate struct { git Env remote string localRef *Ref remoteRef *Ref } func NewRefUpdate(g Env, remote string, localRef, remoteRef *Ref) *RefUpdate { return &RefUpdate{ git: g, remote: remote, localRef: localRef, remoteRef: remoteRef, } } func (u *RefUpdate) LocalRef() *Ref { return u.localRef } func (u *RefUpdate) LocalRefCommitish() string { return refCommitish(u.LocalRef()) } func (u *RefUpdate) RemoteRef() *Ref { if u.remoteRef == nil { u.remoteRef = defaultRemoteRef(u.git, u.remote, u.LocalRef()) } return u.remoteRef } // defaultRemoteRef returns the remote ref receiving a push based on the current // repository config and local ref being pushed. // // See push.default rules in https://git-scm.com/docs/git-config func defaultRemoteRef(g Env, remote string, localRef *Ref) *Ref { pushMode, _ := g.Get("push.default") switch pushMode { case "", "simple": brRemote, _ := g.Get(fmt.Sprintf("branch.%s.remote", localRef.Name)) if brRemote == remote { // in centralized workflow, work like 'upstream' with an added safety to // refuse to push if the upstream branch’s name is different from the // local one. return trackingRef(g, localRef) } // When pushing to a remote that is different from the remote you normally // pull from, work as current. return localRef case "upstream", "tracking": // push the current branch back to the branch whose changes are usually // integrated into the current branch return trackingRef(g, localRef) case "current": // push the current branch to update a branch with the same name on the // receiving end. 
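// (Illustrative aside.) Summarizing the cases above for a hypothetical local
// branch "main" being pushed to remote "origin":
//
//	push.default=simple, branch.main.remote=origin -> branch.main.merge (tracking ref)
//	push.default=simple, pushing to another remote -> refs/heads/main (current behavior)
//	push.default=upstream or tracking              -> branch.main.merge (tracking ref)
//	push.default=current                           -> refs/heads/main, returned here: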
return localRef default: tracerx.Printf("WARNING: %q push mode not supported", pushMode) return localRef } } func trackingRef(g Env, localRef *Ref) *Ref { if merge, ok := g.Get(fmt.Sprintf("branch.%s.merge", localRef.Name)); ok { return ParseRef(merge, "") } return localRef } func (u *RefUpdate) RemoteRefCommitish() string { return refCommitish(u.RemoteRef()) } func refCommitish(r *Ref) string { if len(r.Sha) > 0 { return r.Sha } return r.Name } // copy of env type Env interface { Get(key string) (val string, ok bool) } git-lfs-3.6.1/git/refs_test.go000066400000000000000000000051731472372047300162150ustar00rootroot00000000000000package git import ( "testing" "github.com/stretchr/testify/assert" ) func TestRefUpdateDefault(t *testing.T) { pushModes := []string{"simple", ""} for _, pushMode := range pushModes { env := newEnv(map[string][]string{ "push.default": []string{pushMode}, "branch.local.remote": []string{"ignore"}, "branch.local.merge": []string{"me"}, }) u := NewRefUpdate(env, "origin", ParseRef("refs/heads/local", ""), nil) assert.Equal(t, "local", u.RemoteRef().Name, "pushmode=%q", pushMode) assert.Equal(t, RefTypeLocalBranch, u.RemoteRef().Type, "pushmode=%q", pushMode) } } func TestRefUpdateTrackedDefault(t *testing.T) { pushModes := []string{"simple", "upstream", "tracking", ""} for _, pushMode := range pushModes { env := newEnv(map[string][]string{ "push.default": []string{pushMode}, "branch.local.remote": []string{"origin"}, "branch.local.merge": []string{"refs/heads/tracked"}, }) u := NewRefUpdate(env, "origin", ParseRef("refs/heads/local", ""), nil) assert.Equal(t, "tracked", u.RemoteRef().Name, "pushmode=%s", pushMode) assert.Equal(t, RefTypeLocalBranch, u.RemoteRef().Type, "pushmode=%q", pushMode) } } func TestRefUpdateCurrentDefault(t *testing.T) { env := newEnv(map[string][]string{ "push.default": []string{"current"}, "branch.local.remote": []string{"origin"}, "branch.local.merge": []string{"tracked"}, }) u := NewRefUpdate(env, "origin", ParseRef("refs/heads/local", ""), nil) assert.Equal(t, "local", u.RemoteRef().Name) assert.Equal(t, RefTypeLocalBranch, u.RemoteRef().Type) } func TestRefUpdateExplicitLocalAndRemoteRefs(t *testing.T) { u := NewRefUpdate(nil, "", ParseRef("refs/heads/local", "abc123"), ParseRef("refs/heads/remote", "def456")) assert.Equal(t, "local", u.LocalRef().Name) assert.Equal(t, "abc123", u.LocalRef().Sha) assert.Equal(t, "abc123", u.LocalRefCommitish()) assert.Equal(t, "remote", u.RemoteRef().Name) assert.Equal(t, "def456", u.RemoteRef().Sha) assert.Equal(t, "def456", u.RemoteRefCommitish()) u = NewRefUpdate(nil, "", ParseRef("refs/heads/local", ""), ParseRef("refs/heads/remote", "")) assert.Equal(t, "local", u.LocalRef().Name) assert.Equal(t, "", u.LocalRef().Sha) assert.Equal(t, "local", u.LocalRefCommitish()) assert.Equal(t, "remote", u.RemoteRef().Name) assert.Equal(t, "", u.RemoteRef().Sha) assert.Equal(t, "remote", u.RemoteRefCommitish()) } func newEnv(m map[string][]string) *mapEnv { return &mapEnv{data: m} } type mapEnv struct { data map[string][]string } func (m *mapEnv) Get(key string) (string, bool) { vals, ok := m.data[key] if ok && len(vals) > 0 { return vals[0], true } return "", false } git-lfs-3.6.1/git/rev_list_scanner.go000066400000000000000000000253761472372047300175660ustar00rootroot00000000000000package git import ( "bufio" "encoding/hex" "fmt" "io" "regexp" "strings" "sync" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) // ScanningMode is a constant type that allows 
for variation in the range of // commits to scan when given to the `*git.RevListScanner` type. type ScanningMode int const ( // ScanRefsMode will scan between two refspecs. ScanRefsMode ScanningMode = iota // ScanAllMode will scan all history. ScanAllMode // ScanRangeToRemoteMode will scan the difference between any included // SHA1s and a remote tracking ref. ScanRangeToRemoteMode ) // RevListOrder is a constant type that allows for variation in the ordering of // revisions given by the *RevListScanner below. type RevListOrder int const ( // DefaultRevListOrder is the zero-value for this type and yields the // results as given by git-rev-list(1) without any `---order` // argument given. By default: reverse chronological order. DefaultRevListOrder RevListOrder = iota // DateRevListOrder gives the revisions such that no parents are shown // before children, and otherwise in commit timestamp order. DateRevListOrder // AuthorDateRevListOrder gives the revisions such that no parents are // shown before children, and otherwise in author date timestamp order. AuthorDateRevListOrder // TopoRevListOrder gives the revisions such that they appear in // topological order. TopoRevListOrder ) // Flag returns the command-line flag to be passed to git-rev-list(1) in order // to order the output according to the given RevListOrder. It returns both the // flag ("--date-order", "--topo-order", etc) and a bool, whether or not to // append the flag (for instance, DefaultRevListOrder requires no flag). // // Given a type other than those defined above, Flag() will panic(). func (o RevListOrder) Flag() (string, bool) { switch o { case DefaultRevListOrder: return "", false case DateRevListOrder: return "--date-order", true case AuthorDateRevListOrder: return "--author-date-order", true case TopoRevListOrder: return "--topo-order", true default: panic(fmt.Sprintf("git/rev_list_scanner: %s", tr.Tr.Get("unknown RevListOrder %d", o))) } } // ScanRefsOptions is an "options" type that is used to configure a scan // operation on the `*git.RevListScanner` instance when given to the function // `NewRevListScanner()`. type ScanRefsOptions struct { // Mode is the scan mode to apply, see above. Mode ScanningMode // Remote is the current remote to scan against, if using // ScanRangeToRemoteMode. Remote string // SkipDeletedBlobs specifies whether or not to traverse into commit // ancestry (revealing potentially deleted (unreferenced) blobs, trees, // or commits. SkipDeletedBlobs bool // Order specifies the order in which revisions are yielded from the // output of `git-rev-list(1)`. For more information, see the above // documentation on the RevListOrder type. Order RevListOrder // CommitsOnly specifies whether or not the *RevListScanner should // return only commits, or all objects in range by performing a // traversal of the graph. By default, false: show all objects. CommitsOnly bool // WorkingDir specifies the working directory in which to run // git-rev-list(1). If this is an empty string, (has len(WorkingDir) == // 0), it is equivalent to running in os.Getwd(). WorkingDir string // Reverse specifies whether or not to give the revisions in reverse // order. Reverse bool // SkippedRefs provides a list of refs to ignore. SkippedRefs []string // Mutex guards names. Mutex *sync.Mutex // Names maps Git object IDs (encoded as hex using // hex.EncodeString()) to their names, i.e., a directory name // (fully-qualified) for trees, or a pathspec for blob tree entries. 
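// (Illustrative aside.) For example, after a scan the map might hold
// hypothetical entries such as:
//
//	"4aaa3f49..." -> "images"          (tree: fully-qualified directory name)
//	"bf0d87ab..." -> "images/logo.png" (blob: pathspec of the tree entry)
//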
Names map[string]string } // GetName returns the name associated with a given blob/tree sha and "true" if // it exists, or ("", false) if it doesn't. // // GetName is guarded by a use of o.Mutex, and is goroutine safe. func (o *ScanRefsOptions) GetName(sha string) (string, bool) { o.Mutex.Lock() defer o.Mutex.Unlock() name, ok := o.Names[sha] return name, ok } // SetName sets the name associated with a given blob/tree sha. // // SetName is guarded by a use of o.Mutex, and is therefore goroutine safe. func (o *ScanRefsOptions) SetName(sha, name string) { o.Mutex.Lock() defer o.Mutex.Unlock() o.Names[sha] = name } // RevListScanner is a Scanner type that parses through results of the `git // rev-list` command. type RevListScanner struct { // s is a buffered scanner feeding from the output (stdout) of // git-rev-list(1) invocation. s *bufio.Scanner // closeFn is an optional type returning an error yielded by closing any // resources held by an open (running) instance of the *RevListScanner // type. closeFn func() error // name is the name of the most recently read object. name string // oid is the oid of the most recently read object. oid []byte // err is the most recently encountered error. err error } var ( // ambiguousRegex is a regular expression matching the output of stderr // when ambiguous refnames are encountered. ambiguousRegex = regexp.MustCompile(`warning: refname (.*) is ambiguous`) ) // NewRevListScanner instantiates a new RevListScanner instance scanning all // revisions reachable by refs contained in "include" and not reachable by any // refs included in "excluded", using the *ScanRefsOptions "opt" configuration. // // It returns a new *RevListScanner instance, or an error if one was // encountered. Upon returning, the `git-rev-list(1)` instance is already // running, and Scan() may be called immediately. func NewRevListScanner(include, excluded []string, opt *ScanRefsOptions) (*RevListScanner, error) { stdin, args, err := revListArgs(include, excluded, opt) if err != nil { return nil, err } cmd, err := gitNoLFS(args...) if err != nil { return nil, err } if len(opt.WorkingDir) > 0 { cmd.Dir = opt.WorkingDir } cmd.Stdin = stdin stdout, err := cmd.StdoutPipe() if err != nil { return nil, err } stderr, err := cmd.StderrPipe() if err != nil { return nil, err } tracerx.Printf("run_command: git %s", strings.Join(args, " ")) if err := cmd.Start(); err != nil { return nil, err } return &RevListScanner{ s: bufio.NewScanner(stdout), closeFn: func() error { msg, _ := io.ReadAll(stderr) // First check if there was a non-zero exit code given // when Wait()-ing on the command execution. if err := cmd.Wait(); err != nil { return errors.New(tr.Tr.Get("Error in `git %s`: %v %s", strings.Join(args, " "), err, msg)) } // If the command exited cleanly, but found an ambiguous // refname, promote that to an error and return it. // // `git-rev-list(1)` does not treat ambiguous refnames // as fatal (non-zero exit status), but we do. if am := ambiguousRegex.FindSubmatch(msg); len(am) > 1 { return errors.New(tr.Tr.Get("ref %q is ambiguous", am[1])) } return nil }, }, nil } // revListArgs returns the arguments for a given included and excluded set of // SHA1s, and ScanRefsOptions instance. // // In order, it returns the contents of stdin as an io.Reader, the args passed // to git as a []string, and any error encountered in generating those if one // occurred. 
func revListArgs(include, exclude []string, opt *ScanRefsOptions) (io.Reader, []string, error) { var stdin io.Reader args := []string{"rev-list"} if !opt.CommitsOnly { args = append(args, "--objects") } if opt.Reverse { args = append(args, "--reverse") } if orderFlag, ok := opt.Order.Flag(); ok { args = append(args, orderFlag) } switch opt.Mode { case ScanRefsMode: if opt.SkipDeletedBlobs { args = append(args, "--no-walk") } else { args = append(args, "--do-walk") } stdin = strings.NewReader(strings.Join( includeExcludeShas(include, exclude), "\n")) case ScanAllMode: args = append(args, "--all") case ScanRangeToRemoteMode: args = append(args, "--ignore-missing") if len(opt.SkippedRefs) == 0 { args = append(args, "--not", "--remotes="+opt.Remote) stdin = strings.NewReader(strings.Join( includeExcludeShas(include, exclude), "\n")) } else { stdin = strings.NewReader(strings.Join( append(includeExcludeShas(include, exclude), opt.SkippedRefs...), "\n"), ) } default: return nil, nil, errors.New(tr.Tr.Get("unknown scan type: %d", opt.Mode)) } return stdin, append(args, "--stdin", "--"), nil } func includeExcludeShas(include, exclude []string) []string { include = nonZeroShas(include) exclude = nonZeroShas(exclude) args := make([]string, 0, len(include)+len(exclude)) for _, i := range include { args = append(args, i) } for _, x := range exclude { args = append(args, fmt.Sprintf("^%s", x)) } return args } func nonZeroShas(all []string) []string { nz := make([]string, 0, len(all)) for _, sha := range all { if len(sha) > 0 && !IsZeroObjectID(sha) { nz = append(nz, sha) } } return nz } var startsWithObjectID = regexp.MustCompile(fmt.Sprintf(`\A%s`, ObjectIDRegex)) // Name is an optional field that gives the name of the object (if the object is // a tree, blob). // // It can be called before or after Scan(), but will return "" if called // before. func (s *RevListScanner) Name() string { return s.name } // OID is the hex-decoded bytes of the object's ID. // // It can be called before or after Scan(), but will return "" if called // before. func (s *RevListScanner) OID() []byte { return s.oid } // Err returns the last encountered error (or nil) after a call to Scan(). // // It SHOULD be called, checked and handled after a call to Scan(). func (s *RevListScanner) Err() error { return s.err } // Scan scans the next entry given by git-rev-list(1), and returns true/false // indicating if there are more results to scan. func (s *RevListScanner) Scan() bool { var err error s.oid, s.name, err = s.scan() if err != nil { if err != io.EOF { s.err = err } return false } return len(s.oid) > 0 } // Close closes the RevListScanner by freeing any resources held by the // instance while running, and returns any error encountered while doing so. func (s *RevListScanner) Close() error { if s.closeFn == nil { return nil } return s.closeFn() } // scan provides the internal implementation of scanning a line of text from the // output of `git-rev-list(1)`. 
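// (Illustrative aside.) Lines produced by `git rev-list --objects` carry an
// object ID, optionally followed by a space and a name, e.g. (hypothetical):
//
//	decafdecafdecafdecafdecafdecafdecafdecaf
//	cafecafecafecafecafecafecafecafecafecafe path/to/file.dat
//
// The method below extracts the leading OID and, when present, the name that
// follows it: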
func (s *RevListScanner) scan() ([]byte, string, error) { if !s.s.Scan() { return nil, "", s.s.Err() } line := strings.TrimSpace(s.s.Text()) if len(line) < ObjectIDLengths[0] { return nil, "", nil } oidhex := startsWithObjectID.FindString(line) if len(oidhex) == 0 { return nil, "", errors.New(tr.Tr.Get("missing OID in line (got %q)", line)) } oid, err := hex.DecodeString(oidhex) if err != nil { return nil, "", err } var name string if len(line) > len(oidhex) { name = line[len(oidhex)+1:] } return oid, name, nil } git-lfs-3.6.1/git/rev_list_scanner_test.go000066400000000000000000000142111472372047300206070ustar00rootroot00000000000000package git import ( "bufio" "encoding/hex" "errors" "fmt" "io" "strings" "sync/atomic" "testing" "github.com/stretchr/testify/assert" ) type ArgsTestCase struct { Include []string Exclude []string Opt *ScanRefsOptions ExpectedStdin string ExpectedArgs []string ExpectedErr string } func (c *ArgsTestCase) Assert(t *testing.T) { stdin, args, err := revListArgs(c.Include, c.Exclude, c.Opt) if len(c.ExpectedErr) > 0 { assert.EqualError(t, err, c.ExpectedErr) } else { assert.Nil(t, err) } assert.EqualValues(t, c.ExpectedArgs, args) if stdin != nil { b, err := io.ReadAll(stdin) assert.Nil(t, err) assert.Equal(t, c.ExpectedStdin, string(b)) } else if len(c.ExpectedStdin) > 0 { t.Errorf("git: expected stdin contents %s, got none", c.ExpectedStdin) } } var ( s1 = "decafdecafdecafdecafdecafdecafdecafdecaf" s2 = "cafecafecafecafecafecafecafecafecafecafe" ) func TestRevListArgs(t *testing.T) { for desc, c := range map[string]*ArgsTestCase{ "scan refs deleted, include and exclude": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, SkipDeletedBlobs: false, }, ExpectedStdin: fmt.Sprintf("%s\n^%s", s1, s2), ExpectedArgs: []string{"rev-list", "--objects", "--do-walk", "--stdin", "--"}, }, "scan refs not deleted, include and exclude": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, SkipDeletedBlobs: true, }, ExpectedStdin: fmt.Sprintf("%s\n^%s", s1, s2), ExpectedArgs: []string{"rev-list", "--objects", "--no-walk", "--stdin", "--"}, }, "scan refs deleted, include only": { Include: []string{s1}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, SkipDeletedBlobs: false, }, ExpectedStdin: s1, ExpectedArgs: []string{"rev-list", "--objects", "--do-walk", "--stdin", "--"}, }, "scan refs not deleted, include only": { Include: []string{s1}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, SkipDeletedBlobs: true, }, ExpectedStdin: s1, ExpectedArgs: []string{"rev-list", "--objects", "--no-walk", "--stdin", "--"}, }, "scan all": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanAllMode, }, ExpectedArgs: []string{"rev-list", "--objects", "--all", "--stdin", "--"}, }, "scan include to remote, no skipped refs": { Include: []string{s1}, Opt: &ScanRefsOptions{ Mode: ScanRangeToRemoteMode, Remote: "origin", SkippedRefs: []string{}, }, ExpectedStdin: s1, ExpectedArgs: []string{"rev-list", "--objects", "--ignore-missing", "--not", "--remotes=origin", "--stdin", "--"}, }, "scan include to remote, skipped refs": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanRangeToRemoteMode, Remote: "origin", SkippedRefs: []string{"a", "b", "c"}, }, ExpectedArgs: []string{"rev-list", "--objects", "--ignore-missing", "--stdin", "--"}, ExpectedStdin: s1 + "\n^" + s2 + "\na\nb\nc", }, "scan unknown type": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: 
ScanningMode(-1), }, ExpectedErr: "unknown scan type: -1", }, "scan date order": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, Order: DateRevListOrder, }, ExpectedStdin: fmt.Sprintf("%s\n^%s", s1, s2), ExpectedArgs: []string{"rev-list", "--objects", "--date-order", "--do-walk", "--stdin", "--"}, }, "scan author date order": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, Order: AuthorDateRevListOrder, }, ExpectedStdin: fmt.Sprintf("%s\n^%s", s1, s2), ExpectedArgs: []string{"rev-list", "--objects", "--author-date-order", "--do-walk", "--stdin", "--"}, }, "scan topo order": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, Order: TopoRevListOrder, }, ExpectedStdin: fmt.Sprintf("%s\n^%s", s1, s2), ExpectedArgs: []string{"rev-list", "--objects", "--topo-order", "--do-walk", "--stdin", "--"}, }, "scan commits only": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, CommitsOnly: true, }, ExpectedStdin: fmt.Sprintf("%s\n^%s", s1, s2), ExpectedArgs: []string{"rev-list", "--do-walk", "--stdin", "--"}, }, "scan reverse": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, Reverse: true, }, ExpectedStdin: fmt.Sprintf("%s\n^%s", s1, s2), ExpectedArgs: []string{"rev-list", "--objects", "--reverse", "--do-walk", "--stdin", "--"}, }, } { t.Run(desc, c.Assert) } } func TestRevListScannerCallsClose(t *testing.T) { var called uint32 err := errors.New("this is a marker error") s := &RevListScanner{ closeFn: func() error { atomic.AddUint32(&called, 1) return err }, } got := s.Close() assert.EqualValues(t, 1, atomic.LoadUint32(&called)) assert.Equal(t, err, got) } func TestRevListScannerTreatsCloseFnAsOptional(t *testing.T) { s := &RevListScanner{ closeFn: nil, } defer func() { assert.Nil(t, recover()) }() assert.Nil(t, s.Close()) } func TestRevListScannerParsesLinesWithNames(t *testing.T) { given := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa name.dat" s := &RevListScanner{ s: bufio.NewScanner(strings.NewReader(given)), } assert.True(t, s.Scan()) assert.Equal(t, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", hex.EncodeToString(s.OID())) assert.Equal(t, "name.dat", s.Name()) assert.Nil(t, s.Err()) assert.False(t, s.Scan()) assert.Equal(t, "", s.Name()) assert.Nil(t, s.OID()) assert.Nil(t, s.Err()) } func TestRevListScannerParsesLinesWithoutName(t *testing.T) { given := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" s := &RevListScanner{ s: bufio.NewScanner(strings.NewReader(given)), } assert.True(t, s.Scan()) assert.Equal(t, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", hex.EncodeToString(s.OID())) assert.Nil(t, s.Err()) assert.False(t, s.Scan()) assert.Equal(t, "", s.Name()) assert.Nil(t, s.OID()) assert.Nil(t, s.Err()) } git-lfs-3.6.1/git/version.go000066400000000000000000000040731472372047300157020ustar00rootroot00000000000000package git import ( "regexp" "strconv" "sync" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/rubyist/tracerx" ) var ( gitVersionOnce sync.Once gitVersion string gitVersionErr error ) func Version() (string, error) { gitVersionOnce.Do(func() { gitVersion, gitVersionErr = subprocess.SimpleExec("git", "version") }) return gitVersion, gitVersionErr } // IsVersionAtLeast returns whether the git version is the one specified or higher // argument is plain version string separated by '.' e.g. 
"2.3.1" but can omit minor/patch func IsGitVersionAtLeast(ver string) bool { gitver, err := Version() if err != nil { tracerx.Printf("Error getting git version: %v", err) return false } return IsVersionAtLeast(gitver, ver) } // IsVersionAtLeast compares 2 version strings (ok to be prefixed with 'git version', ignores) func IsVersionAtLeast(actualVersion, desiredVersion string) bool { // Capture 1-3 version digits, optionally prefixed with 'git version' and possibly // with suffixes which we'll ignore (e.g. unstable builds, MinGW versions) verregex := regexp.MustCompile(`(?:git version\s+)?(\d+)(?:.(\d+))?(?:.(\d+))?.*`) var atleast uint64 // Support up to 1000 in major/minor/patch digits const majorscale = 1000 * 1000 const minorscale = 1000 if match := verregex.FindStringSubmatch(desiredVersion); match != nil { // Ignore errors as regex won't match anything other than digits major, _ := strconv.Atoi(match[1]) atleast += uint64(major * majorscale) if len(match) > 2 { minor, _ := strconv.Atoi(match[2]) atleast += uint64(minor * minorscale) } if len(match) > 3 { patch, _ := strconv.Atoi(match[3]) atleast += uint64(patch) } } var actual uint64 if match := verregex.FindStringSubmatch(actualVersion); match != nil { major, _ := strconv.Atoi(match[1]) actual += uint64(major * majorscale) if len(match) > 2 { minor, _ := strconv.Atoi(match[2]) actual += uint64(minor * minorscale) } if len(match) > 3 { patch, _ := strconv.Atoi(match[3]) actual += uint64(patch) } } return actual >= atleast } git-lfs-3.6.1/go.mod000066400000000000000000000037111472372047300142070ustar00rootroot00000000000000// The Git LFS project does not maintain a stable API or ABI for this module. // Please do not import this module outside of the Git LFS project. module github.com/git-lfs/git-lfs/v3 require ( github.com/avast/retry-go v2.4.2+incompatible github.com/dpotapov/go-spnego v0.0.0-20210315154721-298b63a54430 github.com/git-lfs/gitobj/v2 v2.1.1 github.com/git-lfs/go-netrc v0.0.0-20210914205454-f0c862dd687a github.com/git-lfs/pktline v0.0.0-20210330133718-06e9096e2825 github.com/git-lfs/wildmatch/v2 v2.0.1 github.com/jmhodges/clock v1.2.0 github.com/leonelquinteros/gotext v1.5.0 github.com/mattn/go-isatty v0.0.4 github.com/olekukonko/ts v0.0.0-20171002115256-78ecb04241c0 github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17 github.com/rubyist/tracerx v0.0.0-20170927163412-787959303086 github.com/spf13/cobra v1.7.0 github.com/ssgelm/cookiejarparser v1.0.1 github.com/stretchr/testify v1.6.1 github.com/xeipuuv/gojsonschema v0.0.0-20170210233622-6b67b3fab74d golang.org/x/net v0.23.0 golang.org/x/sync v0.1.0 golang.org/x/sys v0.18.0 ) require ( github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.0.0 // indirect github.com/jcmturner/goidentity/v6 v6.0.1 // indirect github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect golang.org/x/crypto v0.21.0 // indirect golang.org/x/text v0.14.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) go 
1.21 git-lfs-3.6.1/go.sum000066400000000000000000000312121472372047300142310ustar00rootroot00000000000000github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA= github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= github.com/avast/retry-go v2.4.2+incompatible h1:+ZjCypQT/CyP0kyJO2EcU4d/ZEJWSbP8NENI578cPmA= github.com/avast/retry-go v2.4.2+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dpotapov/go-spnego v0.0.0-20210315154721-298b63a54430 h1:oempk9HjNt6rVKyKmpdnoN7XABQv3SXLWu3pxUI7Vlk= github.com/dpotapov/go-spnego v0.0.0-20210315154721-298b63a54430/go.mod h1:AVSs/gZKt1bOd2AhkhbS7Qh56Hv7klde22yXVbwYJhc= github.com/git-lfs/gitobj/v2 v2.1.1 h1:tf/VU6zL1kxa3he+nf6FO/syX+LGkm6WGDsMpfuXV7Q= github.com/git-lfs/gitobj/v2 v2.1.1/go.mod h1:q6aqxl6Uu3gWsip5GEKpw+7459F97er8COmU45ncAxw= github.com/git-lfs/go-netrc v0.0.0-20210914205454-f0c862dd687a h1:6pskVZacdMUL93pCpMAYnMDLjH1yDFhssPYGe32sjdk= github.com/git-lfs/go-netrc v0.0.0-20210914205454-f0c862dd687a/go.mod h1:70O4NAtvWn1jW8V8V+OKrJJYcxDLTmIozfi2fmSz5SI= github.com/git-lfs/pktline v0.0.0-20210330133718-06e9096e2825 h1:riQhgheTL7tMF4d5raz9t3+IzoR1i1wqxE1kZC6dY+U= github.com/git-lfs/pktline v0.0.0-20210330133718-06e9096e2825/go.mod h1:fenKRzpXDjNpsIBhuhUzvjCKlDjKam0boRAenTE0Q6A= github.com/git-lfs/wildmatch/v2 v2.0.1 h1:Ds+aobrV5bK0wStILUOn9irllPyf9qrFETbKzwzoER8= github.com/git-lfs/wildmatch/v2 v2.0.1/go.mod h1:EVqonpk9mXbREP3N8UkwoWdrF249uHpCUo5CPXY81gw= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= 
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= github.com/leonelquinteros/gotext v1.5.0 h1:ODY7LzLpZWWSJdAHnzhreOr6cwLXTAmc914FOauSkBM= github.com/leonelquinteros/gotext v1.5.0/go.mod h1:OCiUVHuhP9LGFBQ1oAmdtNCHJCiHiQA8lf4nAifHkr0= github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/olekukonko/ts v0.0.0-20171002115256-78ecb04241c0 h1:LiZB1h0GIcudcDci2bxbqI6DXV8bF8POAnArqvRrIyw= github.com/olekukonko/ts v0.0.0-20171002115256-78ecb04241c0/go.mod h1:F/7q8/HZz+TXjlsoZQQKVYvXTZaFH4QRa3y+j1p7MS0= github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17 h1:chPfVn+gpAM5CTpTyVU9j8J+xgRGwmoDlNDLjKnJiYo= github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rubyist/tracerx v0.0.0-20170927163412-787959303086 h1:mncRSDOqYCng7jOD+Y6+IivdRI6Kzv2BLWYkWkdQfu0= github.com/rubyist/tracerx v0.0.0-20170927163412-787959303086/go.mod h1:YpdgDXpumPB/+EGmGTYHeiW/0QVFRzBYTNFaxWfPDk4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/ssgelm/cookiejarparser v1.0.1 h1:cRdXauUbOTFzTPJFaeiWbHnQ+tRGlpKKzvIK9PUekE4= github.com/ssgelm/cookiejarparser v1.0.1/go.mod h1:DUfC0mpjIzlDN7DzKjXpHj0qMI5m9VrZuz3wSlI+OEI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20170210233622-6b67b3fab74d h1:BJPiQVOMMtJsJIkrF4T6K3RKbzqr7rkaybMk33dlGUo= github.com/xeipuuv/gojsonschema v0.0.0-20170210233622-6b67b3fab74d/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191027093000-83d349e8ac1a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys 
v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200221224223-e1da425f72fd/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= git-lfs-3.6.1/lfs/000077500000000000000000000000001472372047300136635ustar00rootroot00000000000000git-lfs-3.6.1/lfs/attribute.go000066400000000000000000000140221472372047300162140ustar00rootroot00000000000000package lfs import ( "errors" "fmt" "strings" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/tr" ) // Attribute wraps the structure and some operations of Git's conception of an // "attribute", as defined here: http://git-scm.com/docs/gitattributes. type Attribute struct { // The Section of an Attribute refers to the location at which all // properties are relative to. For example, for a Section with the value // "core", Git will produce something like: // // [core] // autocrlf = true // ... Section string // The Properties of an Attribute refer to all of the keys and values // that define that Attribute. Properties map[string]string // Previous values of these attributes that can be automatically upgraded Upgradeables map[string][]string } // FilterOptions serves as an argument to Install(). 
type FilterOptions struct { GitConfig *git.Configuration Force bool File string Local bool Worktree bool System bool SkipSmudge bool } func (o *FilterOptions) Install() error { if o.SkipSmudge { return skipSmudgeFilterAttribute().Install(o) } return filterAttribute().Install(o) } func (o *FilterOptions) Uninstall() error { attrs := filterAttribute() if err := attrs.Uninstall(o); err != nil { return err } for k := range attrs.Properties { name := fmt.Sprintf("%s.%s", attrs.Section, k) if len(o.GitConfig.Find(name)) > 0 { return errors.New(tr.Tr.Get("some filter configuration was not removed (found %s)", name)) } } return nil } func filterAttribute() *Attribute { return &Attribute{ Section: "filter.lfs", Properties: map[string]string{ "clean": "git-lfs clean -- %f", "smudge": "git-lfs smudge -- %f", "process": "git-lfs filter-process", "required": "true", }, Upgradeables: map[string][]string{ "clean": []string{ "git-lfs clean %f", }, "smudge": []string{ "git-lfs smudge %f", "git-lfs smudge --skip %f", "git-lfs smudge --skip -- %f", }, "process": []string{ "git-lfs filter", "git-lfs filter --skip", "git-lfs filter-process --skip", }, }, } } func skipSmudgeFilterAttribute() *Attribute { return &Attribute{ Section: "filter.lfs", Properties: map[string]string{ "clean": "git-lfs clean -- %f", "smudge": "git-lfs smudge --skip -- %f", "process": "git-lfs filter-process --skip", "required": "true", }, Upgradeables: map[string][]string{ "clean": []string{ "git-lfs clean -- %f", }, "smudge": []string{ "git-lfs smudge %f", "git-lfs smudge --skip %f", "git-lfs smudge -- %f", }, "process": []string{ "git-lfs filter", "git-lfs filter --skip", "git-lfs filter-process", }, }, } } // Install instructs Git to set all keys and values relative to the root // location of this Attribute. For any particular key/value pair, if a matching // key is already set, it will be overridden if it is either a) empty, or b) the // `force` argument is passed as true. If an attribute is already set to a // different value than what is given, and force is false, an error will be // returned immediately, and the rest of the attributes will not be set. func (a *Attribute) Install(opt *FilterOptions) error { for k, v := range a.Properties { var upgradeables []string if a.Upgradeables != nil { // use pre-normalised key since caller will have set up the same upgradeables = a.Upgradeables[k] } key := a.normalizeKey(k) if err := a.set(opt.GitConfig, key, v, upgradeables, opt); err != nil { return err } } return nil } // normalizeKey makes an absolute path out of a partial relative one. For a // relative path of "foo", and a root Section of "bar", "bar.foo" will be returned. func (a *Attribute) normalizeKey(relative string) string { return strings.Join([]string{a.Section, relative}, ".") } // set attempts to set a single key/value pair portion of this Attribute. If a // matching key already exists and the value is not equal to the desired value, // an error will be thrown if force is set to false. If force is true, the value // will be overridden. 
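// Values listed in "upgradeables" are treated as stale settings written by // earlier Git LFS versions and are overwritten even when "force" is false.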
func (a *Attribute) set(gitConfig *git.Configuration, key, value string, upgradeables []string, opt *FilterOptions) error { var currentValue string if opt.Local { currentValue = gitConfig.FindLocal(key) } else if opt.Worktree { currentValue = gitConfig.FindWorktree(key) } else if opt.System { currentValue = gitConfig.FindSystem(key) } else if opt.File != "" { currentValue = gitConfig.FindFile(opt.File, key) } else { currentValue = gitConfig.FindGlobal(key) } if opt.Force || shouldReset(currentValue, upgradeables) { var err error if opt.Local { _, err = gitConfig.SetLocal(key, value) } else if opt.Worktree { _, err = gitConfig.SetWorktree(key, value) } else if opt.System { _, err = gitConfig.SetSystem(key, value) } else if opt.File != "" { _, err = gitConfig.SetFile(opt.File, key, value) } else { _, err = gitConfig.SetGlobal(key, value) } return err } else if currentValue != value { return errors.New(tr.Tr.Get("the %q attribute should be %q but is %q", key, value, currentValue)) } return nil } // Uninstall removes all properties in the path of this property. func (a *Attribute) Uninstall(opt *FilterOptions) error { var err error if opt.Local { _, err = opt.GitConfig.UnsetLocalSection(a.Section) } else if opt.Worktree { _, err = opt.GitConfig.UnsetWorktreeSection(a.Section) } else if opt.System { _, err = opt.GitConfig.UnsetSystemSection(a.Section) } else if opt.File != "" { _, err = opt.GitConfig.UnsetFileSection(opt.File, a.Section) } else { _, err = opt.GitConfig.UnsetGlobalSection(a.Section) } return err } // shouldReset determines whether or not a value is resettable given its current // value on the system. If the value is empty (length = 0), then it will pass. // It will also pass if it matches any upgradeable value. func shouldReset(value string, upgradeables []string) bool { if len(value) == 0 { return true } for _, u := range upgradeables { if value == u { return true } } return false } git-lfs-3.6.1/lfs/config.go000066400000000000000000000042051472372047300154600ustar00rootroot00000000000000package lfs import "github.com/git-lfs/git-lfs/v3/config" // FetchPruneConfig collects together the config options that control fetching and pruning type FetchPruneConfig struct { // The number of days prior to current date for which (local) refs other than HEAD // will be fetched with --recent (default 7, 0 = only fetch HEAD) FetchRecentRefsDays int // Makes the FetchRecentRefsDays option apply to remote refs from fetch source as well (default true) FetchRecentRefsIncludeRemotes bool // Number of days prior to the latest commit on a ref for which we'll also fetch // previous LFS changes (default 0 = only fetch at ref) FetchRecentCommitsDays int // Whether to always fetch recent even without --recent FetchRecentAlways bool // Number of days added to FetchRecent*; data outside combined window will be // deleted when prune is run. (default 3) PruneOffsetDays int // Always verify with remote before pruning reachable objects PruneVerifyRemoteAlways bool // When verifying, always verify all reachable and unreachable objects with remote (default false) PruneVerifyUnreachableAlways bool // Name of the remote to use for unpushed and verify checks PruneRemoteName string // Whether to ignore all recent options. PruneRecent bool // Whether to delete everything pushed.
PruneForce bool } func NewFetchPruneConfig(git config.Environment) FetchPruneConfig { pruneRemote, _ := git.Get("lfs.pruneremotetocheck") if len(pruneRemote) == 0 { pruneRemote = "origin" } return FetchPruneConfig{ FetchRecentRefsDays: git.Int("lfs.fetchrecentrefsdays", 7), FetchRecentRefsIncludeRemotes: git.Bool("lfs.fetchrecentremoterefs", true), FetchRecentCommitsDays: git.Int("lfs.fetchrecentcommitsdays", 0), FetchRecentAlways: git.Bool("lfs.fetchrecentalways", false), PruneOffsetDays: git.Int("lfs.pruneoffsetdays", 3), PruneVerifyRemoteAlways: git.Bool("lfs.pruneverifyremotealways", false), PruneVerifyUnreachableAlways: git.Bool("lfs.pruneverifyunreachablealways", false), PruneRemoteName: pruneRemote, PruneRecent: false, PruneForce: false, } } git-lfs-3.6.1/lfs/config_test.go000066400000000000000000000027241472372047300165230ustar00rootroot00000000000000package lfs import ( "testing" "github.com/git-lfs/git-lfs/v3/config" "github.com/stretchr/testify/assert" ) func TestFetchPruneConfigDefault(t *testing.T) { cfg := config.NewFrom(config.Values{}) fp := NewFetchPruneConfig(cfg.Git) assert.Equal(t, 7, fp.FetchRecentRefsDays) assert.Equal(t, 0, fp.FetchRecentCommitsDays) assert.Equal(t, 3, fp.PruneOffsetDays) assert.True(t, fp.FetchRecentRefsIncludeRemotes) assert.Equal(t, 3, fp.PruneOffsetDays) assert.Equal(t, "origin", fp.PruneRemoteName) assert.False(t, fp.PruneVerifyRemoteAlways) assert.False(t, fp.PruneVerifyUnreachableAlways) } func TestFetchPruneConfigCustom(t *testing.T) { cfg := config.NewFrom(config.Values{ Git: map[string][]string{ "lfs.fetchrecentrefsdays": []string{"12"}, "lfs.fetchrecentremoterefs": []string{"false"}, "lfs.fetchrecentcommitsdays": []string{"9"}, "lfs.pruneoffsetdays": []string{"30"}, "lfs.pruneverifyremotealways": []string{"true"}, "lfs.pruneverifyunreachablealways": []string{"true"}, "lfs.pruneremotetocheck": []string{"upstream"}, }, }) fp := NewFetchPruneConfig(cfg.Git) assert.Equal(t, 12, fp.FetchRecentRefsDays) assert.Equal(t, 9, fp.FetchRecentCommitsDays) assert.False(t, fp.FetchRecentRefsIncludeRemotes) assert.Equal(t, 30, fp.PruneOffsetDays) assert.Equal(t, "upstream", fp.PruneRemoteName) assert.True(t, fp.PruneVerifyRemoteAlways) assert.True(t, fp.PruneVerifyUnreachableAlways) } git-lfs-3.6.1/lfs/diff_index_scanner.go000066400000000000000000000144301472372047300200240ustar00rootroot00000000000000package lfs import ( "bufio" "fmt" "strconv" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/tr" ) // Status represents the status of a file that appears in the output of `git // diff-index`. // // More information about each of its valid instances can be found: // https://git-scm.com/docs/git-diff-index type DiffIndexStatus rune const ( StatusAddition DiffIndexStatus = 'A' StatusCopy DiffIndexStatus = 'C' StatusDeletion DiffIndexStatus = 'D' StatusModification DiffIndexStatus = 'M' StatusRename DiffIndexStatus = 'R' StatusTypeChange DiffIndexStatus = 'T' StatusUnmerged DiffIndexStatus = 'U' StatusUnknown DiffIndexStatus = 'X' ) // String implements fmt.Stringer by returning a human-readable name for each // status. 
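// For example, StatusAddition ('A') is rendered as "addition" and // StatusRename ('R') as "rename"; an unrecognized status yields the empty // string.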
func (s DiffIndexStatus) String() string { switch s { case StatusAddition: return "addition" case StatusCopy: return "copy" case StatusDeletion: return "deletion" case StatusModification: return "modification" case StatusRename: return "rename" case StatusTypeChange: return "change" case StatusUnmerged: return "unmerged" case StatusUnknown: return "unknown" } return "" } // Format implements fmt.Formatter. If printed as "%+d", "%+s", or "%+v", the // status will be written out as an English word: i.e., "addition", "copy", // "deletion", etc. // // If the '+' flag is not given, the shorthand will be used instead: 'A', 'C', // and 'D', respectively. // // If any other format verb is given, this function will panic(). func (s DiffIndexStatus) Format(state fmt.State, c rune) { switch c { case 'd', 's', 'v': if state.Flag('+') { state.Write([]byte(s.String())) } else { state.Write([]byte{byte(rune(s))}) } default: panic(tr.Tr.Get("cannot format %v for DiffIndexStatus", c)) } } // DiffIndexEntry holds information about a single item in the results of a `git // diff-index` command. type DiffIndexEntry struct { // SrcMode is the file mode of the "src" file, stored as a string-based // octal. SrcMode string // DstMode is the file mode of the "dst" file, stored as a string-based // octal. DstMode string // SrcSha is the Git blob ID of the "src" file. SrcSha string // DstSha is the Git blob ID of the "dst" file. DstSha string // Status is the status of the file in the index. Status DiffIndexStatus // StatusScore is the optional "score" associated with a particular // status. StatusScore int // SrcName is the name of the file in its "src" state as it appears in // the index. SrcName string // DstName is the name of the file in its "dst" state as it appears in // the index. DstName string } // DiffIndexScanner scans the output of the `git diff-index` command. type DiffIndexScanner struct { // next is the next entry scanned by the Scanner. next *DiffIndexEntry // err is any error that the Scanner encountered while scanning. err error // from is the underlying scanner, scanning the `git diff-index` // command's stdout. from *bufio.Scanner } // NewDiffIndexScanner initializes a new `DiffIndexScanner` scanning at the // given ref, "ref". // // If "cache" is given, the DiffIndexScanner will scan for differences between // the given ref and the index. If "cache" is _not_ given, DiffIndexScanner will // scan for differences between the given ref and the currently checked out // tree. // // If "refresh" is given, the DiffIndexScanner will refresh the index. This is // probably what you want in all cases except fsck, where invoking a filtering // operation would be undesirable due to the possibility of corruption. It can // also be disabled where another operation will have refreshed the index. // // If "workingDir" is set, the DiffIndexScanner will be run in the given // directory. Otherwise, the DiffIndexScanner will be run in the current // working directory. // // If any error was encountered in starting the command or closing its `stdin`, // that error will be returned immediately. Otherwise, a `*DiffIndexScanner` // will be returned with a `nil` error. func NewDiffIndexScanner(ref string, cached bool, refresh bool, workingDir string) (*DiffIndexScanner, error) { scanner, err := git.DiffIndex(ref, cached, refresh, workingDir) if err != nil { return nil, err } return &DiffIndexScanner{ from: scanner, }, nil } // Scan advances the scan line and yields either a new value for Entry(), or an // Err(). 
It returns true if and only if it can continue scanning for // more entries. func (s *DiffIndexScanner) Scan() bool { if !s.prepareScan() { return false } s.next, s.err = s.scan(s.from.Text()) if s.err != nil { s.err = errors.Wrap(s.err, tr.Tr.Get("`git diff-index` scan")) } return s.err == nil } // Entry returns the last entry that was Scan()'d by the DiffIndexScanner. func (s *DiffIndexScanner) Entry() *DiffIndexEntry { return s.next } // Err returns the last error that was encountered by the DiffIndexScanner. func (s *DiffIndexScanner) Err() error { return s.err } // prepareScan clears out the results from the last Scan() loop, and advances // the internal scanner to fetch a new line of Text(). func (s *DiffIndexScanner) prepareScan() bool { s.next, s.err = nil, nil if !s.from.Scan() { s.err = s.from.Err() return false } return true } // scan parses the given line and returns a `*DiffIndexEntry` or an error, // depending on whether or not the parse was successful. func (s *DiffIndexScanner) scan(line string) (*DiffIndexEntry, error) { // Format is: // :100644 100644 c5b3d83a7542255ec7856487baa5e83d65b1624c 9e82ac1b514be060945392291b5b3108c22f6fe3 M foo.gif // :<src mode> <dst mode> <src sha1> <dst sha1> <status>\t<src name>[\t<dst name>] parts := strings.Split(line, "\t") if len(parts) < 2 { return nil, errors.Errorf(tr.Tr.Get("invalid line: %s", line)) } desc := strings.Fields(parts[0]) if len(desc) < 5 { return nil, errors.Errorf(tr.Tr.Get("invalid description: %s", parts[0])) } entry := &DiffIndexEntry{ SrcMode: strings.TrimPrefix(desc[0], ":"), DstMode: desc[1], SrcSha: desc[2], DstSha: desc[3], Status: DiffIndexStatus(rune(desc[4][0])), SrcName: parts[1], } if score, err := strconv.Atoi(desc[4][1:]); err == nil { entry.StatusScore = score } if len(parts) > 2 { entry.DstName = parts[2] } return entry, nil } git-lfs-3.6.1/lfs/extension.go000066400000000000000000000071231472372047300162310ustar00rootroot00000000000000package lfs import ( "bytes" "crypto/sha256" "encoding/hex" "hash" "io" "os" "strings" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/git-lfs/git-lfs/v3/tr" ) type pipeRequest struct { action string reader io.Reader fileName string extensions []config.Extension } type pipeResponse struct { file *os.File results []*pipeExtResult } type pipeExtResult struct { name string oidIn string oidOut string } type extCommand struct { cmd *subprocess.Cmd out io.WriteCloser err *bytes.Buffer hasher hash.Hash result *pipeExtResult } func pipeExtensions(cfg *config.Configuration, request *pipeRequest) (response pipeResponse, err error) { var extcmds []*extCommand defer func() { // In the case of an early return before the end of this // function (in response to an error, etc), kill all running // processes. Errors are ignored since the function has already // returned. // // In the happy path, the commands will have already been // `Wait()`-ed upon and e.cmd.Process.Kill() will return an // error, but we can ignore it.
for _, e := range extcmds { if e.cmd.Process != nil { e.cmd.Process.Kill() } } }() for _, e := range request.extensions { var pieces []string switch request.action { case "clean": pieces = strings.Split(e.Clean, " ") case "smudge": pieces = strings.Split(e.Smudge, " ") default: err = errors.New(tr.Tr.Get("Invalid action: %s", request.action)) return } name := strings.Trim(pieces[0], " ") var args []string for _, value := range pieces[1:] { arg := strings.Replace(value, "%f", request.fileName, -1) args = append(args, arg) } var cmd *subprocess.Cmd cmd, err = subprocess.ExecCommand(name, args...) if err != nil { return } ec := &extCommand{cmd: cmd, result: &pipeExtResult{name: e.Name}} extcmds = append(extcmds, ec) } hasher := sha256.New() pipeReader, pipeWriter := io.Pipe() multiWriter := io.MultiWriter(hasher, pipeWriter) var input io.Reader var output io.WriteCloser input = pipeReader extcmds[0].cmd.Stdin = input if response.file, err = TempFile(cfg, ""); err != nil { return } defer response.file.Close() output = response.file last := len(extcmds) - 1 for i, ec := range extcmds { ec.hasher = sha256.New() if i == last { ec.cmd.Stdout = io.MultiWriter(ec.hasher, output) ec.out = output continue } nextec := extcmds[i+1] var nextStdin io.WriteCloser var stdout io.ReadCloser if nextStdin, err = nextec.cmd.StdinPipe(); err != nil { return } if stdout, err = ec.cmd.StdoutPipe(); err != nil { return } ec.cmd.Stdin = input ec.cmd.Stdout = io.MultiWriter(ec.hasher, nextStdin) ec.out = nextStdin input = stdout var errBuff bytes.Buffer ec.err = &errBuff ec.cmd.Stderr = ec.err } for _, ec := range extcmds { if err = ec.cmd.Start(); err != nil { return } } if _, err = io.Copy(multiWriter, request.reader); err != nil { return } if err = pipeWriter.Close(); err != nil { return } for _, ec := range extcmds { if err = ec.cmd.Wait(); err != nil { if ec.err != nil { errStr := ec.err.String() err = errors.New(tr.Tr.Get("extension '%s' failed with: %s", ec.result.name, errStr)) } return } if err = ec.out.Close(); err != nil { return } } oid := hex.EncodeToString(hasher.Sum(nil)) for _, ec := range extcmds { ec.result.oidIn = oid oid = hex.EncodeToString(ec.hasher.Sum(nil)) ec.result.oidOut = oid response.results = append(response.results, ec.result) } return } git-lfs-3.6.1/lfs/gitfilter.go000066400000000000000000000013141472372047300162020ustar00rootroot00000000000000package lfs import ( "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/fs" "github.com/git-lfs/git-lfs/v3/git" "github.com/jmhodges/clock" ) // GitFilter provides clean and smudge capabilities type GitFilter struct { cfg *config.Configuration fs *fs.Filesystem clk clock.Clock } // NewGitFilter initializes a new *GitFilter func NewGitFilter(cfg *config.Configuration) *GitFilter { return &GitFilter{cfg: cfg, fs: cfg.Filesystem(), clk: clock.New()} } func (f *GitFilter) ObjectPath(oid string) (string, error) { return f.fs.ObjectPath(oid) } func (f *GitFilter) RemoteRef() *git.Ref { return git.NewRefUpdate(f.cfg.Git, f.cfg.PushRemote(), f.cfg.CurrentRef(), nil).RemoteRef() } git-lfs-3.6.1/lfs/gitfilter_clean.go000066400000000000000000000044511472372047300173510ustar00rootroot00000000000000package lfs import ( "bytes" "crypto/sha256" "encoding/hex" "io" "os" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tools" ) type cleanedAsset struct { Filename string *Pointer } func (f *GitFilter) Clean(reader io.Reader, fileName string, fileSize int64, cb tools.CopyCallback) (*cleanedAsset, error) { extensions, err := 
f.cfg.SortedExtensions() if err != nil { return nil, err } var oid string var size int64 var tmp *os.File var exts []*PointerExtension if len(extensions) > 0 { request := &pipeRequest{"clean", reader, fileName, extensions} var response pipeResponse if response, err = pipeExtensions(f.cfg, request); err != nil { return nil, err } oid = response.results[len(response.results)-1].oidOut tmp = response.file var stat os.FileInfo if stat, err = os.Stat(tmp.Name()); err != nil { return nil, err } size = stat.Size() for _, result := range response.results { if result.oidIn != result.oidOut { ext := NewPointerExtension(result.name, len(exts), result.oidIn) exts = append(exts, ext) } } } else { oid, size, tmp, err = f.copyToTemp(reader, fileSize, cb) if err != nil { return nil, err } } pointer := NewPointer(oid, size, exts) return &cleanedAsset{tmp.Name(), pointer}, err } func (f *GitFilter) copyToTemp(reader io.Reader, fileSize int64, cb tools.CopyCallback) (oid string, size int64, tmp *os.File, err error) { tmp, err = TempFile(f.cfg, "") if err != nil { return } defer tmp.Close() oidHash := sha256.New() writer := io.MultiWriter(oidHash, tmp) if fileSize <= 0 { cb = nil } ptr, buf, err := DecodeFrom(reader) by := make([]byte, blobSizeCutoff) n, rerr := buf.Read(by) by = by[:n] if rerr != nil || (err == nil && len(by) < blobSizeCutoff) { err = errors.NewCleanPointerError(ptr, by) return } var from io.Reader = bytes.NewReader(by) if fileSize < 0 || int64(len(by)) < fileSize { // If there is still more data to be read from the file, tack on // the original reader and continue the read from there. from = io.MultiReader(from, reader) } size, err = tools.CopyWithCallback(writer, from, fileSize, cb) if err != nil { return } oid = hex.EncodeToString(oidHash.Sum(nil)) return } func (a *cleanedAsset) Teardown() error { return os.Remove(a.Filename) } git-lfs-3.6.1/lfs/gitfilter_smudge.go000066400000000000000000000203371472372047300175540ustar00rootroot00000000000000package lfs import ( "fmt" "io" "os" "path/filepath" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tools/humanize" "github.com/git-lfs/git-lfs/v3/tq" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) func (f *GitFilter) SmudgeToFile(filename string, ptr *Pointer, download bool, manifest tq.Manifest, cb tools.CopyCallback) error { tools.MkdirAll(filepath.Dir(filename), f.cfg) if stat, _ := os.Stat(filename); stat != nil { if ptr.Size == 0 && stat.Size() == 0 { return nil } if stat.Mode()&0200 == 0 { if err := os.Chmod(filename, stat.Mode()|0200); err != nil { return errors.Wrap(err, tr.Tr.Get("Could not restore write permission")) } // When we're done, return the file back to its normal // permission bits. 
defer os.Chmod(filename, stat.Mode()) } } abs, err := filepath.Abs(filename) if err != nil { return errors.New(tr.Tr.Get("could not produce absolute path for %q", filename)) } file, err := os.Create(abs) if err != nil { return errors.New(tr.Tr.Get("could not create working directory file: %v", err)) } defer file.Close() if _, err := f.Smudge(file, ptr, filename, download, manifest, cb); err != nil { if errors.IsDownloadDeclinedError(err) { // write placeholder data instead file.Seek(0, io.SeekStart) ptr.Encode(file) return err } else { return errors.New(tr.Tr.Get("could not write working directory file: %v", err)) } } return nil } func (f *GitFilter) Smudge(writer io.Writer, ptr *Pointer, workingfile string, download bool, manifest tq.Manifest, cb tools.CopyCallback) (int64, error) { mediafile, err := f.ObjectPath(ptr.Oid) if err != nil { return 0, err } LinkOrCopyFromReference(f.cfg, ptr.Oid, ptr.Size) stat, statErr := os.Stat(mediafile) if statErr == nil && stat != nil { fileSize := stat.Size() if fileSize != ptr.Size { tracerx.Printf("Removing %s, size %d is invalid", mediafile, fileSize) os.RemoveAll(mediafile) stat = nil } } var n int64 if ptr.Size == 0 { return 0, nil } else if statErr != nil || stat == nil { if download { n, err = f.downloadFile(writer, ptr, workingfile, mediafile, manifest, cb) // In the case of a cherry-pick, the newly created commit will likely not yet // be found in the history of a remote branch, so the first attempt might fail. if err != nil && f.cfg.SearchAllRemotesEnabled() { tracerx.Printf("git: smudge: default remote failed. searching alternate remotes") n, err = f.downloadFileFallBack(writer, ptr, workingfile, mediafile, manifest, cb) } } else { return 0, errors.NewDownloadDeclinedError(statErr, tr.Tr.Get("smudge filter")) } } else { n, err = f.readLocalFile(writer, ptr, mediafile, workingfile, cb) } if err != nil { return 0, errors.NewSmudgeError(err, ptr.Oid, mediafile) } return n, nil } func (f *GitFilter) downloadFile(writer io.Writer, ptr *Pointer, workingfile, mediafile string, manifest tq.Manifest, cb tools.CopyCallback) (int64, error) { fmt.Fprintln(os.Stderr, tr.Tr.Get("Downloading %s (%s)", workingfile, humanize.FormatBytes(uint64(ptr.Size)))) // NOTE: if given, "cb" is a tools.CopyCallback which writes updates // to the logpath specified by GIT_LFS_PROGRESS. // // Either way, forward it into the *tq.TransferQueue so that updates are // sent over correctly. q := tq.NewTransferQueue(tq.Download, manifest, f.cfg.Remote(), tq.WithProgressCallback(cb), tq.RemoteRef(f.RemoteRef()), tq.WithBatchSize(f.cfg.TransferBatchSize()), ) q.Add(filepath.Base(workingfile), mediafile, ptr.Oid, ptr.Size, false, nil) q.Wait() if errs := q.Errors(); len(errs) > 0 { var multiErr error for _, e := range errs { if multiErr != nil { multiErr = fmt.Errorf("%v\n%v", multiErr, e) } else { multiErr = e } } return 0, errors.Wrapf(multiErr, tr.Tr.Get("Error downloading %s (%s)", workingfile, ptr.Oid)) } return f.readLocalFile(writer, ptr, mediafile, workingfile, nil) } func (f *GitFilter) downloadFileFallBack(writer io.Writer, ptr *Pointer, workingfile, mediafile string, manifest tq.Manifest, cb tools.CopyCallback) (int64, error) { // Attempt to find the LFS objects in all currently registered remotes. // When a valid remote is found, that remote is retained for // future attempts within downloadFile(). In the best case, the ordinary // call to downloadFile will then succeed for the rest of the files; // otherwise this function will again search for a valid remote as a fallback.
remotes := f.cfg.Remotes() for index, remote := range remotes { q := tq.NewTransferQueue(tq.Download, manifest, remote, tq.WithProgressCallback(cb), tq.RemoteRef(f.RemoteRef()), tq.WithBatchSize(f.cfg.TransferBatchSize()), ) q.Add(filepath.Base(workingfile), mediafile, ptr.Oid, ptr.Size, false, nil) q.Wait() if errs := q.Errors(); len(errs) > 0 { var multiErr error for _, e := range errs { if multiErr != nil { multiErr = fmt.Errorf("%v\n%v", multiErr, e) } else { multiErr = e } } wrappedError := errors.Wrapf(multiErr, tr.Tr.Get("Error downloading %s (%s)", workingfile, ptr.Oid)) if index >= len(remotes)-1 { return 0, wrappedError } else { tracerx.Printf("git: download: remote failed %s %s", remote, wrappedError) } } else { // Keep this remote for the rest of the operation, as we found a valid one. // This prevents repeated trial-and-error searches. f.cfg.SetRemote(remote) return f.readLocalFile(writer, ptr, mediafile, workingfile, nil) } } return 0, errors.Wrapf(errors.New("No known remotes"), tr.Tr.Get("Error downloading %s (%s)", workingfile, ptr.Oid)) } func (f *GitFilter) readLocalFile(writer io.Writer, ptr *Pointer, mediafile string, workingfile string, cb tools.CopyCallback) (int64, error) { reader, err := tools.RobustOpen(mediafile) if err != nil { return 0, errors.Wrapf(err, tr.Tr.Get("error opening media file")) } defer reader.Close() if ptr.Size == 0 { if stat, _ := os.Stat(mediafile); stat != nil { ptr.Size = stat.Size() } } if len(ptr.Extensions) > 0 { registeredExts := f.cfg.Extensions() extensions := make(map[string]config.Extension) for _, ptrExt := range ptr.Extensions { ext, ok := registeredExts[ptrExt.Name] if !ok { err := errors.New(tr.Tr.Get("extension '%s' is not configured", ptrExt.Name)) return 0, errors.Wrap(err, tr.Tr.Get("smudge filter")) } ext.Priority = ptrExt.Priority extensions[ext.Name] = ext } exts, err := config.SortExtensions(extensions) if err != nil { return 0, errors.Wrap(err, tr.Tr.Get("smudge filter")) } // pipe extensions in reverse order var extsR []config.Extension for i := range exts { ext := exts[len(exts)-1-i] extsR = append(extsR, ext) } request := &pipeRequest{"smudge", reader, workingfile, extsR} response, err := pipeExtensions(f.cfg, request) if err != nil { return 0, errors.Wrap(err, tr.Tr.Get("smudge filter")) } actualExts := make(map[string]*pipeExtResult) for _, result := range response.results { actualExts[result.name] = result } // verify name, order, and oids oid := response.results[0].oidIn if ptr.Oid != oid { err = errors.New(tr.Tr.Get("actual OID %s during smudge does not match expected %s", oid, ptr.Oid)) return 0, errors.Wrap(err, tr.Tr.Get("smudge filter")) } for _, expected := range ptr.Extensions { actual := actualExts[expected.Name] if actual.name != expected.Name { err = errors.New(tr.Tr.Get("actual extension name '%s' does not match expected '%s'", actual.name, expected.Name)) return 0, errors.Wrap(err, tr.Tr.Get("smudge filter")) } if actual.oidOut != expected.Oid { err = errors.New(tr.Tr.Get("actual OID %s for extension '%s' does not match expected %s", actual.oidOut, expected.Name, expected.Oid)) return 0, errors.Wrap(err, tr.Tr.Get("smudge filter")) } } // setup reader reader, err = os.Open(response.file.Name()) if err != nil { return 0, errors.Wrapf(err, tr.Tr.Get("Error opening smudged file: %s", err)) } defer reader.Close() } n, err := tools.CopyWithCallback(writer, reader, ptr.Size, cb) if err != nil { return n, errors.Wrapf(err, tr.Tr.Get("Error reading from media file: %s", err)) } return n, nil }
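// The smudge path above can be driven directly from client code. The following one-line sketch is illustrative only and not part of the upstream tree: it assumes "cfg" is a *config.Configuration, "manifest" is a tq.Manifest prepared by the caller, and "pointerFile" is an io.Reader over a pointer blob. // filter := NewGitFilter(cfg); if ptr, err := DecodePointer(pointerFile); err == nil { err = filter.SmudgeToFile("foo.bin", ptr, true, manifest, nil) } // Passing download == true fetches missing objects from the remote; a nil callback disables progress reporting.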
git-lfs-3.6.1/lfs/gitscanner.go000066400000000000000000000230471472372047300163550ustar00rootroot00000000000000package lfs import ( "errors" "time" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) var missingCallbackErr = errors.New(tr.Tr.Get("no callback given")) // IsCallbackMissing returns a boolean indicating whether the error is reporting // that a GitScanner is missing a required GitScannerCallback. func IsCallbackMissing(err error) bool { return err == missingCallbackErr } // GitScanner scans objects in a Git repository for LFS pointers. type GitScanner struct { Filter *filepathfilter.Filter cfg *config.Configuration mode ScanningMode skipDeletedBlobs bool commitsOnly bool foundPointer GitScannerFoundPointer // only set by NewGitScannerForPush() remote string skippedRefs []string foundLockable GitScannerFoundLockable potentialLockables GitScannerSet } type GitScannerFoundPointer func(*WrappedPointer, error) type GitScannerFoundLockable func(filename string) type GitScannerSet interface { Contains(string) bool } type ScanningMode int const ( ScanRefsMode = ScanningMode(iota) // 0 - or default scan mode ScanAllMode = ScanningMode(iota) ScanRangeToRemoteMode = ScanningMode(iota) ) // NewGitScanner initializes a *GitScanner for a Git repository in the current // working directory. func NewGitScanner(cfg *config.Configuration, cb GitScannerFoundPointer) *GitScanner { return &GitScanner{cfg: cfg, foundPointer: cb} } // NewGitScannerForPush initializes a *GitScanner for a Git repository // in the current working directory, to scan for objects to push to the // given remote and for locks on non-LFS objects held by other users. // Needed for ScanMultiRangeToRemote(), and for ScanRefWithDeleted() when // used for a "git lfs push --all" command. func NewGitScannerForPush(cfg *config.Configuration, remote string, cb GitScannerFoundLockable, potentialLockables GitScannerSet) *GitScanner { return &GitScanner{ cfg: cfg, remote: remote, skippedRefs: calcSkippedRefs(remote), foundLockable: cb, potentialLockables: potentialLockables, } } // ScanMultiRangeToRemote scans through all unique objects reachable from the // "include" ref but not reachable from any "exclude" refs and which the // given remote does not have. See NewGitScannerForPush(). func (s *GitScanner) ScanMultiRangeToRemote(include string, exclude []string, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.foundPointer) if err != nil { return err } if len(s.remote) == 0 { return errors.New(tr.Tr.Get("unable to scan starting at %q: no remote set", include)) } s.mode = ScanRangeToRemoteMode start := time.Now() err = scanRefsToChanSingleIncludeMultiExclude(s, callback, include, exclude, s.cfg.GitEnv(), s.cfg.OSEnv()) tracerx.PerformanceSince("ScanMultiRangeToRemote", start) return err } // ScanRefs scans through all unique objects reachable from the "include" refs // but not reachable from any "exclude" refs, including objects that have // been modified or deleted. 
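// The callback "cb" may be nil when a default callback was supplied to // NewGitScanner(); the first non-nil callback is used (see // firstGitScannerCallback()).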
func (s *GitScanner) ScanRefs(include, exclude []string, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.foundPointer) if err != nil { return err } start := time.Now() err = scanRefsToChan(s, callback, include, exclude, s.cfg.GitEnv(), s.cfg.OSEnv()) tracerx.PerformanceSince("ScanRefs", start) return err } // ScanRefRange scans through all unique objects reachable from the "include" // ref but not reachable from the "exclude" ref, including objects that have // been modified or deleted. func (s *GitScanner) ScanRefRange(include, exclude string, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.foundPointer) if err != nil { return err } start := time.Now() err = scanRefsToChanSingleIncludeExclude(s, callback, include, exclude, s.cfg.GitEnv(), s.cfg.OSEnv()) tracerx.PerformanceSince("ScanRefRange", start) return err } // ScanRefRangeByTree scans through all objects reachable from the "include" // ref but not reachable from the "exclude" ref, including objects that have // been modified or deleted. Objects which appear in multiple trees will // be visited once per tree. func (s *GitScanner) ScanRefRangeByTree(include, exclude string, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.foundPointer) if err != nil { return err } s.commitsOnly = true start := time.Now() err = scanRefsByTree(s, callback, []string{include}, []string{exclude}, s.cfg.GitEnv(), s.cfg.OSEnv()) tracerx.PerformanceSince("ScanRefRangeByTree", start) return err } // ScanRefWithDeleted scans through all unique objects in the given ref, // including objects that have been modified or deleted. func (s *GitScanner) ScanRefWithDeleted(ref string, cb GitScannerFoundPointer) error { return s.ScanRefRange(ref, "", cb) } // ScanRef scans through all unique objects in the current ref, excluding // objects that have been modified or deleted before the ref. func (s *GitScanner) ScanRef(ref string, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.foundPointer) if err != nil { return err } s.skipDeletedBlobs = true start := time.Now() err = scanRefsToChanSingleIncludeExclude(s, callback, ref, "", s.cfg.GitEnv(), s.cfg.OSEnv()) tracerx.PerformanceSince("ScanRef", start) return err } // ScanRefByTree scans through all objects in the current ref, excluding // objects that have been modified or deleted before the ref. Objects which // appear in multiple trees will be visited once per tree. func (s *GitScanner) ScanRefByTree(ref string, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.foundPointer) if err != nil { return err } s.skipDeletedBlobs = true s.commitsOnly = true start := time.Now() err = scanRefsByTree(s, callback, []string{ref}, []string{}, s.cfg.GitEnv(), s.cfg.OSEnv()) tracerx.PerformanceSince("ScanRefByTree", start) return err } // ScanAll scans through all unique objects in the repository, including // objects that have been modified or deleted. func (s *GitScanner) ScanAll(cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.foundPointer) if err != nil { return err } s.mode = ScanAllMode start := time.Now() err = scanRefsToChanSingleIncludeExclude(s, callback, "", "", s.cfg.GitEnv(), s.cfg.OSEnv()) tracerx.PerformanceSince("ScanAll", start) return err } // ScanTree takes a ref and returns WrappedPointer objects in the tree at that // ref. Differs from ScanRefs in that multiple files in the tree with the same // content are all reported. 
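// A minimal invocation of the tree scan might look like the following sketch (assuming "cfg" is a *config.Configuration): // scanner := NewGitScanner(cfg, nil); err := scanner.ScanTree("HEAD", func(p *WrappedPointer, err error) { /* p.Name and p.Oid identify the pointer found in the tree */ })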
func (s *GitScanner) ScanTree(ref string, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.foundPointer) if err != nil { return err } start := time.Now() err = runScanTree(callback, ref, s.Filter, s.cfg.GitEnv(), s.cfg.OSEnv()) tracerx.PerformanceSince("ScanTree", start) return err } // ScanLFSFiles takes a ref, which points to HEAD, and returns WrappedPointer // objects in the index or tree at that ref. Differs from ScanRefs in that // multiple files in the tree with the same content are all reported. func (s *GitScanner) ScanLFSFiles(ref string, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.foundPointer) if err != nil { return err } start := time.Now() err = runScanLFSFiles(callback, ref, s.Filter, s.cfg.GitEnv(), s.cfg.OSEnv()) tracerx.PerformanceSince("ScanLFSFiles", start) return err } // ScanUnpushed scans history for all LFS pointers which have been added but not // pushed to the named remote. remote can be left blank to mean 'any remote'. func (s *GitScanner) ScanUnpushed(remote string, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.foundPointer) if err != nil { return err } start := time.Now() err = scanUnpushed(callback, remote) tracerx.PerformanceSince("ScanUnpushed", start) return err } // ScanStashed scans for all LFS pointers referenced solely by a stash func (s *GitScanner) ScanStashed(cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.foundPointer) if err != nil { return err } start := time.Now() err = scanStashed(callback) tracerx.PerformanceSince("ScanStashed", start) return err } // ScanPreviousVersions scans changes reachable from ref (commit) back to since. // Returns channel of pointers for *previous* versions that overlap that time. // Does not include pointers which were still in use at ref (use ScanRefsToChan // for that) func (s *GitScanner) ScanPreviousVersions(ref string, since time.Time, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.foundPointer) if err != nil { return err } start := time.Now() err = logPreviousSHAs(callback, ref, s.Filter, since) tracerx.PerformanceSince("ScanPreviousVersions", start) return err } // ScanIndex scans the git index for modified LFS objects. func (s *GitScanner) ScanIndex(ref string, workingDir string, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.foundPointer) if err != nil { return err } start := time.Now() err = scanIndex(callback, ref, workingDir, s.Filter, s.cfg.GitEnv(), s.cfg.OSEnv()) tracerx.PerformanceSince("ScanIndex", start) return err } func firstGitScannerCallback(callbacks ...GitScannerFoundPointer) (GitScannerFoundPointer, error) { for _, cb := range callbacks { if cb == nil { continue } return cb, nil } return nil, missingCallbackErr } git-lfs-3.6.1/lfs/gitscanner_catfilebatch.go000066400000000000000000000073471472372047300210530ustar00rootroot00000000000000package lfs import ( "bytes" "crypto/sha256" "fmt" "io" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/tr" ) // runCatFileBatch() uses an ObjectDatabase from the // github.com/git-lfs/gitobj/v2 package to get the contents of Git // blob objects, given their SHA1s, similar to the behaviour of // 'git cat-file --batch'. // Git blob SHA1s are read from the revs channel and fed to an // ObjectScanner which looks them up in the ObjectDatabase. 
// The contents will be decoded as Git LFS pointers and any valid pointers // will be sent to pointerCh. // If a Git blob is not an LFS pointer, check the lockableSet to see // if that blob is for a locked file. Any errors are sent to errCh. func runCatFileBatch(pointerCh chan *WrappedPointer, lockableCh chan string, lockableSet *lockableNameSet, revs *StringChannelWrapper, errCh chan error, gitEnv, osEnv config.Environment) error { scanner, err := NewPointerScanner(gitEnv, osEnv) if err != nil { return err } go func() { canScan := true for r := range revs.Results { canScan = scanner.Scan(r) if err := scanner.Err(); err != nil { errCh <- err } else if p := scanner.Pointer(); p != nil { pointerCh <- p } else if b := scanner.BlobSHA(); git.HasValidObjectIDLength(b) { if name, ok := lockableSet.Check(b); ok { lockableCh <- name } } if !canScan { break } } if canScan { if err := revs.Wait(); err != nil { errCh <- err } } if err := scanner.Close(); err != nil { errCh <- err } close(pointerCh) close(errCh) close(lockableCh) }() return nil } type PointerScanner struct { scanner *git.ObjectScanner blobSha string contentsSha string pointer *WrappedPointer err error } func NewPointerScanner(gitEnv, osEnv config.Environment) (*PointerScanner, error) { scanner, err := git.NewObjectScanner(gitEnv, osEnv) if err != nil { return nil, err } return &PointerScanner{scanner: scanner}, nil } func (s *PointerScanner) BlobSHA() string { return s.blobSha } func (s *PointerScanner) ContentsSha() string { return s.contentsSha } func (s *PointerScanner) Pointer() *WrappedPointer { return s.pointer } func (s *PointerScanner) Err() error { return s.err } func (s *PointerScanner) Scan(sha string) bool { s.pointer, s.err = nil, nil s.blobSha, s.contentsSha = "", "" b, c, p, err := s.next(sha) s.blobSha = b s.contentsSha = c s.pointer = p if err != nil { if err != io.EOF { s.err = err } return false } return true } func (s *PointerScanner) Close() error { return s.scanner.Close() } func (s *PointerScanner) next(blob string) (string, string, *WrappedPointer, error) { if !s.scanner.Scan(blob) { if err := s.scanner.Err(); err != nil { return "", "", nil, err } return "", "", nil, io.EOF } blobSha := s.scanner.Sha1() size := s.scanner.Size() sha := sha256.New() var buf *bytes.Buffer var to io.Writer = sha if size < blobSizeCutoff { buf = bytes.NewBuffer(make([]byte, 0, size)) to = io.MultiWriter(to, buf) } read, err := io.CopyN(to, s.scanner.Contents(), int64(size)) if err != nil { return blobSha, "", nil, err } if int64(size) != read { return blobSha, "", nil, errors.New(tr.Tr.Get("expected %d bytes, read %d bytes", size, read)) } var pointer *WrappedPointer var contentsSha string if size < blobSizeCutoff { if p, err := DecodePointer(bytes.NewReader(buf.Bytes())); err != nil { contentsSha = fmt.Sprintf("%x", sha.Sum(nil)) } else { pointer = &WrappedPointer{ Sha1: blobSha, Pointer: p, } contentsSha = p.Oid } } else { contentsSha = fmt.Sprintf("%x", sha.Sum(nil)) } return blobSha, contentsSha, pointer, err } git-lfs-3.6.1/lfs/gitscanner_catfilebatchcheck.go000066400000000000000000000053531472372047300220440ustar00rootroot00000000000000package lfs import ( "bufio" "io" "strconv" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/tr" ) // runCatFileBatchCheck uses 'git cat-file --batch-check' to get the type and // size of a git object. Any object that isn't of type blob and under the // blobSizeCutoff will be ignored, unless it's a locked file. 
revs is a channel // over which strings containing git sha1s will be sent. It returns a channel // from which sha1 strings can be read. func runCatFileBatchCheck(smallRevCh chan string, lockableCh chan string, lockableSet *lockableNameSet, revs *StringChannelWrapper, errCh chan error) error { cmd, err := git.CatFile() if err != nil { return err } go func() { scanner := &catFileBatchCheckScanner{s: bufio.NewScanner(cmd.Stdout), limit: blobSizeCutoff} for r := range revs.Results { cmd.Stdin.Write([]byte(r + "\n")) hasNext := scanner.Scan() if err := scanner.Err(); err != nil { errCh <- err } else if b := scanner.LFSBlobOID(); len(b) > 0 { smallRevCh <- b } else if b := scanner.GitBlobOID(); len(b) > 0 { if name, ok := lockableSet.Check(b); ok { lockableCh <- name } } if !hasNext { break } } if err := revs.Wait(); err != nil { errCh <- err } cmd.Stdin.Close() stderr, _ := io.ReadAll(cmd.Stderr) err := cmd.Wait() if err != nil { errCh <- errors.New(tr.Tr.Get("error in `git cat-file --batch-check`: %v %v", err, string(stderr))) } close(smallRevCh) close(errCh) close(lockableCh) }() return nil } type catFileBatchCheckScanner struct { s *bufio.Scanner limit int lfsBlobOID string gitBlobOID string } func (s *catFileBatchCheckScanner) LFSBlobOID() string { return s.lfsBlobOID } func (s *catFileBatchCheckScanner) GitBlobOID() string { return s.gitBlobOID } func (s *catFileBatchCheckScanner) Err() error { return s.s.Err() } func (s *catFileBatchCheckScanner) Scan() bool { lfsBlobSha, gitBlobSha, hasNext := s.next() s.lfsBlobOID = lfsBlobSha s.gitBlobOID = gitBlobSha return hasNext } func (s *catFileBatchCheckScanner) next() (string, string, bool) { hasNext := s.s.Scan() line := s.s.Text() lineLen := len(line) oidLen := strings.IndexByte(line, ' ') // Format is: // <oid> <type> <size> // type is at a fixed spot, if we see that it's "blob", we can avoid // splitting the line just to get the size.
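// e.g., "0000000000000000000000000000000000000002 blob 123", as in the // fixtures in gitscanner_catfilebatchcheckscanner_test.go.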
if oidLen == -1 || lineLen < oidLen+6 { return "", "", hasNext } if line[oidLen+1:oidLen+5] != "blob" { return "", "", hasNext } size, err := strconv.Atoi(line[oidLen+6 : lineLen]) if err != nil { return "", "", hasNext } blobSha := line[0:oidLen] if size >= s.limit { return "", blobSha, hasNext } return blobSha, "", hasNext } git-lfs-3.6.1/lfs/gitscanner_catfilebatchcheckscanner_test.go000066400000000000000000000031261472372047300244510ustar00rootroot00000000000000package lfs import ( "bufio" "strings" "testing" "github.com/stretchr/testify/assert" ) func TestCatFileBatchCheckScannerWithValidOutput(t *testing.T) { lines := []string{ "short line", "0000000000000000000000000000000000000000 BLOB capitalized", "0000000000000000000000000000000000000001 blob not-a-size", "0000000000000000000000000000000000000002 blob 123", "0000000000000000000000000000000000000003 blob 1 0", "0000000000000000000000000000000000000004 blob 123456789", } r := strings.NewReader(strings.Join(lines, "\n")) s := &catFileBatchCheckScanner{ s: bufio.NewScanner(r), limit: 1024, } assertNextOID(t, s, "", "") assertNextOID(t, s, "", "") assertNextOID(t, s, "", "") assertNextOID(t, s, "0000000000000000000000000000000000000002", "") assertNextOID(t, s, "", "") assertNextOID(t, s, "", "0000000000000000000000000000000000000004") assertScannerDone(t, s) assert.Equal(t, "", s.LFSBlobOID()) assert.Equal(t, "", s.GitBlobOID()) } type stringScanner interface { Next() (string, bool, error) Err() error Scan() bool } type genericScanner interface { Err() error Scan() bool } func assertNextScan(t *testing.T, scanner genericScanner) { assert.True(t, scanner.Scan()) assert.Nil(t, scanner.Err()) } func assertNextOID(t *testing.T, scanner *catFileBatchCheckScanner, lfsBlobOID, gitBlobOID string) { assertNextScan(t, scanner) assert.Equal(t, lfsBlobOID, scanner.LFSBlobOID()) assert.Equal(t, gitBlobOID, scanner.GitBlobOID()) } func assertScannerDone(t *testing.T, scanner genericScanner) { assert.False(t, scanner.Scan()) assert.Nil(t, scanner.Err()) } git-lfs-3.6.1/lfs/gitscanner_index.go000066400000000000000000000107201472372047300175360ustar00rootroot00000000000000package lfs import ( "strings" "sync" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/filepathfilter" ) // ScanIndex returns a slice of WrappedPointer objects for all Git LFS pointers // it finds in the index. // // Ref is the ref at which to scan, which may be "HEAD" if there is at least one // commit. 
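// Both the cached (staged) and uncached diffs relative to "ref" are // scanned, and duplicate blob OIDs are removed before the blobs are // resolved to pointers.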
func scanIndex(cb GitScannerFoundPointer, ref string, workingDir string, f *filepathfilter.Filter, gitEnv, osEnv config.Environment) error { indexMap := &indexFileMap{ nameMap: make(map[string][]*indexFile), nameShaPairs: make(map[string]bool), mutex: &sync.Mutex{}, } revs, err := revListIndex(ref, false, indexMap, workingDir) if err != nil { return err } cachedRevs, err := revListIndex(ref, true, indexMap, workingDir) if err != nil { return err } allRevsErr := make(chan error, 5) // can be multiple errors below allRevsChan := make(chan string, 1) allRevs := NewStringChannelWrapper(allRevsChan, allRevsErr) go func() { seenRevs := make(map[string]bool, 0) for rev := range cachedRevs.Results { if !seenRevs[rev] { allRevsChan <- rev seenRevs[rev] = true } } err = cachedRevs.Wait() if err != nil { allRevsErr <- err } for rev := range revs.Results { if !seenRevs[rev] { allRevsChan <- rev seenRevs[rev] = true } } err := revs.Wait() if err != nil { allRevsErr <- err } close(allRevsChan) close(allRevsErr) }() smallShas, _, err := catFileBatchCheck(allRevs, nil) if err != nil { return err } ch := make(chan gitscannerResult, chanBufSize) barePointerCh, _, err := catFileBatch(smallShas, nil, gitEnv, osEnv) if err != nil { return err } go func() { for p := range barePointerCh.Results { for _, file := range indexMap.FilesFor(p.Sha1) { // Append a new *WrappedPointer that combines the data // from the index file, and the pointer "p". ch <- gitscannerResult{ Pointer: &WrappedPointer{ Sha1: p.Sha1, Name: file.Name, SrcName: file.SrcName, Status: file.Status, Pointer: p.Pointer, }, } } } if err := barePointerCh.Wait(); err != nil { ch <- gitscannerResult{Err: err} } close(ch) }() for result := range ch { if f.Allows(result.Pointer.Name) { cb(result.Pointer, result.Err) } } return nil } // revListIndex uses git diff-index to return the list of object sha1s // for files in the index. It returns a channel from which sha1 strings can be read. // The indexMap will be filled with indexFile pointers mapping sha1s to indexFiles. func revListIndex(atRef string, cache bool, indexMap *indexFileMap, workingDir string) (*StringChannelWrapper, error) { scanner, err := NewDiffIndexScanner(atRef, cache, false, workingDir) if err != nil { return nil, err } revs := make(chan string, chanBufSize) errs := make(chan error, 1) go func() { for scanner.Scan() { var name string = scanner.Entry().DstName if len(name) == 0 { name = scanner.Entry().SrcName } indexMap.Add(scanner.Entry().DstSha, &indexFile{ Name: name, SrcName: scanner.Entry().SrcName, Status: string(scanner.Entry().Status), }) revs <- scanner.Entry().DstSha } if err := scanner.Err(); err != nil { errs <- err } close(revs) close(errs) }() return NewStringChannelWrapper(revs, errs), nil } // indexFile is used when scanning the index. It stores the name of // the file, the status of the file in the index, and, in the case of // a moved or copied file, the original name of the file. type indexFile struct { Name string SrcName string Status string } type indexFileMap struct { // mutex guards nameMap and nameShaPairs mutex *sync.Mutex // nameMap maps SHA1s to a slice of `*indexFile`s nameMap map[string][]*indexFile // nameShaPairs maps "sha1:name" -> bool nameShaPairs map[string]bool } // FilesFor returns all `*indexFile`s that match the given `sha`. func (m *indexFileMap) FilesFor(sha string) []*indexFile { m.mutex.Lock() defer m.mutex.Unlock() return m.nameMap[sha] } // Add appends unique index files to the given SHA, "sha".
A file is considered // unique if its combination of SHA and current filename have not yet been seen // by this instance "m" of *indexFileMap. func (m *indexFileMap) Add(sha string, index *indexFile) { m.mutex.Lock() defer m.mutex.Unlock() pairKey := strings.Join([]string{sha, index.Name}, ":") if m.nameShaPairs[pairKey] { return } m.nameMap[sha] = append(m.nameMap[sha], index) m.nameShaPairs[pairKey] = true } git-lfs-3.6.1/lfs/gitscanner_log.go000066400000000000000000000252421472372047300172150ustar00rootroot00000000000000package lfs import ( "bufio" "bytes" "fmt" "io" "regexp" "strconv" "strings" "time" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) // When scanning diffs with parseScannerLogOutput(), the direction of diff // to include data from, i.e., '+' or '-'. Depending on what you're scanning // for either might be useful. type LogDiffDirection byte const ( LogDiffAdditions = LogDiffDirection('+') // include '+' diffs LogDiffDeletions = LogDiffDirection('-') // include '-' diffs ) var ( // Arguments to append to a git log call which will limit the output to // lfs changes and format the output suitable for parseLogOutput.. method(s) logLfsSearchArgs = []string{ "--no-ext-diff", "--no-textconv", "--color=never", "-G", "oid sha256:", // only diffs which include an lfs file SHA change "-p", // include diff so we can read the SHA "-U12", // Make sure diff context is always big enough to support 10 extension lines to get whole pointer `--format=lfs-commit-sha: %H %P`, // just a predictable commit header we can detect } ) type gitscannerResult struct { Pointer *WrappedPointer Err error } func scanUnpushed(cb GitScannerFoundPointer, remote string) error { logArgs := []string{ "--branches", "--tags", // include all locally referenced commits "--not"} // but exclude everything that comes after if len(remote) == 0 { logArgs = append(logArgs, "--remotes") } else { logArgs = append(logArgs, fmt.Sprintf("--remotes=%v", remote)) } // Add standard search args to find lfs references logArgs = append(logArgs, logLfsSearchArgs...) cmd, err := git.Log(logArgs...) if err != nil { return err } parseScannerLogOutput(cb, LogDiffAdditions, cmd, nil) return nil } func scanStashed(cb GitScannerFoundPointer) error { // Stashes are actually 2-3 commits, each containing one of: // 1. Working copy (WIP) modified files // 2. Index changes // 3. Untracked files (but only if "git stash -u" was used) // The first of these, the WIP commit, is a merge whose first parent // is HEAD and whose other parent(s) are commits 2 and 3 above. // We need to get the individual diff of each of these commits to // ensure we have all of the LFS objects referenced by the stash, // so a future "git stash pop" can restore them all. // First we get the list of SHAs of the WIP merge commits from the // reflog using "git log -g --format=%h refs/stash --". Because // older Git versions (at least <=2.7) don't report merge parents in // the reflog, we can't extract the parent SHAs from "Merge:" lines // in the log; we can, however, use the "git log -m" option to force // an individual diff with the first merge parent in a second step. logArgs := []string{"-g", "--format=%h", "refs/stash", "--"} cmd, err := git.Log(logArgs...) 
if err != nil { return err } scanner := bufio.NewScanner(cmd.Stdout) var stashMergeShas []string for scanner.Scan() { stashMergeSha := strings.TrimSpace(scanner.Text()) stashMergeShas = append(stashMergeShas, fmt.Sprintf("%v^..%v", stashMergeSha, stashMergeSha)) } if err := scanner.Err(); err != nil { return errors.New(tr.Tr.Get("error while scanning `git log` for stashed refs: %v", err)) } err = cmd.Wait() if err != nil { // Ignore this error, it really only happens when there's no refs/stash return nil } // We can use the log parser if we provide the -m and --first-parent // options to get the first WIP merge diff shown individually, then // no additional options to get the second index merge diff and // possible third untracked files merge diff in a subsequent step. stashMergeLogArgs := [][]string{{"-m", "--first-parent"}, {}} for _, logArgs := range stashMergeLogArgs { // Add standard search args to find lfs references logArgs = append(logArgs, logLfsSearchArgs...) logArgs = append(logArgs, stashMergeShas...) cmd, err = git.Log(logArgs...) if err != nil { return err } parseScannerLogOutput(cb, LogDiffAdditions, cmd, nil) } return nil } func parseScannerLogOutput(cb GitScannerFoundPointer, direction LogDiffDirection, cmd *subprocess.BufferedCmd, filter *filepathfilter.Filter) { ch := make(chan gitscannerResult, chanBufSize) cherr := make(chan []byte) go func() { stderr, _ := io.ReadAll(cmd.Stderr) cherr <- stderr close(cherr) }() go func() { scanner := newLogScanner(direction, cmd.Stdout) scanner.Filter = filter for scanner.Scan() { if p := scanner.Pointer(); p != nil { ch <- gitscannerResult{Pointer: p} } } if err := scanner.Err(); err != nil { io.ReadAll(cmd.Stdout) ch <- gitscannerResult{Err: errors.New(tr.Tr.Get("error while scanning `git log`: %v", err))} } stderr := <-cherr err := cmd.Wait() if err != nil { ch <- gitscannerResult{Err: errors.New(tr.Tr.Get("error in `git log`: %v %v", err, string(stderr)))} } close(ch) }() cmd.Stdin.Close() for result := range ch { cb(result.Pointer, result.Err) } } // logPreviousSHAs scans history for all previous versions of LFS pointers // from 'since' up to (but not including) the final state at ref func logPreviousSHAs(cb GitScannerFoundPointer, ref string, filter *filepathfilter.Filter, since time.Time) error { logArgs := []string{ fmt.Sprintf("--since=%v", git.FormatGitDate(since)), } // Add standard search args to find lfs references logArgs = append(logArgs, logLfsSearchArgs...) // ending at ref logArgs = append(logArgs, ref) cmd, err := git.Log(logArgs...) if err != nil { return err } parseScannerLogOutput(cb, LogDiffDeletions, cmd, filter) return nil } // logScanner parses log output formatted as per logLfsSearchArgs & returns // pointers. type logScanner struct { // Filter ensures that file paths not matching the include patterns, or // matching the exclude patterns, are skipped.
Filter *filepathfilter.Filter r *bufio.Reader err error dir LogDiffDirection pointer *WrappedPointer pointerData *bytes.Buffer currentFilename string currentFileIncluded bool commitHeaderRegex *regexp.Regexp fileHeaderRegex *regexp.Regexp fileMergeHeaderRegex *regexp.Regexp pointerDataRegex *regexp.Regexp } // dir: whether to include results from + or - diffs // r: a stream of output from git log with at least logLfsSearchArgs specified func newLogScanner(dir LogDiffDirection, r io.Reader) *logScanner { return &logScanner{ r: bufio.NewReader(r), dir: dir, pointerData: &bytes.Buffer{}, currentFileIncluded: true, // no need to compile these regexes on every `git-lfs` call, just ones that // use the scanner. commitHeaderRegex: regexp.MustCompile(fmt.Sprintf(`^lfs-commit-sha: (%s)(?: (%s))*`, git.ObjectIDRegex, git.ObjectIDRegex)), fileHeaderRegex: regexp.MustCompile(`^diff --git "?a\/(.+?)\s+"?b\/(.+)`), fileMergeHeaderRegex: regexp.MustCompile(`^diff --cc (.+)`), pointerDataRegex: regexp.MustCompile(`^([\+\- ])(version https://git-lfs|oid sha256|size|ext-).*$`), } } func (s *logScanner) Pointer() *WrappedPointer { return s.pointer } func (s *logScanner) Err() error { return s.err } func (s *logScanner) Scan() bool { s.pointer = nil p, canScan := s.scan() s.pointer = p return canScan } // Utility func used at several points below (keep in narrow scope) func (s *logScanner) finishLastPointer() *WrappedPointer { if s.pointerData.Len() == 0 || !s.currentFileIncluded { return nil } p, err := DecodePointer(s.pointerData) s.pointerData.Reset() if err == nil { return &WrappedPointer{Name: s.currentFilename, Pointer: p} } else { tracerx.Printf("Unable to parse pointer from log: %v", err) return nil } } // For each commit we'll get something like this: /* lfs-commit-sha: 60fde3d23553e10a55e2a32ed18c20f65edd91e7 e2eaf1c10b57da7b98eb5d722ec5912ddeb53ea1 diff --git a/1D_Noise.png b/1D_Noise.png new file mode 100644 index 0000000..2622b4a --- /dev/null +++ b/1D_Noise.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5d84da40ab1f6aa28df2b2bf1ade2cdcd4397133f903c12b4106641b10e1ed6 +size 1289 */ // There can be multiple diffs per commit (multiple binaries) // Also when a binary is changed the diff will include a '-' line for the old SHA func (s *logScanner) scan() (*WrappedPointer, bool) { for { line, err := s.r.ReadString('\n') if err != nil && err != io.EOF { s.err = err return nil, false } // remove trailing newline delimiter and optional single carriage return line = strings.TrimSuffix(strings.TrimRight(line, "\n"), "\r") if match := s.commitHeaderRegex.FindStringSubmatch(line); match != nil { // Currently we're not pulling out commit groupings, but could if we wanted // This just acts as a delimiter for finishing a multiline pointer if p := s.finishLastPointer(); p != nil { return p, true } } else if match := s.fileHeaderRegex.FindStringSubmatch(line); match != nil { // Finding a regular file header p := s.finishLastPointer() // Pertinent file name depends on whether we're listening to additions or removals if s.dir == LogDiffAdditions { s.setFilename(match[2]) } else { s.setFilename(match[1]) } if p != nil { return p, true } } else if match := s.fileMergeHeaderRegex.FindStringSubmatch(line); match != nil { // Git merge file header is a little different, only one file p := s.finishLastPointer() s.setFilename(match[1]) if p != nil { return p, true } } else if s.currentFileIncluded { if match := s.pointerDataRegex.FindStringSubmatch(line); match != nil { // An LFS pointer data 
line // Include only the entirety of one side of the diff // -U12 (in logLfsSearchArgs) will ensure we always get all of it, even if only // the SHA changed (version & size the same) changeType := match[1][0] // Always include unchanged context lines (normally just the version line) if LogDiffDirection(changeType) == s.dir || changeType == ' ' { // Must skip diff +/- marker s.pointerData.WriteString(line[1:]) s.pointerData.WriteString("\n") // newline was stripped off by scanner } } } if err == io.EOF { break } } if p := s.finishLastPointer(); p != nil { return p, true } return nil, false } func (s *logScanner) setFilename(name string) { // Trim last character if it's a quote if len(name) > 0 && name[len(name)-1] == '"' { name = name[:len(name)-1] } // Convert octal escapes to proper UTF-8 characters unquotedName, err := strconv.Unquote(`"` + name + `"`) if err == nil { name = unquotedName } s.currentFilename = name s.currentFileIncluded = s.Filter.Allows(name) } git-lfs-3.6.1/lfs/gitscanner_pointerscanner_test.go000066400000000000000000000066041472372047300225260ustar00rootroot00000000000000package lfs import ( "bytes" "crypto/sha256" "encoding/hex" "fmt" "io" "math/rand" "testing" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/gitobj/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestPointerScannerWithValidOutput(t *testing.T) { blobs := []*Pointer{ &Pointer{ Version: "https://git-lfs.github.com/spec/v1", Oid: "e71eefd918ea175b8f362611f981f648dbf9888ff74865077cb4c9077728f350", Size: 123, OidType: "sha256", }, &Pointer{ Version: "https://git-lfs.github.com/spec/v1", Oid: "0eb69b651be65d5a61d6bebf2c53c811a5bf8031951111000e2077f4d7fe43b1", Size: 132, OidType: "sha256", }, } be, _ := gitobj.NewMemoryBackend(nil) db, _ := gitobj.FromBackend(be) shas := fakeObjectsWithRandoData(t, db, blobs) scanner := &PointerScanner{ scanner: git.NewObjectScannerFrom(db), } iter := 0 for i := 0; i < 5; i++ { assertNextEmptyPointer(t, scanner, shas[iter]) iter++ } assertNextPointer(t, scanner, shas[iter], "e71eefd918ea175b8f362611f981f648dbf9888ff74865077cb4c9077728f350") iter++ for i := 0; i < 5; i++ { assertNextEmptyPointer(t, scanner, shas[iter]) iter++ } assertNextPointer(t, scanner, shas[iter], "0eb69b651be65d5a61d6bebf2c53c811a5bf8031951111000e2077f4d7fe43b1") iter++ for i := 0; i < 5; i++ { assertNextEmptyPointer(t, scanner, shas[iter]) iter++ } } func TestPointerScannerWithLargeBlobs(t *testing.T) { buf := bytes.NewBuffer(make([]byte, 0, 1025)) sha := sha256.New() rng := rand.New(rand.NewSource(0)) _, err := io.CopyN(io.MultiWriter(sha, buf), rng, 1025) require.Nil(t, err) be, _ := gitobj.NewMemoryBackend(nil) db, _ := gitobj.FromBackend(be) fake := bytes.NewBuffer(nil) oid := writeFakeBuffer(t, db, fake, buf.Bytes(), buf.Len()) scanner := &PointerScanner{ scanner: git.NewObjectScannerFrom(db), } require.True(t, scanner.Scan(oid)) assert.Nil(t, scanner.Pointer()) assert.Equal(t, fmt.Sprintf("%x", sha.Sum(nil)), scanner.ContentsSha()) } func assertNextPointer(t *testing.T, scanner *PointerScanner, sha string, oid string) { assert.True(t, scanner.Scan(sha)) assert.Nil(t, scanner.Err()) p := scanner.Pointer() assert.NotNil(t, p) assert.Equal(t, oid, p.Oid) } func assertNextEmptyPointer(t *testing.T, scanner *PointerScanner, sha string) { assert.True(t, scanner.Scan(sha)) assert.Nil(t, scanner.Err()) assert.Nil(t, scanner.Pointer()) } func fakeObjectsWithRandoData(t *testing.T, db *gitobj.ObjectDatabase, blobs []*Pointer) []string { buf := &bytes.Buffer{} rngbuf := make([]byte, 1000) // just
under blob size cutoff rng := rand.New(rand.NewSource(0)) oids := make([]string, 0) for i := 0; i < 5; i++ { n, err := io.ReadFull(rng, rngbuf) if err != nil { t.Fatalf("error reading from rng: %+v", err) } oids = append(oids, writeFakeBuffer(t, db, buf, rngbuf, n)) } for _, b := range blobs { ptrtext := b.Encoded() oids = append(oids, writeFakeBuffer(t, db, buf, []byte(ptrtext), len(ptrtext))) for i := 0; i < 5; i++ { n, err := io.ReadFull(rng, rngbuf) if err != nil { t.Fatalf("error reading from rng: %+v", err) } oids = append(oids, writeFakeBuffer(t, db, buf, rngbuf, n)) } } return oids } func writeFakeBuffer(t *testing.T, db *gitobj.ObjectDatabase, buf *bytes.Buffer, by []byte, size int) string { oid, _ := db.WriteBlob(gitobj.NewBlobFromBytes(by)) return hex.EncodeToString(oid) } git-lfs-3.6.1/lfs/gitscanner_refs.go000066400000000000000000000141641472372047300173740ustar00rootroot00000000000000package lfs import ( "encoding/hex" "sync" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/git" ) // The nameMap structure provides a goroutine-safe mapping of Git object IDs // (as SHA hex strings) to their pathspecs, either fully-qualified directory // paths for trees or file paths for blobs, as returned by "git rev-list". type nameMap struct { names map[string]string mutex *sync.Mutex } func (m *nameMap) getName(sha string) (string, bool) { m.mutex.Lock() name, ok := m.names[sha] m.mutex.Unlock() return name, ok } func (m *nameMap) setName(sha, name string) { m.mutex.Lock() m.names[sha] = name m.mutex.Unlock() } func newNameMap() *nameMap { return &nameMap{ names: make(map[string]string, 0), mutex: &sync.Mutex{}, } } type lockableNameSet struct { nameMap *nameMap set GitScannerSet } // Determines if the given blob sha matches a locked file. func (s *lockableNameSet) Check(blobSha string) (string, bool) { if s == nil || s.nameMap == nil || s.set == nil { return "", false } name, ok := s.nameMap.getName(blobSha) if !ok { return name, ok } if s.set.Contains(name) { return name, true } return name, false } func noopFoundLockable(name string) {} // scanRefsToChan scans through all unique objects reachable from the // "include" refs and not reachable from any "exclude" refs and invokes the // provided callback for each pointer file, valid or invalid, that it finds. // Reports unique OIDs once only, not multiple times if more than one file // has the same content. 
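/* A minimal sketch of a call to this function, assuming an initialized *GitScanner and Git/OS environments; the ref names are hypothetical:

	err := scanRefsToChan(scanner, func(p *WrappedPointer, err error) {
		// each pointer file, valid or invalid, arrives here
	}, []string{"refs/heads/main"}, []string{"refs/remotes/origin/main"}, gitEnv, osEnv)
*/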
func scanRefsToChan(scanner *GitScanner, pointerCb GitScannerFoundPointer, include, exclude []string, gitEnv, osEnv config.Environment) error { revs, nameMap, err := revListShas(scanner, include, exclude) if err != nil { return err } lockableSet := &lockableNameSet{nameMap: nameMap, set: scanner.potentialLockables} smallShas, batchLockableCh, err := catFileBatchCheck(revs, lockableSet) if err != nil { return err } lockableCb := scanner.foundLockable if lockableCb == nil { lockableCb = noopFoundLockable } go func(cb GitScannerFoundLockable, ch chan string) { for name := range ch { cb(name) } }(lockableCb, batchLockableCh) pointers, checkLockableCh, err := catFileBatch(smallShas, lockableSet, gitEnv, osEnv) if err != nil { return err } for p := range pointers.Results { if name, ok := nameMap.getName(p.Sha1); ok { p.Name = name } if scanner.Filter.Allows(p.Name) { pointerCb(p, nil) } } for lockableName := range checkLockableCh { if scanner.Filter.Allows(lockableName) { lockableCb(lockableName) } } if err := pointers.Wait(); err != nil { pointerCb(nil, err) } return nil } // scanRefsToChanSingleIncludeExclude scans through all unique objects // reachable from the "include" ref and not reachable from the "exclude" ref // and invokes the provided callback for each pointer file, valid or invalid, // that it finds. // Reports unique OIDs once only, not multiple times if more than one file // has the same content. func scanRefsToChanSingleIncludeExclude(scanner *GitScanner, pointerCb GitScannerFoundPointer, include, exclude string, gitEnv, osEnv config.Environment) error { return scanRefsToChan(scanner, pointerCb, []string{include}, []string{exclude}, gitEnv, osEnv) } // scanRefsToChanSingleIncludeMultiExclude scans through all unique objects // reachable from the "include" ref and not reachable from any "exclude" refs // and invokes the provided callback for each pointer file, valid or invalid, // that it finds. // Reports unique OIDs once only, not multiple times if more than one file // has the same content. func scanRefsToChanSingleIncludeMultiExclude(scanner *GitScanner, pointerCb GitScannerFoundPointer, include string, exclude []string, gitEnv, osEnv config.Environment) error { return scanRefsToChan(scanner, pointerCb, []string{include}, exclude, gitEnv, osEnv) } // scanRefsByTree scans through all objects reachable from the "include" refs // and not reachable from any "exclude" refs and invokes the provided callback // for each pointer file, valid or invalid, that it finds. // Objects which appear in multiple trees will be visited once per tree. func scanRefsByTree(scanner *GitScanner, pointerCb GitScannerFoundPointer, include, exclude []string, gitEnv, osEnv config.Environment) error { revs, _, err := revListShas(scanner, include, exclude) if err != nil { return err } errchan := make(chan error, 20) // multiple errors possible wg := &sync.WaitGroup{} for r := range revs.Results { wg.Add(1) go func(rev string) { defer wg.Done() err := runScanTreeForPointers(pointerCb, rev, gitEnv, osEnv) if err != nil { errchan <- err } }(r) } wg.Wait() close(errchan) for err := range errchan { if err != nil { return err } } return revs.Wait() } // revListShas uses git rev-list to return the list of object sha1s // for the given ref. If all is true, ref is ignored. It returns a // channel from which sha1 strings can be read, and a map of the sha1 // object IDs to their pathspecs, which will be populated as the sha1 // strings are written to the channel. 
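/* A sketch of the consumption pattern used by the scan functions above: drain Results, resolve pathspecs, then collect any deferred errors via Wait().

	revs, names, err := revListShas(scanner, include, exclude)
	for sha := range revs.Results {
		if name, ok := names.getName(sha); ok {
			// sha is a hex object ID, name its pathspec
		}
	}
	err = revs.Wait()
*/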
func revListShas(scanner *GitScanner, include, exclude []string) (*StringChannelWrapper, *nameMap, error) { nameMap := newNameMap() revListScanner, err := git.NewRevListScanner(include, exclude, &git.ScanRefsOptions{ Mode: git.ScanningMode(scanner.mode), SkipDeletedBlobs: scanner.skipDeletedBlobs, CommitsOnly: scanner.commitsOnly, Remote: scanner.remote, SkippedRefs: scanner.skippedRefs, Names: nameMap.names, Mutex: nameMap.mutex, }) if err != nil { return nil, nil, err } revs := make(chan string, chanBufSize) errs := make(chan error, 5) // may be multiple errors go func() { for revListScanner.Scan() { sha := hex.EncodeToString(revListScanner.OID()) if name := revListScanner.Name(); len(name) > 0 { nameMap.setName(sha, name) } revs <- sha } if err = revListScanner.Err(); err != nil { errs <- err } if err = revListScanner.Close(); err != nil { errs <- err } close(revs) close(errs) }() return NewStringChannelWrapper(revs, errs), nameMap, nil } git-lfs-3.6.1/lfs/gitscanner_remotes.go000066400000000000000000000027561472372047300201170ustar00rootroot00000000000000package lfs import ( "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/tools" ) // calcSkippedRefs checks that locally cached versions of remote refs are still // present on the remote before they are used as a 'from' point. If the server // implements garbage collection and a remote branch had been deleted since we // last did 'git fetch --prune', then the objects in that branch may have also // been deleted on the server if unreferenced. If some refs are missing on the // remote, use a more explicit diff command. func calcSkippedRefs(remote string) []string { cachedRemoteRefs, _ := git.CachedRemoteRefs(remote) // Since CachedRemoteRefs() only returns branches, request that // RemoteRefs() ignore tags and also return only branches. actualRemoteRefs, _ := git.RemoteRefs(remote, false) // The list of remote refs can be very large, so convert them to // a set for faster lookups in the skip calculation loop. actualRemoteRefsSet := tools.NewStringSet() for _, ref := range actualRemoteRefs { actualRemoteRefsSet.Add(ref.Name) } // Only check for missing refs on remote; if the ref is different it has moved // forward probably, and if not and the ref has changed to a non-descendant // (force push) then that will cause a re-evaluation in a subsequent command. 
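/* Example (hypothetical ref): if "refs/remotes/origin/topic" is cached locally but has since been deleted on the server, the loop below emits no "^<sha>" exclusion for it, so a later scan re-walks history from that point instead of assuming the remote still holds those objects. */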
var skippedRefs []string for _, cachedRef := range cachedRemoteRefs { if actualRemoteRefsSet.Contains(cachedRef.Name) { skippedRefs = append(skippedRefs, "^"+cachedRef.Sha) } } return skippedRefs } git-lfs-3.6.1/lfs/gitscanner_tree.go000066400000000000000000000176101472372047300173730ustar00rootroot00000000000000package lfs import ( "io" "path" "path/filepath" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/git/gitattr" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/git-lfs/git-lfs/v3/tr" ) func runScanTree(cb GitScannerFoundPointer, ref string, filter *filepathfilter.Filter, gitEnv, osEnv config.Environment) error { // We don't use the nameMap approach here since that's imprecise when >1 file // can be using the same content treeShas, err := lsTreeBlobs(ref, func(t *git.TreeBlob) bool { return t != nil && t.Size < blobSizeCutoff && filter.Allows(t.Filename) }) if err != nil { return err } pcw, err := catFileBatchTree(treeShas, gitEnv, osEnv) if err != nil { return err } for p := range pcw.Results { cb(p, nil) } if err := pcw.Wait(); err != nil { cb(nil, err) } return nil } func runScanLFSFiles(cb GitScannerFoundPointer, ref string, filter *filepathfilter.Filter, gitEnv, osEnv config.Environment) error { var treeShas *TreeBlobChannelWrapper var err error if git.IsGitVersionAtLeast("2.42.0") { treeShas, err = lsFilesBlobs(func(t *git.TreeBlob) bool { return t != nil && t.Size < blobSizeCutoff && filter.Allows(t.Filename) }) } else { treeShas, err = lsTreeBlobs(ref, func(t *git.TreeBlob) bool { return t != nil && t.Size < blobSizeCutoff && filter.Allows(t.Filename) }) } // We don't use the nameMap approach here since that's imprecise when >1 file // can be using the same content if err != nil { return err } pcw, err := catFileBatchTree(treeShas, gitEnv, osEnv) if err != nil { return err } for p := range pcw.Results { cb(p, nil) } if err := pcw.Wait(); err != nil { cb(nil, err) } return nil } // catFileBatchTree() uses an ObjectDatabase from the // github.com/git-lfs/gitobj/v2 package to get the contents of Git // blob objects, given their SHA1s from git.TreeBlob structs, similar // to the behaviour of 'git cat-file --batch'. // Input git.TreeBlob structs should be sent over the treeblobs channel. // The blob contents will be decoded as Git LFS pointers and any valid // pointers will be returned as pointer.Pointer structs in a new channel. func catFileBatchTree(treeblobs *TreeBlobChannelWrapper, gitEnv, osEnv config.Environment) (*PointerChannelWrapper, error) { scanner, err := NewPointerScanner(gitEnv, osEnv) if err != nil { return nil, err } pointers := make(chan *WrappedPointer, chanBufSize) errchan := make(chan error, 10) // Multiple errors possible go func() { hasNext := true for t := range treeblobs.Results { hasNext = scanner.Scan(t.Oid) if p := scanner.Pointer(); p != nil { p.Name = t.Filename pointers <- p } if err := scanner.Err(); err != nil { errchan <- err } if !hasNext { break } } // If the scanner quit early, we may still have treeblobs to // read, so waiting for it to close will cause a deadlock. 
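/* That is, treeblobs.Wait() only returns once the producing goroutine has sent every blob and closed its channels, so it is called below only when the whole stream was consumed (hasNext is still true). */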
if hasNext { // Deal with nested error from incoming treeblobs err := treeblobs.Wait() if err != nil { errchan <- err } } if err = scanner.Close(); err != nil { errchan <- err } close(pointers) close(errchan) }() return NewPointerChannelWrapper(pointers, errchan), nil } // Use ls-tree at ref to find a list of candidate tree blobs which might be lfs files // The returned channel will be sent these blobs which should be sent to catFileBatchTree // for final check & conversion to Pointer func lsTreeBlobs(ref string, predicate func(*git.TreeBlob) bool) (*TreeBlobChannelWrapper, error) { return lsBlobs(func() (*subprocess.BufferedCmd, error) { return git.LsTree(ref) }, predicate) } func lsBlobs(backend func() (*subprocess.BufferedCmd, error), predicate func(*git.TreeBlob) bool) (*TreeBlobChannelWrapper, error) { cmd, err := backend() if err != nil { return nil, err } cmd.Stdin.Close() blobs := make(chan git.TreeBlob, chanBufSize) errchan := make(chan error, 1) go func() { scanner := git.NewLsTreeScanner(cmd.Stdout) for scanner.Scan() { if t := scanner.TreeBlob(); predicate(t) { blobs <- *t } } stderr, _ := io.ReadAll(cmd.Stderr) err := cmd.Wait() if err != nil { errchan <- errors.New(tr.Tr.Get("error in `git ls-tree`: %v %v", err, string(stderr))) } close(blobs) close(errchan) }() return NewTreeBlobChannelWrapper(blobs, errchan), nil } // Use ls-files at ref to find a list of candidate tree blobs which might be lfs files // The returned channel will be sent these blobs which should be sent to catFileBatchTree // for final check & conversion to Pointer func lsFilesBlobs(predicate func(*git.TreeBlob) bool) (*TreeBlobChannelWrapper, error) { return lsBlobs(func() (*subprocess.BufferedCmd, error) { return git.LsFilesLFS() }, predicate) } func catFileBatchTreeForPointers(treeblobs *TreeBlobChannelWrapper, gitEnv, osEnv config.Environment) (map[string]*WrappedPointer, *filepathfilter.Filter, error) { pscanner, err := NewPointerScanner(gitEnv, osEnv) if err != nil { return nil, nil, err } oscanner, err := git.NewObjectScanner(gitEnv, osEnv) if err != nil { return nil, nil, err } pointers := make(map[string]*WrappedPointer) paths := make([]git.AttributePath, 0) processor := gitattr.NewMacroProcessor() hasNext := true for t := range treeblobs.Results { if path.Base(t.Filename) == ".gitattributes" { hasNext = oscanner.Scan(t.Oid) if rdr := oscanner.Contents(); rdr != nil { paths = append(paths, git.AttrPathsFromReader( processor, t.Filename, "", rdr, t.Filename == ".gitattributes", // Read macros from the top-level attributes )...) } if err := oscanner.Err(); err != nil { return nil, nil, err } } else if t.Size < blobSizeCutoff { hasNext = pscanner.Scan(t.Oid) // It's intentional that we insert nil for // non-pointers; we want to keep track of them // as well as pointers. p := pscanner.Pointer() if p != nil { p.Name = t.Filename } pointers[t.Filename] = p if err := pscanner.Err(); err != nil { return nil, nil, err } } else { pointers[t.Filename] = nil } if !hasNext { break } } // If the scanner quit early, we may still have treeblobs to // read, so waiting for it to close will cause a deadlock. 
if hasNext { // Deal with nested error from incoming treeblobs err := treeblobs.Wait() if err != nil { return nil, nil, err } } if err = pscanner.Close(); err != nil { return nil, nil, err } if err = oscanner.Close(); err != nil { return nil, nil, err } includes := make([]filepathfilter.Pattern, 0, len(paths)) excludes := make([]filepathfilter.Pattern, 0, len(paths)) for _, path := range paths { // Convert all separators to `/` before creating a pattern to // avoid characters being escaped in situations like `subtree\*.md` pattern := filepathfilter.NewPattern(filepath.ToSlash(path.Path), filepathfilter.GitAttributes) if path.Tracked { includes = append(includes, pattern) } else { excludes = append(excludes, pattern) } } return pointers, filepathfilter.NewFromPatterns(includes, excludes, filepathfilter.DefaultValue(false)), nil } func runScanTreeForPointers(cb GitScannerFoundPointer, tree string, gitEnv, osEnv config.Environment) error { treeShas, err := lsTreeBlobs(tree, func(t *git.TreeBlob) bool { return t != nil && (t.Mode == 0100644 || t.Mode == 0100755) }) if err != nil { return err } pointers, filter, err := catFileBatchTreeForPointers(treeShas, gitEnv, osEnv) if err != nil { return err } for name, p := range pointers { // This file matches the patterns in .gitattributes, so it // should be a pointer. If it is not, then it is a plain Git // blob, which we report as an error. if filter.Allows(name) { if p == nil { cb(nil, errors.NewPointerScanError(errors.NewNotAPointerError(nil), tree, name)) } else { cb(p, nil) } } } return nil } git-lfs-3.6.1/lfs/hook.go000066400000000000000000000150701472372047300151550ustar00rootroot00000000000000package lfs import ( "fmt" "io" "os" "path/filepath" "strings" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) var ( // The basic hook which just calls 'git lfs TYPE' hookBaseContent = "#!/bin/sh\ncommand -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\\n%s\\n\\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the '{{Command}}' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; }\ngit lfs {{Command}} \"$@\"" hookOldContent = "#!/bin/sh\ncommand -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the '{{Command}}' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\\n\"; exit 2; }\ngit lfs {{Command}} \"$@\"" hookOldContent2 = "#!/bin/sh\ncommand -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting '.git/hooks/{{Command}}'.\\n\"; exit 2; }\ngit lfs {{Command}} \"$@\"" hookOldContent3 = "#!/bin/sh\ncommand -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/{{Command}}.\\n\"; exit 2; }\ngit lfs {{Command}} \"$@\"" ) // A Hook represents a githook as described in http://git-scm.com/docs/githooks. 
// Hooks have a type, which is the type of hook that they are, and a body, which // represents the thing they will execute when invoked by Git. type Hook struct { Type string Contents string Dir string upgradeables []string cfg *config.Configuration } func LoadHooks(hookDir string, cfg *config.Configuration) []*Hook { return []*Hook{ NewStandardHook("pre-push", hookDir, []string{ "#!/bin/sh\ngit lfs push --stdin $*", "#!/bin/sh\ngit lfs push --stdin \"$@\"", "#!/bin/sh\ngit lfs pre-push \"$@\"", "#!/bin/sh\ncommand -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository has been set up with Git LFS but Git LFS is not installed.\\n\"; exit 0; }\ngit lfs pre-push \"$@\"", "#!/bin/sh\ncommand -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository has been set up with Git LFS but Git LFS is not installed.\\n\"; exit 2; }\ngit lfs pre-push \"$@\"", hookOldContent, hookOldContent2, hookOldContent3, }, cfg), NewStandardHook("post-checkout", hookDir, []string{hookOldContent, hookOldContent2, hookOldContent3}, cfg), NewStandardHook("post-commit", hookDir, []string{hookOldContent, hookOldContent2, hookOldContent3}, cfg), NewStandardHook("post-merge", hookDir, []string{hookOldContent, hookOldContent2, hookOldContent3}, cfg), } } // NewStandardHook creates a new hook using the template script calling 'git lfs theType' func NewStandardHook(theType, hookDir string, upgradeables []string, cfg *config.Configuration) *Hook { formattedUpgradeables := make([]string, 0, len(upgradeables)) for _, s := range upgradeables { formattedUpgradeables = append(formattedUpgradeables, strings.Replace(s, "{{Command}}", theType, -1)) } return &Hook{ Type: theType, Contents: strings.Replace(hookBaseContent, "{{Command}}", theType, -1), Dir: hookDir, upgradeables: formattedUpgradeables, cfg: cfg, } } func (h *Hook) Exists() bool { _, err := os.Stat(h.Path()) return !os.IsNotExist(err) } // Path returns the desired (or actual, if installed) location where this hook // should be installed. It returns an absolute path in all cases. func (h *Hook) Path() string { return filepath.Join(h.Dir, h.Type) } // Install installs this Git hook on disk, or upgrades it if it does exist, and // is upgradeable. It will create a hooks directory relative to the local Git // directory. It returns and halts at any errors, and returns nil if the // operation was a success. func (h *Hook) Install(force bool) error { msg := fmt.Sprintf("Install hook: %s, force=%t, path=%s", h.Type, force, h.Path()) if err := tools.MkdirAll(h.Dir, h.cfg); err != nil { return err } if h.Exists() && !force { tracerx.Printf(msg + ", upgrading...") return h.Upgrade() } tracerx.Printf(msg) return h.write() } // write writes the contents of this Hook to disk, appending a newline at the // end, and sets the mode to octal 0755. It writes to disk unconditionally, and // returns at any error. func (h *Hook) write() error { return os.WriteFile(h.Path(), []byte(h.Contents+"\n"), 0755) } // Upgrade upgrades the (assumed to be) existing git hook to the current // contents. A hook is considered "upgrade-able" if its contents are matched in // the member variable `Upgradeables`. It halts and returns any errors as they // arise. func (h *Hook) Upgrade() error { upgradable, match, err := h.matchesCurrent() if err != nil { return err } if !upgradable || match { return nil } return h.write() } // Uninstall removes the hook on disk so long as it matches the current version, // or any of the past versions of this hook. 
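/* For example: a stock hook whose script equals the current template, or any historical variant listed in upgradeables, is removed; a hand-edited hook matches neither, so matchesCurrent() reports an error and the file is left in place. */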
func (h *Hook) Uninstall() error { msg := fmt.Sprintf("Uninstall hook: %s, path=%s", h.Type, h.Path()) upgradable, _, err := h.matchesCurrent() if err != nil { return err } if !upgradable { tracerx.Printf(msg + ", doesn't match...") return nil } tracerx.Printf(msg) return os.RemoveAll(h.Path()) } // matchesCurrent returns whether or not an existing git hook is able to be // written to or upgraded and additionally whether it is identical to the // current hook. A git hook matches those conditions if and only if its contents // match the current contents, or any past "upgrade-able" contents of this hook. func (h *Hook) matchesCurrent() (bool, bool, error) { file, err := os.Open(h.Path()) if err != nil { return false, false, err } by, err := io.ReadAll(io.LimitReader(file, 1024)) file.Close() if err != nil { return false, false, err } contents := strings.TrimSpace(tools.Undent(string(by))) if contents == h.Contents { return true, true, nil } else if len(contents) == 0 { return true, false, nil } for _, u := range h.upgradeables { if u == contents { return true, false, nil } } return false, false, errors.New(fmt.Sprintf("%s\n\n%s\n", tr.Tr.Get("Hook already exists: %s", string(h.Type)), tools.Indent(contents))) } git-lfs-3.6.1/lfs/lfs.go000066400000000000000000000104261472372047300150010ustar00rootroot00000000000000// Package lfs brings together the core LFS functionality // NOTE: Subject to change, do not rely on this package from outside git-lfs source package lfs import ( "fmt" "os" "sort" "strings" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tq" "github.com/rubyist/tracerx" ) func Environ(cfg *config.Configuration, manifest tq.Manifest, envOverrides map[string]string) []string { osEnviron := os.Environ() env := make([]string, 0, len(osEnviron)+7) api, err := lfsapi.NewClient(cfg) if err != nil { // TODO(@ttaylorr): don't panic panic(err.Error()) } if envOverrides == nil { envOverrides = make(map[string]string, 0) } download := api.Endpoints.AccessFor(api.Endpoints.Endpoint("download", cfg.Remote()).Url) upload := api.Endpoints.AccessFor(api.Endpoints.Endpoint("upload", cfg.PushRemote()).Url) dltransfers := manifest.GetDownloadAdapterNames() sort.Strings(dltransfers) ultransfers := manifest.GetUploadAdapterNames() sort.Strings(ultransfers) fetchPruneConfig := NewFetchPruneConfig(cfg.Git) references := strings.Join(cfg.LocalReferenceDirs(), ", ") env = append(env, fmt.Sprintf("LocalWorkingDir=%s", cfg.LocalWorkingDir()), fmt.Sprintf("LocalGitDir=%s", cfg.LocalGitDir()), fmt.Sprintf("LocalGitStorageDir=%s", cfg.LocalGitStorageDir()), fmt.Sprintf("LocalMediaDir=%s", cfg.LFSObjectDir()), fmt.Sprintf("LocalReferenceDirs=%s", references), fmt.Sprintf("TempDir=%s", cfg.TempDir()), fmt.Sprintf("ConcurrentTransfers=%d", api.ConcurrentTransfers()), fmt.Sprintf("TusTransfers=%v", cfg.TusTransfersAllowed()), fmt.Sprintf("BasicTransfersOnly=%v", cfg.BasicTransfersOnly()), fmt.Sprintf("SkipDownloadErrors=%v", cfg.SkipDownloadErrors()), fmt.Sprintf("FetchRecentAlways=%v", fetchPruneConfig.FetchRecentAlways), fmt.Sprintf("FetchRecentRefsDays=%d", fetchPruneConfig.FetchRecentRefsDays), fmt.Sprintf("FetchRecentCommitsDays=%d", fetchPruneConfig.FetchRecentCommitsDays), fmt.Sprintf("FetchRecentRefsIncludeRemotes=%v", fetchPruneConfig.FetchRecentRefsIncludeRemotes), fmt.Sprintf("PruneOffsetDays=%d", fetchPruneConfig.PruneOffsetDays), fmt.Sprintf("PruneVerifyRemoteAlways=%v", 
fetchPruneConfig.PruneVerifyRemoteAlways), fmt.Sprintf("PruneVerifyUnreachableAlways=%v", fetchPruneConfig.PruneVerifyUnreachableAlways), fmt.Sprintf("PruneRemoteName=%s", fetchPruneConfig.PruneRemoteName), fmt.Sprintf("LfsStorageDir=%s", cfg.LFSStorageDir()), fmt.Sprintf("AccessDownload=%s", download.Mode()), fmt.Sprintf("AccessUpload=%s", upload.Mode()), fmt.Sprintf("DownloadTransfers=%s", strings.Join(dltransfers, ",")), fmt.Sprintf("UploadTransfers=%s", strings.Join(ultransfers, ",")), ) if len(cfg.FetchExcludePaths()) > 0 { env = append(env, fmt.Sprintf("FetchExclude=%s", strings.Join(cfg.FetchExcludePaths(), ", "))) } if len(cfg.FetchIncludePaths()) > 0 { env = append(env, fmt.Sprintf("FetchInclude=%s", strings.Join(cfg.FetchIncludePaths(), ", "))) } for _, ext := range cfg.Extensions() { env = append(env, fmt.Sprintf("Extension[%d]=%s", ext.Priority, ext.Name)) } for _, e := range osEnviron { key := strings.SplitN(e, "=", 2)[0] if !strings.HasPrefix(key, "GIT_") { continue } if val, ok := envOverrides[key]; ok { env = append(env, fmt.Sprintf("%s=%s", key, val)) } else { env = append(env, e) } } return env } func init() { tracerx.DefaultKey = "GIT" tracerx.Prefix = "trace git-lfs: " if len(os.Getenv("GIT_TRACE")) < 1 { if tt := os.Getenv("GIT_TRANSFER_TRACE"); len(tt) > 0 { os.Setenv("GIT_TRACE", tt) } else if cv := os.Getenv("GIT_CURL_VERBOSE"); len(cv) > 0 { os.Setenv("GIT_TRACE", cv) } } } const ( gitExt = ".git" gitPtrPrefix = "gitdir: " ) func LinkOrCopyFromReference(cfg *config.Configuration, oid string, size int64) error { if cfg.LFSObjectExists(oid, size) { return nil } altMediafiles := cfg.Filesystem().ObjectReferencePaths(oid) mediafile, err := cfg.Filesystem().ObjectPath(oid) if err != nil { return err } for _, altMediafile := range altMediafiles { tracerx.Printf("altMediafile: %s", altMediafile) if altMediafile != "" && tools.FileExistsOfSize(altMediafile, size) { err = LinkOrCopy(cfg, altMediafile, mediafile) if err == nil { break } } } return err } git-lfs-3.6.1/lfs/lfs_test.go000066400000000000000000000032131472372047300160340ustar00rootroot00000000000000package lfs_test // avoid import cycle import ( "fmt" "sort" "testing" "github.com/git-lfs/git-lfs/v3/fs" "github.com/git-lfs/git-lfs/v3/lfs" test "github.com/git-lfs/git-lfs/v3/t/cmd/util" "github.com/stretchr/testify/assert" ) func TestAllCurrentObjectsNone(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() empty := true repo.Filesystem().EachObject(func(obj fs.Object) error { empty = false t.Logf("Found: %+v", obj) return nil }) if !empty { t.Error("Should be no objects") } } func TestAllCurrentObjectsSome(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() // We're not testing commits here, just storage, so just create a single // commit input with lots of files to generate many oids numFiles := 20 files := make([]*test.FileInput, 0, numFiles) for i := 0; i < numFiles; i++ { // Must be >=16 bytes for each file to be unique files = append(files, &test.FileInput{Filename: fmt.Sprintf("file%d.txt", i), Size: 30}) } inputs := []*test.CommitInput{ {Files: files}, } outputs := repo.AddCommits(inputs) expected := make([]*lfs.Pointer, 0, numFiles) for _, f := range outputs[0].Files { expected = append(expected, f) } actual := make([]*lfs.Pointer, 0) repo.Filesystem().EachObject(func(obj fs.Object) error { actual = append(actual, lfs.NewPointer(obj.Oid, obj.Size, nil)) return nil }) // sort to ensure comparison is equal 
sort.Sort(test.PointersByOid(expected)) sort.Sort(test.PointersByOid(actual)) assert.Equal(t, expected, actual, "Oids from disk should be the same as in commits") } git-lfs-3.6.1/lfs/pointer.go000066400000000000000000000167151472372047300157040ustar00rootroot00000000000000package lfs import ( "bufio" "bytes" "fmt" "io" "os" "regexp" "sort" "strconv" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/fs" "github.com/git-lfs/git-lfs/v3/tr" "github.com/git-lfs/gitobj/v2" ) var ( v1Aliases = []string{ "http://git-media.io/v/2", // alpha "https://hawser.github.com/spec/v1", // pre-release "https://git-lfs.github.com/spec/v1", // public launch } latest = "https://git-lfs.github.com/spec/v1" oidType = "sha256" oidRE = regexp.MustCompile(`\A[0-9a-f]{64}\z`) matcherRE = regexp.MustCompile("git-media|hawser|git-lfs") extRE = regexp.MustCompile(`\Aext-\d{1}-\w+`) pointerKeys = []string{"version", "oid", "size"} ) type Pointer struct { Version string Oid string Size int64 OidType string Extensions []*PointerExtension Canonical bool } // A PointerExtension is parsed from the Git LFS Pointer file. type PointerExtension struct { Name string Priority int Oid string OidType string } type ByPriority []*PointerExtension func (p ByPriority) Len() int { return len(p) } func (p ByPriority) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p ByPriority) Less(i, j int) bool { return p[i].Priority < p[j].Priority } func NewPointer(oid string, size int64, exts []*PointerExtension) *Pointer { return &Pointer{latest, oid, size, oidType, exts, true} } func NewPointerExtension(name string, priority int, oid string) *PointerExtension { return &PointerExtension{name, priority, oid, oidType} } func (p *Pointer) Encode(writer io.Writer) (int, error) { return EncodePointer(writer, p) } func (p *Pointer) Encoded() string { if p.Size == 0 { return "" } var buffer bytes.Buffer buffer.WriteString(fmt.Sprintf("version %s\n", latest)) for _, ext := range p.Extensions { buffer.WriteString(fmt.Sprintf("ext-%d-%s %s:%s\n", ext.Priority, ext.Name, ext.OidType, ext.Oid)) } buffer.WriteString(fmt.Sprintf("oid %s:%s\n", p.OidType, p.Oid)) buffer.WriteString(fmt.Sprintf("size %d\n", p.Size)) return buffer.String() } func EmptyPointer() *Pointer { return NewPointer(fs.EmptyObjectSHA256, 0, nil) } func EncodePointer(writer io.Writer, pointer *Pointer) (int, error) { return writer.Write([]byte(pointer.Encoded())) } func DecodePointerFromBlob(b *gitobj.Blob) (*Pointer, error) { // Check size before reading if b.Size >= blobSizeCutoff { return nil, errors.NewNotAPointerError(errors.New(tr.Tr.Get("blob size exceeds Git LFS pointer size cutoff"))) } return DecodePointer(b.Contents) } func DecodePointerFromFile(file string) (*Pointer, error) { // Check size before reading stat, err := os.Stat(file) if err != nil { return nil, err } if stat.Size() >= blobSizeCutoff { return nil, errors.NewNotAPointerError(errors.New(tr.Tr.Get("file size exceeds Git LFS pointer size cutoff"))) } f, err := os.OpenFile(file, os.O_RDONLY, 0644) if err != nil { return nil, err } defer f.Close() return DecodePointer(f) } func DecodePointer(reader io.Reader) (*Pointer, error) { p, _, err := DecodeFrom(reader) return p, err } // DecodeFrom decodes an *lfs.Pointer from the given io.Reader, "reader". // If the pointer encoded in the reader could successfully be read and decoded, // it will be returned with a nil error. 
// // If the pointer could not be decoded, an io.Reader containing the entire // blob's data will be returned, along with a parse error. func DecodeFrom(reader io.Reader) (*Pointer, io.Reader, error) { buf := make([]byte, blobSizeCutoff) n, err := reader.Read(buf) buf = buf[:n] var contents io.Reader = bytes.NewReader(buf) if err != io.EOF { contents = io.MultiReader(contents, reader) } if err != nil && err != io.EOF { return nil, contents, err } if len(buf) == 0 { return EmptyPointer(), contents, nil } p, err := decodeKV(bytes.TrimSpace(buf)) if err == nil && p != nil { p.Canonical = p.Encoded() == string(buf) } return p, contents, err } func verifyVersion(version string) error { if len(version) == 0 { return errors.NewNotAPointerError(errors.New(tr.Tr.Get("Missing version"))) } for _, v := range v1Aliases { if v == version { return nil } } return errors.New(tr.Tr.Get("Invalid version: %s", version)) } func decodeKV(data []byte) (*Pointer, error) { kvps, exts, err := decodeKVData(data) if err != nil { if errors.IsBadPointerKeyError(err) { return nil, errors.StandardizeBadPointerError(err) } return nil, err } if err := verifyVersion(kvps["version"]); err != nil { return nil, err } value, ok := kvps["oid"] if !ok { return nil, errors.New(tr.Tr.Get("Invalid OID")) } oid, err := parseOid(value) if err != nil { return nil, err } value, ok = kvps["size"] size, err := strconv.ParseInt(value, 10, 64) if err != nil || size < 0 { return nil, errors.New(tr.Tr.Get("invalid size: %q", value)) } var extensions []*PointerExtension if exts != nil { for key, value := range exts { ext, err := parsePointerExtension(key, value) if err != nil { return nil, err } extensions = append(extensions, ext) } if err = validatePointerExtensions(extensions); err != nil { return nil, err } sort.Sort(ByPriority(extensions)) } return NewPointer(oid, size, extensions), nil } func parseOid(value string) (string, error) { parts := strings.SplitN(value, ":", 2) if len(parts) != 2 { return "", errors.New(tr.Tr.Get("Invalid OID value: %s", value)) } if parts[0] != oidType { return "", errors.New(tr.Tr.Get("Invalid OID type: %s", parts[0])) } oid := parts[1] if !oidRE.MatchString(oid) { return "", errors.New(tr.Tr.Get("Invalid OID: %s", oid)) } return oid, nil } func parsePointerExtension(key string, value string) (*PointerExtension, error) { keyParts := strings.SplitN(key, "-", 3) if len(keyParts) != 3 || keyParts[0] != "ext" { return nil, errors.New(tr.Tr.Get("Invalid extension value: %s", value)) } p, err := strconv.Atoi(keyParts[1]) if err != nil || p < 0 { return nil, errors.New(tr.Tr.Get("Invalid priority: %s", keyParts[1])) } name := keyParts[2] oid, err := parseOid(value) if err != nil { return nil, err } return NewPointerExtension(name, p, oid), nil } func validatePointerExtensions(exts []*PointerExtension) error { m := make(map[int]struct{}) for _, ext := range exts { if _, exist := m[ext.Priority]; exist { return errors.New(tr.Tr.Get("duplicate priority found: %d", ext.Priority)) } m[ext.Priority] = struct{}{} } return nil } func decodeKVData(data []byte) (kvps map[string]string, exts map[string]string, err error) { kvps = make(map[string]string) if !matcherRE.Match(data) { err = errors.NewNotAPointerError(errors.New(tr.Tr.Get("invalid header"))) return } scanner := bufio.NewScanner(bytes.NewBuffer(data)) line := 0 numKeys := len(pointerKeys) for scanner.Scan() { text := scanner.Text() if len(text) == 0 { continue } parts := strings.SplitN(text, " ", 2) if len(parts) < 2 { err = 
errors.NewNotAPointerError(errors.New(tr.Tr.Get("error reading line %d: %s", line, text))) return } key := parts[0] value := parts[1] if numKeys <= line { err = errors.NewNotAPointerError(errors.New(tr.Tr.Get("extra line: %s", text))) return } if expected := pointerKeys[line]; key != expected { if !extRE.MatchString(key) { err = errors.NewBadPointerKeyError(expected, key) return } if exts == nil { exts = make(map[string]string) } exts[key] = value continue } line += 1 kvps[key] = value } err = scanner.Err() return } git-lfs-3.6.1/lfs/pointer_test.go000066400000000000000000000304741472372047300167410ustar00rootroot00000000000000package lfs import ( "bufio" "bytes" "io" "reflect" "strings" "testing" "github.com/git-lfs/git-lfs/v3/errors" "github.com/stretchr/testify/assert" ) func TestEncode(t *testing.T) { var buf bytes.Buffer pointer := NewPointer("booya", 12345, nil) _, err := EncodePointer(&buf, pointer) assert.Nil(t, err) bufReader := bufio.NewReader(&buf) assertLine(t, bufReader, "version https://git-lfs.github.com/spec/v1\n") assertLine(t, bufReader, "oid sha256:booya\n") assertLine(t, bufReader, "size 12345\n") line, err := bufReader.ReadString('\n') if err == nil { t.Fatalf("More to read: %s", line) } assert.Equal(t, "EOF", err.Error()) } func TestEncodeEmpty(t *testing.T) { var buf bytes.Buffer pointer := NewPointer("", 0, nil) _, err := EncodePointer(&buf, pointer) assert.Equal(t, nil, err) bufReader := bufio.NewReader(&buf) val, err := bufReader.ReadString('\n') assert.Equal(t, "", val) assert.Equal(t, "EOF", err.Error()) } func TestEncodeExtensions(t *testing.T) { var buf bytes.Buffer exts := []*PointerExtension{ NewPointerExtension("foo", 0, "foo_oid"), NewPointerExtension("bar", 1, "bar_oid"), NewPointerExtension("baz", 2, "baz_oid"), } pointer := NewPointer("main_oid", 12345, exts) _, err := EncodePointer(&buf, pointer) assert.Nil(t, err) bufReader := bufio.NewReader(&buf) assertLine(t, bufReader, "version https://git-lfs.github.com/spec/v1\n") assertLine(t, bufReader, "ext-0-foo sha256:foo_oid\n") assertLine(t, bufReader, "ext-1-bar sha256:bar_oid\n") assertLine(t, bufReader, "ext-2-baz sha256:baz_oid\n") assertLine(t, bufReader, "oid sha256:main_oid\n") assertLine(t, bufReader, "size 12345\n") line, err := bufReader.ReadString('\n') if err == nil { t.Fatalf("More to read: %s", line) } assert.Equal(t, "EOF", err.Error()) } func assertLine(t *testing.T, r *bufio.Reader, expected string) { actual, err := r.ReadString('\n') assert.Nil(t, err) assert.Equal(t, expected, actual) } func TestDecodeTinyFile(t *testing.T) { ex := "this is not a git-lfs file!" 
p, err := DecodePointer(bytes.NewBufferString(ex)) if p != nil { t.Errorf("pointer was decoded: %v", p) } if !errors.IsNotAPointerError(err) { t.Errorf("error is not a NotAPointerError: %s: '%v'", reflect.TypeOf(err), err) } } func TestDecode(t *testing.T) { ex := `version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345` p, err := DecodePointer(bytes.NewBufferString(ex)) assertEqualWithExample(t, ex, nil, err) assertEqualWithExample(t, ex, latest, p.Version) assertEqualWithExample(t, ex, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", p.Oid) assertEqualWithExample(t, ex, "sha256", p.OidType) assertEqualWithExample(t, ex, int64(12345), p.Size) } func TestDecodeExtensions(t *testing.T) { ex := `version https://git-lfs.github.com/spec/v1 ext-0-foo sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ext-1-bar sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb ext-2-baz sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345` p, err := DecodePointer(bytes.NewBufferString(ex)) assertEqualWithExample(t, ex, nil, err) assertEqualWithExample(t, ex, latest, p.Version) assertEqualWithExample(t, ex, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", p.Oid) assertEqualWithExample(t, ex, int64(12345), p.Size) assertEqualWithExample(t, ex, "sha256", p.OidType) assertEqualWithExample(t, ex, "foo", p.Extensions[0].Name) assertEqualWithExample(t, ex, 0, p.Extensions[0].Priority) assertEqualWithExample(t, ex, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", p.Extensions[0].Oid) assertEqualWithExample(t, ex, "sha256", p.Extensions[0].OidType) assertEqualWithExample(t, ex, "bar", p.Extensions[1].Name) assertEqualWithExample(t, ex, 1, p.Extensions[1].Priority) assertEqualWithExample(t, ex, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", p.Extensions[1].Oid) assertEqualWithExample(t, ex, "sha256", p.Extensions[1].OidType) assertEqualWithExample(t, ex, "baz", p.Extensions[2].Name) assertEqualWithExample(t, ex, 2, p.Extensions[2].Priority) assertEqualWithExample(t, ex, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", p.Extensions[2].Oid) assertEqualWithExample(t, ex, "sha256", p.Extensions[2].OidType) } func TestDecodeExtensionsSort(t *testing.T) { ex := `version https://git-lfs.github.com/spec/v1 ext-2-baz sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ext-0-foo sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ext-1-bar sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345` p, err := DecodePointer(bytes.NewBufferString(ex)) assertEqualWithExample(t, ex, nil, err) assertEqualWithExample(t, ex, latest, p.Version) assertEqualWithExample(t, ex, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", p.Oid) assertEqualWithExample(t, ex, int64(12345), p.Size) assertEqualWithExample(t, ex, "sha256", p.OidType) assertEqualWithExample(t, ex, "foo", p.Extensions[0].Name) assertEqualWithExample(t, ex, 0, p.Extensions[0].Priority) assertEqualWithExample(t, ex, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", p.Extensions[0].Oid) assertEqualWithExample(t, ex, "sha256", p.Extensions[0].OidType) assertEqualWithExample(t, ex, "bar", 
p.Extensions[1].Name) assertEqualWithExample(t, ex, 1, p.Extensions[1].Priority) assertEqualWithExample(t, ex, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", p.Extensions[1].Oid) assertEqualWithExample(t, ex, "sha256", p.Extensions[1].OidType) assertEqualWithExample(t, ex, "baz", p.Extensions[2].Name) assertEqualWithExample(t, ex, 2, p.Extensions[2].Priority) assertEqualWithExample(t, ex, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", p.Extensions[2].Oid) assertEqualWithExample(t, ex, "sha256", p.Extensions[2].OidType) } func TestDecodePreRelease(t *testing.T) { ex := `version https://hawser.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345` p, err := DecodePointer(bytes.NewBufferString(ex)) assertEqualWithExample(t, ex, nil, err) assertEqualWithExample(t, ex, latest, p.Version) assertEqualWithExample(t, ex, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", p.Oid) assertEqualWithExample(t, ex, "sha256", p.OidType) assertEqualWithExample(t, ex, int64(12345), p.Size) } func TestDecodeFromEmptyReader(t *testing.T) { p, buf, err := DecodeFrom(strings.NewReader("")) by, _ := io.ReadAll(buf) assert.Nil(t, err) assert.Equal(t, p.Oid, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") assert.Equal(t, p.Size, int64(0)) assert.Empty(t, by) } func TestDecodeCanonical(t *testing.T) { canonicalExamples := []string{ // standard `version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 `, // extensions `version https://git-lfs.github.com/spec/v1 ext-0-foo sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ext-1-bar sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb ext-2-baz sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 `, // empty file "", } nonCanonicalExamples := []string{ // missing trailing newline `version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // carriage returns "version https://git-lfs.github.com/spec/v1\r\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\r\nsize 12345\r\n", // trailing whitespace "version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 12345 \n", // unsorted extensions `version https://git-lfs.github.com/spec/v1 ext-2-baz sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ext-0-foo sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ext-1-bar sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 `, } for _, ex := range canonicalExamples { p, err := DecodePointer(bytes.NewBufferString(ex)) if err != nil { t.Errorf("Error decoding: %v", err) } assert.Equal(t, p.Canonical, true) } for _, ex := range nonCanonicalExamples { p, err := DecodePointer(bytes.NewBufferString(ex)) if err != nil { t.Errorf("Error decoding: %v", err) } assert.Equal(t, p.Canonical, false) } } func TestDecodeInvalid(t *testing.T) { examples := []string{ "invalid stuff", // no sha "# git-media", // bad oid `version https://git-lfs.github.com/spec/v1 oid sha256:boom size 12345`, // bad oid type `version 
https://git-lfs.github.com/spec/v1 oid shazam:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // no oid `version https://git-lfs.github.com/spec/v1 size 12345`, // bad version `version http://git-media.io/v/whatever oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // no version `oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // bad size `version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size fif`, // no size `version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393`, // bad `key value` format `version=https://git-lfs.github.com/spec/v1 oid=sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size=fif`, // no git-media `version=http://wat.io/v/2 oid=sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size=fif`, // extra key `version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 wat wat`, // keys out of order `version https://git-lfs.github.com/spec/v1 size 12345 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393`, // bad ext name `version https://git-lfs.github.com/spec/v1 ext-0-$$$$ sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // bad ext priority `version https://git-lfs.github.com/spec/v1 ext-#-foo sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // duplicate ext priority `version https://git-lfs.github.com/spec/v1 ext-0-foo sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ext-0-bar sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // ext priority over 9 `version https://git-lfs.github.com/spec/v1 ext-10-foo sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // bad ext oid `version https://git-lfs.github.com/spec/v1 ext-0-foo sha256:boom oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // bad ext oid type `version https://git-lfs.github.com/spec/v1 ext-0-foo boom:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // bad OID `version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393& size 177735`, } for _, ex := range examples { p, err := DecodePointer(bytes.NewBufferString(ex)) if err == nil { t.Errorf("No error decoding: %v\nFrom:\n%s", p, strings.TrimSpace(ex)) } } } func assertEqualWithExample(t *testing.T, example string, expected, actual interface{}) { assert.Equal(t, expected, actual, "Example:\n%s", strings.TrimSpace(example)) } git-lfs-3.6.1/lfs/scanner.go000066400000000000000000000107121472372047300156440ustar00rootroot00000000000000package lfs import ( "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/tools" ) const ( // blobSizeCutoff is used to determine which files to scan for Git 
LFS // pointers. Any file with a size below this cutoff will be scanned. blobSizeCutoff = 1024 // stdoutBufSize is the size of the buffers given to a sub-process stdout stdoutBufSize = 16384 // chanBufSize is the size of the channels used to pass data from one // sub-process to another. chanBufSize = 100 ) // WrappedPointer wraps a pointer.Pointer and provides the git sha1 // and the file name associated with the object, taken from the // rev-list output. type WrappedPointer struct { Sha1 string Name string SrcName string Status string *Pointer } // catFileBatchCheck uses git cat-file --batch-check to get the type // and size of a git object. Any object that is not a blob, or whose size // is not under the blobSizeCutoff, will be ignored. revs is a channel over // which strings containing git sha1s will be sent. It returns a channel // from which sha1 strings can be read. func catFileBatchCheck(revs *StringChannelWrapper, lockableSet *lockableNameSet) (*StringChannelWrapper, chan string, error) { smallRevCh := make(chan string, chanBufSize) lockableCh := make(chan string, chanBufSize) errCh := make(chan error, 2) // up to 2 errors, one from each goroutine if err := runCatFileBatchCheck(smallRevCh, lockableCh, lockableSet, revs, errCh); err != nil { return nil, nil, err } return NewStringChannelWrapper(smallRevCh, errCh), lockableCh, nil } // catFileBatch() uses an ObjectDatabase from the // github.com/git-lfs/gitobj/v2 package to get the contents of Git // blob objects, given their SHA1s, similar to the behaviour of // 'git cat-file --batch'. // Input Git blob SHA1s should be sent over the revs channel. // The blob contents will be decoded as Git LFS pointers and any valid // pointers will be returned as pointer.Pointer structs in a new channel. func catFileBatch(revs *StringChannelWrapper, lockableSet *lockableNameSet, gitEnv, osEnv config.Environment) (*PointerChannelWrapper, chan string, error) { pointerCh := make(chan *WrappedPointer, chanBufSize) lockableCh := make(chan string, chanBufSize) errCh := make(chan error, 5) // shared by 2 goroutines, each of which may add more detailed errors
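/* Illustrative consumption, mirroring scanRefsToChan() in gitscanner_refs.go:

	pointers, lockables, err := catFileBatch(revs, lockableSet, gitEnv, osEnv)
	for p := range pointers.Results {
		// p.Name, p.Oid and p.Size describe one candidate LFS pointer
	}
	for name := range lockables {
		// name is a file path matching the lockable set
	}
	err = pointers.Wait() // deferred errors
*/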
if err := runCatFileBatch(pointerCh, lockableCh, lockableSet, revs, errCh, gitEnv, osEnv); err != nil { return nil, nil, err } return NewPointerChannelWrapper(pointerCh, errCh), lockableCh, nil } // ChannelWrapper for pointer Scan* functions to more easily return async error data via Wait() // See NewPointerChannelWrapper for construction / use type PointerChannelWrapper struct { *tools.BaseChannelWrapper Results <-chan *WrappedPointer } // Construct a new channel wrapper for WrappedPointer // Caller can use s.Results directly for normal processing then call Wait() to finish & check for errors // Scan function is required to create error channel large enough not to block (usually 1 is ok) func NewPointerChannelWrapper(pointerChan <-chan *WrappedPointer, errorChan <-chan error) *PointerChannelWrapper { return &PointerChannelWrapper{tools.NewBaseChannelWrapper(errorChan), pointerChan} } // ChannelWrapper for string channel functions to more easily return async error data via Wait() // Caller can use s.Results directly for normal processing then call Wait() to finish & check for errors // See NewStringChannelWrapper for construction / use type StringChannelWrapper struct { *tools.BaseChannelWrapper Results <-chan string } // Construct a new channel wrapper for string // Caller can use s.Results directly for normal processing then call Wait() to finish & check for errors func NewStringChannelWrapper(stringChan <-chan string, errorChan <-chan error) *StringChannelWrapper { return &StringChannelWrapper{tools.NewBaseChannelWrapper(errorChan), stringChan} } // ChannelWrapper for TreeBlob channel functions to more easily return async error data via Wait() // See NewTreeBlobChannelWrapper for construction / use type TreeBlobChannelWrapper struct { *tools.BaseChannelWrapper Results <-chan git.TreeBlob } // Construct a new channel wrapper for TreeBlob // Caller can use s.Results directly for normal processing then call Wait() to finish & check for errors func NewTreeBlobChannelWrapper(treeBlobChan <-chan git.TreeBlob, errorChan <-chan error) *TreeBlobChannelWrapper { return &TreeBlobChannelWrapper{tools.NewBaseChannelWrapper(errorChan), treeBlobChan} } git-lfs-3.6.1/lfs/scanner_git_test.go000066400000000000000000000132661472372047300175550ustar00rootroot00000000000000package lfs_test // to avoid import cycles // This is for doing complete git-level tests using test utils // Needs to be a separate file from scanner_test so that we can use a diff package // which avoids import cycles with testutils import ( "fmt" "sort" "testing" "time" "github.com/git-lfs/git-lfs/v3/config" . 
"github.com/git-lfs/git-lfs/v3/lfs" test "github.com/git-lfs/git-lfs/v3/t/cmd/util" "github.com/stretchr/testify/assert" ) func TestScanUnpushed(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() inputs := []*test.CommitInput{ { // 0 Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, }, }, { // 1 NewBranch: "branch2", Files: []*test.FileInput{ {Filename: "file1.txt", Size: 25}, }, }, { // 2 ParentBranches: []string{"master"}, // back on master Files: []*test.FileInput{ {Filename: "file1.txt", Size: 30}, }, }, { // 3 NewBranch: "branch3", Files: []*test.FileInput{ {Filename: "file1.txt", Size: 32}, }, }, } repo.AddCommits(inputs) // Add a couple of remotes and test state depending on what's pushed repo.AddRemote("origin") repo.AddRemote("upstream") pointers, err := scanUnpushed("") assert.Nil(t, err, "Should be no error calling ScanUnpushed") assert.Len(t, pointers, 4, "Should be 4 pointers because none pushed") test.RunGitCommand(t, true, "push", "origin", "branch2") // Branch2 will have pushed 2 commits pointers, err = scanUnpushed("") assert.Nil(t, err, "Should be no error calling ScanUnpushed") assert.Len(t, pointers, 2, "Should be 2 pointers") test.RunGitCommand(t, true, "push", "upstream", "master") // Master pushes 1 more commit pointers, err = scanUnpushed("") assert.Nil(t, err, "Should be no error calling ScanUnpushed") assert.Len(t, pointers, 1, "Should be 1 pointer") test.RunGitCommand(t, true, "push", "origin", "branch3") // All pushed (somewhere) pointers, err = scanUnpushed("") assert.Nil(t, err, "Should be no error calling ScanUnpushed") assert.Empty(t, pointers, "Should be 0 pointers unpushed") // Check origin pointers, err = scanUnpushed("origin") assert.Nil(t, err, "Should be no error calling ScanUnpushed") assert.Empty(t, pointers, "Should be 0 pointers unpushed to origin") // Check upstream pointers, err = scanUnpushed("upstream") assert.Nil(t, err, "Should be no error calling ScanUnpushed") assert.Len(t, pointers, 2, "Should be 2 pointers unpushed to upstream") } func scanUnpushed(remoteName string) ([]*WrappedPointer, error) { pointers := make([]*WrappedPointer, 0, 10) var multiErr error gitscanner := NewGitScanner(config.New(), func(p *WrappedPointer, err error) { if err != nil { if multiErr != nil { multiErr = fmt.Errorf("%v\n%v", multiErr, err) } else { multiErr = err } return } pointers = append(pointers, p) }) if err := gitscanner.ScanUnpushed(remoteName, nil); err != nil { return nil, err } return pointers, multiErr } func TestScanPreviousVersions(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() now := time.Now() inputs := []*test.CommitInput{ { // 0 CommitDate: now.AddDate(0, 0, -20), Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, {Filename: "file2.txt", Size: 30}, {Filename: "folder/nested.txt", Size: 40}, {Filename: "folder/nested2.txt", Size: 31}, }, }, { // 1 CommitDate: now.AddDate(0, 0, -10), Files: []*test.FileInput{ {Filename: "file2.txt", Size: 22}, }, }, { // 2 NewBranch: "excluded", CommitDate: now.AddDate(0, 0, -6), Files: []*test.FileInput{ {Filename: "file2.txt", Size: 12}, {Filename: "folder/nested2.txt", Size: 16}, }, }, { // 3 ParentBranches: []string{"master"}, CommitDate: now.AddDate(0, 0, -4), Files: []*test.FileInput{ {Filename: "folder/nested.txt", Size: 42}, {Filename: "folder/nested2.txt", Size: 6}, }, }, { // 4 Files: []*test.FileInput{ {Filename: "folder/nested.txt", Size: 22}, }, }, } outputs := 
repo.AddCommits(inputs) // Previous commits exclude the final state of each file, which is: // file1.txt [0] (unchanged since first commit so excluded) // file2.txt [1] (because [2] is on another branch so excluded) // folder/nested.txt [4] (updated at last commit) // folder/nested2.txt [3] // The only changes which will be included are changes prior to final state // where the '-' side of the diff is inside the date range // 7 day limit excludes [0] commit, but includes state from that if there // was a subsequent change pointers, err := scanPreviousVersions(t, "master", now.AddDate(0, 0, -7)) assert.Equal(t, nil, err) // Includes the following 'before' state at commits: // folder/nested.txt [-diff at 4, ie 3, -diff at 3 ie 0] // folder/nested2.txt [-diff at 3 ie 0] // others are either on different branches, before this window, or unchanged expected := []*WrappedPointer{ {Name: "folder/nested.txt", Pointer: outputs[3].Files[0]}, {Name: "folder/nested.txt", Pointer: outputs[0].Files[2]}, {Name: "folder/nested2.txt", Pointer: outputs[0].Files[3]}, } // Need to sort to compare equality sort.Sort(test.WrappedPointersByOid(expected)) sort.Sort(test.WrappedPointersByOid(pointers)) assert.Equal(t, expected, pointers) } func scanPreviousVersions(t *testing.T, ref string, since time.Time) ([]*WrappedPointer, error) { pointers := make([]*WrappedPointer, 0, 10) gitscanner := NewGitScanner(config.New(), func(p *WrappedPointer, err error) { if err != nil { t.Error(err) return } pointers = append(pointers, p) }) err := gitscanner.ScanPreviousVersions(ref, since, nil) return pointers, err } git-lfs-3.6.1/lfs/scanner_test.go000066400000000000000000000266461472372047300167140ustar00rootroot00000000000000package lfs import ( "strings" "testing" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/stretchr/testify/assert" ) var ( pointerParseLogOutput = `lfs-commit-sha: 637908bf28b38ab238e1b5e6a5bfbfb2e513a0df 07d571b413957508679042e45508af5945b3f1e5 diff --git a/smoke_1.png b/smoke_1.png deleted file mode 100644 index 2fe5451..0000000 --- a/smoke_1.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8eb65d66303acc60062f44b44ef1f7360d7189db8acf3d066e59e2528f39514e -size 35022 lfs-commit-sha: 07d571b413957508679042e45508af5945b3f1e5 8e5bd456b754f7d61c7157e82edc5ed124be4da6 diff --git a/flare_1.png b/flare_1.png deleted file mode 100644 index 1cfc5a1..0000000 --- a/flare_1.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -ext-0-foo sha256:36485434f4f8a55150282ae7c78619a89de52721c00f48159f2562463df9c043 -ext-1-bar sha256:382a2a13e705bbd8de7e2e13857c26551db17121ac57edca5dec9b5bd753e9c8 -ext-2-ray sha256:423ee9e5988fb4670bf815990e9307c3b23296210c31581dec4d4ae89dabae46 -oid sha256:ea61c67cc5e8b3504d46de77212364045f31d9a023ad4448a1ace2a2fb4eed28 -size 72982 diff --git a/radial_1.png b/radial_1.png index 9daa2e5..c648385 100644 --- a/radial_1.png +++ b/radial_1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:334c8a0a520cf9f58189dba5a9a26c7bff2769b4a3cc199650c00618bde5b9dd -size 16849 +oid sha256:3301b3da173d231f0f6b1f9bf075e573758cd79b3cfeff7623a953d708d6688b +size 3152388 diff --git "a/r\303\251publiquefran\303\247aise.bin" "b/r\303\251publiquefran\303\247aise.bin" index 9daa2e5..c648385 100644 --- "a/r\303\251publiquefran\303\247aise.bin" +++ "b/r\303\251publiquefran\303\247aise.bin" @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -ext-0-foo sha256:36485434f4f8a55150282ae7c78619a89de52721c00f48159f2562463df9c043
-ext-1-bar sha256:382a2a13e705bbd8de7e2e13857c26551db17121ac57edca5dec9b5bd753e9c8 -ext-2-ray sha256:423ee9e5988fb4670bf815990e9307c3b23296210c31581dec4d4ae89dabae46 -oid sha256:334c8a0a520cf9f58189dba5a9a26c7bff2769b4a3cc199650c00618bde5b9dd -size 16849 +ext-0-foo sha256:95d8260e8365a9dfd39842bdeee9b20e0e3fe3daf9bb4a8c0a1acb31008ed7b4 +ext-1-bar sha256:674bf4995720a43e03e174bcc1132ca95de6a8e4155fe3b2c482dceb42cbc0a5 +ext-2-ray sha256:0d323c95ae4b0a9c195ddc437c470678bddd2ee0906fb2f7b8166cd2474e22d9 +oid sha256:4b666195c133d8d0541ad0bc0e77399b9dc81861577a98314ac1ff1e9877893a +size 3152388 lfs-commit-sha: 60fde3d23553e10a55e2a32ed18c20f65edd91e7 e2eaf1c10b57da7b98eb5d722ec5912ddeb53ea1 diff --git a/1D_Noise.png b/1D_Noise.png new file mode 100644 index 0000000..2622b4a --- /dev/null +++ b/1D_Noise.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5d84da40ab1f6aa28df2b2bf1ade2cdcd4397133f903c12b4106641b10e1ed6 +size 1289 diff --git a/waveNM.png b/waveNM.png new file mode 100644 index 0000000..8519883 --- /dev/null +++ b/waveNM.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe2c2f236b97bba4585d9909a227a8fa64897d9bbe297fa272f714302d86c908 +size 125873 lfs-commit-sha: 64b3372e108daaa593412d5e1d9df8169a9547ea e99c9cac7ff3f3cf1b2e670a64a5a381c44ffceb diff --git "a/hobbit_5armies_2.mov" "b/hobbit_5armies_2.mov" new file mode 100644 index 0000000..92a88f8 --- /dev/null +++ "b/hobbit_5armies_2.mov" @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +ext-0-foo sha256:b37197ac149950d057521bcb7e00806f0528e19352bd72767165bc390d4f055e +ext-1-bar sha256:c71772e5ea8e8c6f053f0f1dc89f8c01243975b1a040acbcf732fe2dbc0bcb61 +oid sha256:ebff26d6b557b1416a6fded097fd9b9102e2d8195532c377ac365c736c87d4bc +size 127142413 ` ) func TestLogScannerAdditionsNoFiltering(t *testing.T) { r := strings.NewReader(pointerParseLogOutput) scanner := newLogScanner(LogDiffAdditions, r) // modification, + side assertNextScan(t, scanner) p := scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "radial_1.png", p.Name) assert.Equal(t, "3301b3da173d231f0f6b1f9bf075e573758cd79b3cfeff7623a953d708d6688b", p.Oid) assert.Equal(t, int64(3152388), p.Size) } // modification, + side with extensions assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "républiquefrançaise.bin", p.Name) assert.Equal(t, "4b666195c133d8d0541ad0bc0e77399b9dc81861577a98314ac1ff1e9877893a", p.Oid) assert.Equal(t, int64(3152388), p.Size) } // addition, + side assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "1D_Noise.png", p.Name) assert.Equal(t, "f5d84da40ab1f6aa28df2b2bf1ade2cdcd4397133f903c12b4106641b10e1ed6", p.Oid) assert.Equal(t, int64(1289), p.Size) } // addition, + side assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "waveNM.png", p.Name) assert.Equal(t, "fe2c2f236b97bba4585d9909a227a8fa64897d9bbe297fa272f714302d86c908", p.Oid) assert.Equal(t, int64(125873), p.Size) } // addition, + side with extensions assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "hobbit_5armies_2.mov", p.Name) assert.Equal(t, "ebff26d6b557b1416a6fded097fd9b9102e2d8195532c377ac365c736c87d4bc", p.Oid) assert.Equal(t, int64(127142413), p.Size) } assertScannerDone(t, scanner) assert.Nil(t, scanner.Pointer()) } func TestLogScannerAdditionsFilterInclude(t *testing.T) { r := strings.NewReader(pointerParseLogOutput) scanner := newLogScanner(LogDiffAdditions, r) 
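// Restrict the scan to additions whose paths match the "wave*" pattern;
// pointers for all other files in the log output should be skipped.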
scanner.Filter = filepathfilter.New([]string{"wave*"}, nil, filepathfilter.GitAttributes) // addition, + side assertNextScan(t, scanner) p := scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "waveNM.png", p.Name) assert.Equal(t, "fe2c2f236b97bba4585d9909a227a8fa64897d9bbe297fa272f714302d86c908", p.Oid) assert.Equal(t, int64(125873), p.Size) } assertScannerDone(t, scanner) assert.Nil(t, scanner.Pointer()) } func TestLogScannerAdditionsFilterIncludeOctals(t *testing.T) { r := strings.NewReader(pointerParseLogOutput) scanner := newLogScanner(LogDiffAdditions, r) scanner.Filter = filepathfilter.New([]string{"*ç*"}, nil, filepathfilter.GitAttributes) // modification, + side with extensions assertNextScan(t, scanner) p := scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "républiquefrançaise.bin", p.Name) assert.Equal(t, "4b666195c133d8d0541ad0bc0e77399b9dc81861577a98314ac1ff1e9877893a", p.Oid) assert.Equal(t, int64(3152388), p.Size) } assertScannerDone(t, scanner) assert.Nil(t, scanner.Pointer()) } func TestLogScannerAdditionsFilterExclude(t *testing.T) { r := strings.NewReader(pointerParseLogOutput) scanner := newLogScanner(LogDiffAdditions, r) scanner.Filter = filepathfilter.New(nil, []string{"wave*"}, filepathfilter.GitAttributes) // modification, + side assertNextScan(t, scanner) p := scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "radial_1.png", p.Name) assert.Equal(t, "3301b3da173d231f0f6b1f9bf075e573758cd79b3cfeff7623a953d708d6688b", p.Oid) assert.Equal(t, int64(3152388), p.Size) } // modification, + side with extensions assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "républiquefrançaise.bin", p.Name) assert.Equal(t, "4b666195c133d8d0541ad0bc0e77399b9dc81861577a98314ac1ff1e9877893a", p.Oid) assert.Equal(t, int64(3152388), p.Size) } // addition, + side assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "1D_Noise.png", p.Name) assert.Equal(t, "f5d84da40ab1f6aa28df2b2bf1ade2cdcd4397133f903c12b4106641b10e1ed6", p.Oid) assert.Equal(t, int64(1289), p.Size) } // addition, + side with extensions assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "hobbit_5armies_2.mov", p.Name) assert.Equal(t, "ebff26d6b557b1416a6fded097fd9b9102e2d8195532c377ac365c736c87d4bc", p.Oid) assert.Equal(t, int64(127142413), p.Size) } assertScannerDone(t, scanner) assert.Nil(t, scanner.Pointer()) } func TestLogScannerDeletionsNoFiltering(t *testing.T) { r := strings.NewReader(pointerParseLogOutput) scanner := newLogScanner(LogDiffDeletions, r) // deletion, - side assertNextScan(t, scanner) p := scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "smoke_1.png", p.Name) assert.Equal(t, "8eb65d66303acc60062f44b44ef1f7360d7189db8acf3d066e59e2528f39514e", p.Oid) assert.Equal(t, int64(35022), p.Size) } // deletion, - side with extensions assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "flare_1.png", p.Name) assert.Equal(t, "ea61c67cc5e8b3504d46de77212364045f31d9a023ad4448a1ace2a2fb4eed28", p.Oid) assert.Equal(t, int64(72982), p.Size) } // modification, - side assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "radial_1.png", p.Name) assert.Equal(t, "334c8a0a520cf9f58189dba5a9a26c7bff2769b4a3cc199650c00618bde5b9dd", p.Oid) assert.Equal(t, int64(16849), p.Size) } // modification, - side with extensions assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, 
"républiquefrançaise.bin", p.Name) assert.Equal(t, "334c8a0a520cf9f58189dba5a9a26c7bff2769b4a3cc199650c00618bde5b9dd", p.Oid) assert.Equal(t, int64(16849), p.Size) } assertScannerDone(t, scanner) } func TestLogScannerDeletionsFilterInclude(t *testing.T) { r := strings.NewReader(pointerParseLogOutput) scanner := newLogScanner(LogDiffDeletions, r) scanner.Filter = filepathfilter.New([]string{"flare*"}, nil, filepathfilter.GitAttributes) // deletion, - side with extensions assertNextScan(t, scanner) p := scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "flare_1.png", p.Name) assert.Equal(t, "ea61c67cc5e8b3504d46de77212364045f31d9a023ad4448a1ace2a2fb4eed28", p.Oid) assert.Equal(t, int64(72982), p.Size) } assertScannerDone(t, scanner) } func TestLogScannerDeletionsFilterIncludeOctals(t *testing.T) { r := strings.NewReader(pointerParseLogOutput) scanner := newLogScanner(LogDiffDeletions, r) scanner.Filter = filepathfilter.New([]string{"*ç*"}, nil, filepathfilter.GitAttributes) // modification, - side with extensions assertNextScan(t, scanner) p := scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "républiquefrançaise.bin", p.Name) assert.Equal(t, "334c8a0a520cf9f58189dba5a9a26c7bff2769b4a3cc199650c00618bde5b9dd", p.Oid) assert.Equal(t, int64(16849), p.Size) } assertScannerDone(t, scanner) } func TestLogScannerDeletionsFilterExclude(t *testing.T) { r := strings.NewReader(pointerParseLogOutput) scanner := newLogScanner(LogDiffDeletions, r) scanner.Filter = filepathfilter.New(nil, []string{"flare*"}, filepathfilter.GitAttributes) // deletion, - side assertNextScan(t, scanner) p := scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "smoke_1.png", p.Name) assert.Equal(t, "8eb65d66303acc60062f44b44ef1f7360d7189db8acf3d066e59e2528f39514e", p.Oid) assert.Equal(t, int64(35022), p.Size) } // modification, - side assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "radial_1.png", p.Name) assert.Equal(t, "334c8a0a520cf9f58189dba5a9a26c7bff2769b4a3cc199650c00618bde5b9dd", p.Oid) assert.Equal(t, int64(16849), p.Size) } // modification, - side with extensions assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "républiquefrançaise.bin", p.Name) assert.Equal(t, "334c8a0a520cf9f58189dba5a9a26c7bff2769b4a3cc199650c00618bde5b9dd", p.Oid) assert.Equal(t, int64(16849), p.Size) } assertScannerDone(t, scanner) } git-lfs-3.6.1/lfs/util.go000066400000000000000000000157221472372047300151760ustar00rootroot00000000000000package lfs import ( "fmt" "io" "os" "path/filepath" "runtime" "strings" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tasklog" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" ) type Platform int const ( PlatformWindows = Platform(iota) PlatformLinux = Platform(iota) PlatformOSX = Platform(iota) PlatformOther = Platform(iota) // most likely a *nix variant e.g. 
freebsd PlatformUndetermined = Platform(iota) ) var currentPlatform = PlatformUndetermined func join(parts ...string) string { return strings.Join(parts, "/") } func (f *GitFilter) CopyCallbackFile(event, filename string, index, totalFiles int) (tools.CopyCallback, *os.File, error) { logPath, _ := f.cfg.Os.Get("GIT_LFS_PROGRESS") if len(logPath) == 0 || len(filename) == 0 || len(event) == 0 { return nil, nil, nil } if !filepath.IsAbs(logPath) { return nil, nil, errors.New(tr.Tr.Get("GIT_LFS_PROGRESS must be an absolute path")) } cbDir := filepath.Dir(logPath) if err := tools.MkdirAll(cbDir, f.cfg); err != nil { return nil, nil, wrapProgressError(err, event, logPath) } file, err := os.OpenFile(logPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) if err != nil { return nil, file, wrapProgressError(err, event, logPath) } var prevWritten int64 deadline := f.clk.Now().Add(tasklog.DefaultLoggingThrottle) cb := tools.CopyCallback(func(total int64, written int64, current int) error { now := f.clk.Now() if written != prevWritten && (!now.Before(deadline) || written >= total) { _, err := fmt.Fprintf(file, "%s %d/%d %d/%d %s\n", event, index, totalFiles, written, total, filename) file.Sync() prevWritten = written deadline = now.Add(tasklog.DefaultLoggingThrottle) return wrapProgressError(err, event, logPath) } return nil }) return cb, file, nil } func wrapProgressError(err error, event, filename string) error { if err != nil { return errors.New(tr.Tr.Get("error writing Git LFS %s progress to %s: %s", event, filename, err.Error())) } return nil } var localDirSet = tools.NewStringSetFromSlice([]string{".", "./", ".\\"}) func GetPlatform() Platform { if currentPlatform == PlatformUndetermined { switch runtime.GOOS { case "windows": currentPlatform = PlatformWindows case "linux": currentPlatform = PlatformLinux case "darwin": currentPlatform = PlatformOSX default: currentPlatform = PlatformOther } } return currentPlatform } type PathConverter interface { Convert(string) string } // Convert filenames expressed relative to the root of the repo to be relative // to the current working dir. Useful when needing to call git with results from a rooted command, // but the user is in a subdir of their repo func NewRepoToCurrentPathConverter(cfg *config.Configuration) (PathConverter, error) { r, c, p, err := pathConverterArgs(cfg) if err != nil { return nil, err } return &repoToCurrentPathConverter{ repoDir: r, currDir: c, passthrough: p, }, nil } type repoToCurrentPathConverter struct { repoDir string currDir string passthrough bool } func (p *repoToCurrentPathConverter) Convert(filename string) string { if p.passthrough { return filename } abs := join(p.repoDir, filename) rel, err := filepath.Rel(p.currDir, abs) if err != nil { // Use absolute file instead return abs } return filepath.ToSlash(rel) } // Convert filenames expressed relative to the current directory to be // relative to the repo root.
Useful when calling git with arguments that require them // to be rooted but the user is in a subdir of their repo & expects to use relative args func NewCurrentToRepoPathConverter(cfg *config.Configuration) (PathConverter, error) { r, c, p, err := pathConverterArgs(cfg) if err != nil { return nil, err } return &currentToRepoPathConverter{ repoDir: r, currDir: c, passthrough: p, }, nil } type currentToRepoPathConverter struct { repoDir string currDir string passthrough bool } func (p *currentToRepoPathConverter) Convert(filename string) string { if p.passthrough { return filename } var abs string if filepath.IsAbs(filename) { abs = tools.ResolveSymlinks(filename) } else { abs = join(p.currDir, filename) } reltoroot, err := filepath.Rel(p.repoDir, abs) if err != nil { // Can't do this, use absolute as best fallback return abs } return filepath.ToSlash(reltoroot) } // Convert filenames expressed relative to the current directory to be relative // to the repo root and convert them into wildmatch patterns. func NewCurrentToRepoPatternConverter(cfg *config.Configuration) (PathConverter, error) { r, c, p, err := pathConverterArgs(cfg) if err != nil { return nil, err } return &currentToRepoPatternConverter{ c: &currentToRepoPathConverter{ repoDir: r, currDir: c, passthrough: p, }, }, nil } type currentToRepoPatternConverter struct { c *currentToRepoPathConverter } func (p *currentToRepoPatternConverter) Convert(filename string) string { pattern := p.c.Convert(filename) if st, err := os.Stat(filename); err == nil && st.IsDir() { pattern += "/" } if strings.HasPrefix(pattern, "./") { pattern = pattern[2:] if len(pattern) == 0 { pattern = "**" } } return pattern } func pathConverterArgs(cfg *config.Configuration) (string, string, bool, error) { currDir, err := os.Getwd() if err != nil { return "", "", false, errors.New(tr.Tr.Get("unable to get working dir: %v", err)) } currDir = tools.ResolveSymlinks(currDir) return cfg.LocalWorkingDir(), currDir, cfg.LocalWorkingDir() == currDir, nil } // Are we running on Windows? Need to handle some extra path shenanigans func IsWindows() bool { return GetPlatform() == PlatformWindows } func CopyFileContents(cfg *config.Configuration, src string, dst string) error { tmp, err := TempFile(cfg, filepath.Base(dst)) if err != nil { return err } defer func() { tmp.Close() os.Remove(tmp.Name()) }() in, err := os.Open(src) if err != nil { return err } defer in.Close() _, err = io.Copy(tmp, in) if err != nil { return err } err = tmp.Close() if err != nil { return err } return os.Rename(tmp.Name(), dst) } func LinkOrCopy(cfg *config.Configuration, src string, dst string) error { if src == dst { return nil } err := os.Link(src, dst) if err == nil { return err } return CopyFileContents(cfg, src, dst) } // TempFile creates a temporary file in the temporary directory specified by the // configuration that has the proper permissions for the repository. On // success, it returns an open, non-nil *os.File, and the caller is responsible // for closing and/or removing it. On failure, the temporary file is // automatically cleaned up and an error returned. // // This function is designed to handle only temporary files that will be renamed // into place later somewhere within the Git repository.
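//
// A minimal usage sketch (mirroring CopyFileContents above; dst is a
// hypothetical destination path):
//
//	tmp, err := TempFile(cfg, filepath.Base(dst))
//	if err != nil {
//		return err
//	}
//	defer func() { tmp.Close(); os.Remove(tmp.Name()) }()
//	// ... write to tmp, close it, then os.Rename(tmp.Name(), dst) ...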
func TempFile(cfg *config.Configuration, pattern string) (*os.File, error) { return tools.TempFile(cfg.TempDir(), pattern, cfg) } git-lfs-3.6.1/lfs/util_generic.go000066400000000000000000000002501472372047300166600ustar00rootroot00000000000000//go:build !linux || !cgo // +build !linux !cgo package lfs import ( "io" ) func CloneFile(writer io.Writer, reader io.Reader) (bool, error) { return false, nil } git-lfs-3.6.1/lfs/util_test.go000066400000000000000000000054101472372047300162260ustar00rootroot00000000000000package lfs import ( "bytes" "os" "testing" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/tasklog" "github.com/git-lfs/git-lfs/v3/tools" "github.com/jmhodges/clock" "github.com/stretchr/testify/assert" ) func TestBodyWithCallback(t *testing.T) { called := 0 calledRead := make([]int64, 0, 2) cb := func(total int64, read int64, current int) error { called += 1 calledRead = append(calledRead, read) assert.Equal(t, 5, int(total)) return nil } reader := tools.NewByteBodyWithCallback([]byte("BOOYA"), 5, cb) readBuf := make([]byte, 3) n, err := reader.Read(readBuf) assert.Nil(t, err) assert.Equal(t, "BOO", string(readBuf[0:n])) n, err = reader.Read(readBuf) assert.Nil(t, err) assert.Equal(t, "YA", string(readBuf[0:n])) assert.Equal(t, 2, called) assert.Len(t, calledRead, 2) assert.Equal(t, 3, int(calledRead[0])) assert.Equal(t, 5, int(calledRead[1])) } func TestReadWithCallback(t *testing.T) { called := 0 calledRead := make([]int64, 0, 2) reader := &tools.CallbackReader{ TotalSize: 5, Reader: bytes.NewBufferString("BOOYA"), C: func(total int64, read int64, current int) error { called += 1 calledRead = append(calledRead, read) assert.Equal(t, 5, int(total)) return nil }, } readBuf := make([]byte, 3) n, err := reader.Read(readBuf) assert.Nil(t, err) assert.Equal(t, "BOO", string(readBuf[0:n])) n, err = reader.Read(readBuf) assert.Nil(t, err) assert.Equal(t, "YA", string(readBuf[0:n])) assert.Equal(t, 2, called) assert.Len(t, calledRead, 2) assert.Equal(t, 3, int(calledRead[0])) assert.Equal(t, 5, int(calledRead[1])) } func TestCopyCallbackFileThrottle(t *testing.T) { tmpDir := t.TempDir() logFile := tmpDir + "/git_lfs_progress.log" osMf := config.UniqMapFetcher(map[string]string{ "GIT_LFS_PROGRESS": logFile, }) gitMf := config.UniqMapFetcher(map[string]string{}) fc := clock.NewFake() gf := GitFilter{ cfg: &config.Configuration{ Os: config.EnvironmentOf(osMf), Git: config.EnvironmentOf(gitMf), }, clk: fc, } bufSize := int64(128 * 1024) cb, f, err := gf.CopyCallbackFile("clean", "test_copy", 1, 1) assert.NoError(t, err) defer f.Close() r := &tools.CallbackReader{ TotalSize: bufSize, Reader: bytes.NewReader(make([]byte, bufSize)), C: cb, } readbuf := make([]byte, 32*1024) r.Read(readbuf) // message skipped fc.Add(tasklog.DefaultLoggingThrottle) r.Read(readbuf) // message logged due to delay fc.Add(tasklog.DefaultLoggingThrottle / 2) r.Read(readbuf) // message skipped r.Read(readbuf) // message logged because reader is finished logBytes, err := os.ReadFile(logFile) assert.Nil(t, err) expectedLog := "clean 1/1 65536/131072 test_copy\nclean 1/1 131072/131072 test_copy\n" assert.Equal(t, expectedLog, string(logBytes)) } git-lfs-3.6.1/lfsapi/000077500000000000000000000000001472372047300143555ustar00rootroot00000000000000git-lfs-3.6.1/lfsapi/auth.go000066400000000000000000000272151472372047300156540ustar00rootroot00000000000000package lfsapi import ( "encoding/base64" "fmt" "net/http" "net/url" "os" "strings" "github.com/git-lfs/git-lfs/v3/creds" 
"github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) var ( defaultEndpointFinder = NewEndpointFinder(nil) defaultMaxAuthAttempts = 3 ) // DoWithAuth sends an HTTP request to get an HTTP response. It attempts to add // authentication from netrc or git's credential helpers if necessary, // supporting basic authentication. func (c *Client) DoWithAuth(remote string, access creds.Access, req *http.Request) (*http.Response, error) { count := 0 res, err := c.doWithAuth(remote, &count, access, req, nil) if errors.IsAuthError(err) { if len(req.Header.Get("Authorization")) == 0 { // This case represents a rejected request that // should have been authenticated but wasn't. Do // not count this against our redirection // maximum. newAccess := c.Endpoints.AccessFor(access.URL()) tracerx.Printf("api: http response indicates %q authentication. Resubmitting...", newAccess.Mode()) return c.DoWithAuth(remote, newAccess, req) } } return res, err } // DoWithAuthNoRetry sends an HTTP request to get an HTTP response. It works in // the same way as DoWithAuth, but will not retry the request if it fails with // an authorization error. func (c *Client) DoWithAuthNoRetry(remote string, access creds.Access, req *http.Request) (*http.Response, error) { count := 0 return c.doWithAuth(remote, &count, access, req, nil) } // DoAPIRequestWithAuth sends an HTTP request to get an HTTP response similarly // to DoWithAuth, but using the LFS API endpoint for the provided remote and // operation to determine the access mode. func (c *Client) DoAPIRequestWithAuth(remote string, req *http.Request) (*http.Response, error) { operation := getReqOperation(req) apiEndpoint := c.Endpoints.Endpoint(operation, remote) access := c.Endpoints.AccessFor(apiEndpoint.Url) return c.DoWithAuth(remote, access, req) } func (c *Client) doWithAuth(remote string, count *int, access creds.Access, req *http.Request, via []*http.Request) (*http.Response, error) { if *count == defaultMaxAuthAttempts { return nil, fmt.Errorf("too many authentication attempts") } req.Header = c.client.ExtraHeadersFor(req) credWrapper, err := c.getCreds(remote, access, req) if err != nil { return nil, err } c.credContext.SetStateFields(credWrapper.Creds["state[]"]) res, err := c.doWithCreds(req, count, credWrapper, access, via) if err != nil { if errors.IsAuthError(err) { multistage := credWrapper.Creds.IsMultistage() newMode, newModes, headers := getAuthAccess(res, access.Mode(), c.access, multistage) newAccess := access.Upgrade(newMode) if newAccess.Mode() != access.Mode() { c.Endpoints.SetAccess(newAccess) c.access = newModes } if credWrapper.Creds != nil { req.Header.Del("Authorization") if multistage && *count < defaultMaxAuthAttempts && res != nil && res.StatusCode == 401 { *count++ } else { credWrapper.CredentialHelper.Reject(credWrapper.Creds) } } c.credContext.SetWWWAuthHeaders(headers) } } if res != nil && res.StatusCode < 300 && res.StatusCode > 199 { credWrapper.CredentialHelper.Approve(credWrapper.Creds) } return res, err } func (c *Client) doWithCreds(req *http.Request, count *int, credWrapper creds.CredentialHelperWrapper, access creds.Access, via []*http.Request) (*http.Response, error) { if access.Mode() == creds.NegotiateAccess { return c.doWithNegotiate(req, credWrapper) } req.Header.Set("User-Agent", lfshttp.UserAgent) client, err := c.client.HttpClient(req.URL, access.Mode()) if err != nil { return nil, err } redirectedReq, res, err := 
c.client.DoWithRedirect(client, req, "", via) if err != nil || res != nil { return res, err } if redirectedReq == nil { return res, errors.New(tr.Tr.Get("failed to redirect request")) } return c.doWithAuth("", count, access, redirectedReq, via) } // getCreds fills the authorization header for the given request if possible, // from the following sources: // // 1. Negotiate access is handled elsewhere. // 2. Existing Authorization or ?token query tells LFS that the request is ready. // 3. Netrc based on the hostname. // 4. URL authentication on the Endpoint URL or the Git Remote URL. // 5. Git Credential Helper, potentially prompting the user. // // There are three URLs in play, which makes this a little confusing. // // 1. The request URL, which should be something like "https://git.com/repo.git/info/lfs/objects/batch" // 2. The LFS API URL, which should be something like "https://git.com/repo.git/info/lfs" // This URL is used for the "lfs.URL.access" git config key, which determines // what kind of auth the LFS server expects. Could be BasicAccess, // NegotiateAccess, or NoneAccess, in which case the Git Credential // Helper step is skipped. We do not want to prompt the user for a password // to fetch public repository data. // 3. The Git Remote URL, which should be something like "https://git.com/repo.git" // This URL is used for the Git Credential Helper. This way existing https // Git remote credentials can be re-used for LFS. func (c *Client) getCreds(remote string, access creds.Access, req *http.Request) (creds.CredentialHelperWrapper, error) { ef := c.Endpoints if ef == nil { ef = defaultEndpointFinder } operation := getReqOperation(req) apiEndpoint := ef.Endpoint(operation, remote) if access.Mode() != creds.NegotiateAccess { if requestHasAuth(req) || access.Mode() == creds.NoneAccess { return creds.CredentialHelperWrapper{CredentialHelper: creds.NullCreds, Input: nil, Url: nil, Creds: nil}, nil } credsURL, err := getCredURLForAPI(ef, operation, remote, apiEndpoint, req) if err != nil { return creds.CredentialHelperWrapper{CredentialHelper: creds.NullCreds, Input: nil, Url: nil, Creds: nil}, errors.Wrap(err, tr.Tr.Get("credentials")) } if credsURL == nil { return creds.CredentialHelperWrapper{CredentialHelper: creds.NullCreds, Input: nil, Url: nil, Creds: nil}, nil } credWrapper := c.getGitCredsWrapper(ef, req, credsURL) err = credWrapper.FillCreds() if err == nil { tracerx.Printf("Filled credentials for %s", credsURL) setRequestAuthWithCreds(req, credWrapper.Creds) } return credWrapper, err } // Negotiate only credsURL, err := url.Parse(apiEndpoint.Url) if err != nil { return creds.CredentialHelperWrapper{CredentialHelper: creds.NullCreds, Input: nil, Url: nil, Creds: nil}, errors.Wrap(err, tr.Tr.Get("credentials")) } // Negotiate uses creds to create the session credWrapper := c.getGitCredsWrapper(ef, req, credsURL) return credWrapper, err } func (c *Client) getGitCredsWrapper(ef EndpointFinder, req *http.Request, u *url.URL) creds.CredentialHelperWrapper { return c.credContext.GetCredentialHelper(c.Credentials, u) } func getCredURLForAPI(ef EndpointFinder, operation, remote string, apiEndpoint lfshttp.Endpoint, req *http.Request) (*url.URL, error) { apiURL, err := url.Parse(apiEndpoint.Url) if err != nil { return nil, err } // If the LFS request doesn't match the current LFS URL, don't bother // attempting to set the Authorization header from the LFS or Git remote URLs.
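// (A request to a different scheme or host therefore gets credentials
// looked up for its own URL rather than for the configured endpoint.)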
if req.URL.Scheme != apiURL.Scheme || req.URL.Host != apiURL.Host { return req.URL, nil } if setRequestAuthFromURL(req, apiURL) { return nil, nil } if len(remote) > 0 { if u := ef.GitRemoteURL(remote, operation == "upload"); u != "" { schemedUrl, _ := fixSchemelessURL(u) gitRemoteURL, err := url.Parse(schemedUrl) if err != nil { return nil, err } if gitRemoteURL.Scheme == apiURL.Scheme && gitRemoteURL.Host == apiURL.Host { if setRequestAuthFromURL(req, gitRemoteURL) { return nil, nil } return gitRemoteURL, nil } } } return apiURL, nil } // fixSchemelessURL prepends an empty scheme "//" if none was found in // the URL and replaces the first colon with a slash in order to satisfy RFC // 3986 §3.3, and `net/url.Parse()`. // // It returns a string parse-able with `net/url.Parse()` and a boolean whether // or not an empty scheme was added. func fixSchemelessURL(u string) (string, bool) { if hasScheme(u) { return u, false } colon := strings.Index(u, ":") slash := strings.Index(u, "/") if colon >= 0 && (slash < 0 || colon < slash) { // First path segment has a colon, assumed that it's a // scheme-less URL. Append an empty scheme on top to // satisfy RFC 3986 §3.3, and `net/url.Parse()`. // // In addition, replace the first colon with a slash since // otherwise the colon looks like it's introducing a port // number. return fmt.Sprintf("//%s", strings.Replace(u, ":", "/", 1)), true } return u, true } var ( // supportedSchemes is the list of URL schemes the `lfsapi` package // supports. supportedSchemes = []string{"ssh", "http", "https"} ) // hasScheme returns whether or not a given string (taken to represent a RFC // 3986 URL) has a scheme that is supported by the `lfsapi` package. func hasScheme(what string) bool { for _, scheme := range supportedSchemes { if strings.HasPrefix(what, fmt.Sprintf("%s://", scheme)) { return true } } return false } func requestHasAuth(req *http.Request) bool { // The "Authorization" string constant is safe, since we assume that all // request headers have been canonicalized. 
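// (http.Header.Set canonicalizes keys, so a lookup for "Authorization"
// also matches a header that was set as "authorization".)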
if len(req.Header.Get("Authorization")) > 0 { return true } return len(req.URL.Query().Get("token")) > 0 } func setRequestAuthFromURL(req *http.Request, u *url.URL) bool { if u.User == nil { return false } if pass, ok := u.User.Password(); ok { fmt.Fprintln(os.Stderr, tr.Tr.Get("warning: current Git remote contains credentials")) setRequestAuth(req, u.User.Username(), pass) return true } return false } func setRequestAuth(req *http.Request, user, pass string) { if len(user) == 0 && len(pass) == 0 { return } token := fmt.Sprintf("%s:%s", user, pass) auth := "Basic " + strings.TrimSpace(base64.StdEncoding.EncodeToString([]byte(token))) req.Header.Set("Authorization", auth) } func setRequestAuthWithCreds(req *http.Request, c creds.Creds) { authtype := creds.FirstEntryForKey(c, "authtype") credential := creds.FirstEntryForKey(c, "credential") if len(authtype) == 0 && len(credential) == 0 { user := creds.FirstEntryForKey(c, "username") pass := creds.FirstEntryForKey(c, "password") setRequestAuth(req, user, pass) return } auth := fmt.Sprintf("%s %s", authtype, credential) req.Header.Set("Authorization", auth) } func getReqOperation(req *http.Request) string { operation := "download" if req.Method == "POST" || req.Method == "PUT" { operation = "upload" } return operation } var ( authenticateHeaders = []string{"Lfs-Authenticate", "Www-Authenticate"} ) func getAuthAccess(res *http.Response, access creds.AccessMode, modes []creds.AccessMode, multistage bool) (creds.AccessMode, []creds.AccessMode, []string) { newModes := make([]creds.AccessMode, 0, len(modes)) for _, mode := range modes { if multistage || access != mode { newModes = append(newModes, mode) } } headers := make([]string, 0) if res != nil { for _, headerName := range authenticateHeaders { headers = append(headers, res.Header[headerName]...) 
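// The raw header values are collected verbatim so that they can be
// handed back to the credential context for further parsing.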
} supportedModes := make(map[creds.AccessMode]struct{}) for _, headerName := range authenticateHeaders { for _, auth := range res.Header[headerName] { pieces := strings.SplitN(strings.ToLower(auth), " ", 2) if len(pieces) == 0 { continue } supportedModes[creds.AccessMode(pieces[0])] = struct{}{} } } for _, mode := range newModes { if _, ok := supportedModes[mode]; ok { return mode, newModes, headers } } } return creds.BasicAccess, newModes, headers } git-lfs-3.6.1/lfsapi/auth_test.go000066400000000000000000000551371472372047300167170ustar00rootroot00000000000000package lfsapi import ( "encoding/base64" "encoding/json" "fmt" "net/http" "net/http/httptest" "strings" "sync/atomic" "testing" "github.com/git-lfs/git-lfs/v3/creds" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type authRequest struct { Test string } func TestAuthenticateHeaderAccess(t *testing.T) { tests := map[string]creds.AccessMode{ "": creds.BasicAccess, "basic 123": creds.BasicAccess, "basic": creds.BasicAccess, "unknown": creds.BasicAccess, "NEGOTIATE": creds.NegotiateAccess, "negotiate": creds.NegotiateAccess, "NEGOTIATE 1 2 3": creds.NegotiateAccess, "negotiate 1 2 3": creds.NegotiateAccess, } for _, key := range authenticateHeaders { for value, expected := range tests { res := &http.Response{Header: make(http.Header)} res.Header.Set(key, value) t.Logf("%s: %s", key, value) result, _, _ := getAuthAccess(res, creds.NoneAccess, creds.AllAccessModes(), false) assert.Equal(t, expected, result) } } } func TestDualAccessModes(t *testing.T) { res := &http.Response{Header: make(http.Header)} res.Header["Www-Authenticate"] = []string{"Negotiate 123", "Basic 456"} access, next, _ := getAuthAccess(res, creds.NoneAccess, creds.AllAccessModes(), false) assert.Equal(t, creds.NegotiateAccess, access) access, next, _ = getAuthAccess(res, access, next, false) assert.Equal(t, creds.BasicAccess, access) access, _, _ = getAuthAccess(res, access, next, false) assert.Equal(t, creds.BasicAccess, access) } func TestDoWithAuthApprove(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { atomic.AddUint32(&called, 1) assert.Equal(t, "POST", req.Method) body := &authRequest{} err := json.NewDecoder(req.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Approve", body.Test) w.Header().Set("Lfs-Authenticate", "Basic") actual := req.Header.Get("Authorization") if len(actual) == 0 { w.WriteHeader(http.StatusUnauthorized) return } expected := "Basic " + strings.TrimSpace( base64.StdEncoding.EncodeToString([]byte("user:pass")), ) assert.Equal(t, expected, actual) })) defer srv.Close() cred := newMockCredentialHelper() c, err := NewClient(lfshttp.NewContext(git.NewReadOnlyConfig("", ""), nil, map[string]string{ "lfs.url": srv.URL + "/repo/lfs", }, )) require.Nil(t, err) c.Credentials = cred access := c.Endpoints.AccessFor(srv.URL + "/repo/lfs") assert.Equal(t, creds.NoneAccess, (&access).Mode()) req, err := http.NewRequest("POST", srv.URL+"/repo/lfs/foo", nil) require.Nil(t, err) err = MarshalToRequest(req, &authRequest{Test: "Approve"}) require.Nil(t, err) res, err := c.DoWithAuth("", c.Endpoints.AccessFor(srv.URL+"/repo/lfs"), req) require.Nil(t, err) assert.Equal(t, http.StatusOK, res.StatusCode) assert.True(t, cred.IsApproved(creds.Creds(map[string][]string{ "username": []string{"user"}, "password": []string{"pass"}, "protocol": 
[]string{"http"}, "host": []string{srv.Listener.Addr().String()}, }))) access = c.Endpoints.AccessFor(srv.URL + "/repo/lfs") assert.Equal(t, creds.BasicAccess, (&access).Mode()) assert.EqualValues(t, 2, called) } func TestDoWithAuthReject(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { atomic.AddUint32(&called, 1) assert.Equal(t, "POST", req.Method) body := &authRequest{} err := json.NewDecoder(req.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Reject", body.Test) actual := req.Header.Get("Authorization") expected := "Basic " + strings.TrimSpace( base64.StdEncoding.EncodeToString([]byte("user:pass")), ) w.Header().Set("Lfs-Authenticate", "Basic") if actual != expected { // Write http.StatusUnauthorized to force the credential // helper to reject the credentials w.WriteHeader(http.StatusUnauthorized) } else { w.WriteHeader(http.StatusOK) } })) defer srv.Close() invalidCreds := creds.Creds(map[string][]string{ "username": []string{"user"}, "password": []string{"wrong_pass"}, "path": []string{""}, "protocol": []string{"http"}, "host": []string{srv.Listener.Addr().String()}, }) cred := newMockCredentialHelper() cred.Approve(invalidCreds) assert.True(t, cred.IsApproved(invalidCreds)) c, _ := NewClient(nil) c.Credentials = cred c.Endpoints = NewEndpointFinder(lfshttp.NewContext(git.NewReadOnlyConfig("", ""), nil, map[string]string{ "lfs.url": srv.URL, }, )) req, err := http.NewRequest("POST", srv.URL, nil) require.Nil(t, err) err = MarshalToRequest(req, &authRequest{Test: "Reject"}) require.Nil(t, err) res, err := c.DoWithAuth("", c.Endpoints.AccessFor(srv.URL), req) require.Nil(t, err) assert.Equal(t, http.StatusOK, res.StatusCode) assert.False(t, cred.IsApproved(invalidCreds)) assert.True(t, cred.IsApproved(creds.Creds(map[string][]string{ "username": []string{"user"}, "password": []string{"pass"}, "path": []string{""}, "protocol": []string{"http"}, "host": []string{srv.Listener.Addr().String()}, }))) assert.EqualValues(t, 3, called) } func TestDoWithAuthNoRetry(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { atomic.AddUint32(&called, 1) assert.Equal(t, "POST", req.Method) body := &authRequest{} err := json.NewDecoder(req.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Approve", body.Test) w.Header().Set("Lfs-Authenticate", "Basic") actual := req.Header.Get("Authorization") if len(actual) == 0 { w.WriteHeader(http.StatusUnauthorized) return } expected := "Basic " + strings.TrimSpace( base64.StdEncoding.EncodeToString([]byte("user:pass")), ) assert.Equal(t, expected, actual) })) defer srv.Close() cred := newMockCredentialHelper() c, err := NewClient(lfshttp.NewContext(git.NewReadOnlyConfig("", ""), nil, map[string]string{ "lfs.url": srv.URL + "/repo/lfs", }, )) require.Nil(t, err) c.Credentials = cred access := c.Endpoints.AccessFor(srv.URL + "/repo/lfs") assert.Equal(t, creds.NoneAccess, (&access).Mode()) req, err := http.NewRequest("POST", srv.URL+"/repo/lfs/foo", nil) require.Nil(t, err) err = MarshalToRequest(req, &authRequest{Test: "Approve"}) require.Nil(t, err) res, err := c.DoWithAuthNoRetry("", c.Endpoints.AccessFor(srv.URL+"/repo/lfs"), req) access = c.Endpoints.AccessFor(srv.URL + "/repo/lfs") assert.True(t, errors.IsAuthError(err)) assert.Equal(t, http.StatusUnauthorized, res.StatusCode) assert.Equal(t, creds.BasicAccess, (&access).Mode()) assert.EqualValues(t, 1, called) } func TestDoAPIRequestWithAuth(t 
*testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { atomic.AddUint32(&called, 1) assert.Equal(t, "POST", req.Method) body := &authRequest{} err := json.NewDecoder(req.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Approve", body.Test) w.Header().Set("Lfs-Authenticate", "Basic") actual := req.Header.Get("Authorization") if len(actual) == 0 { w.WriteHeader(http.StatusUnauthorized) return } expected := "Basic " + strings.TrimSpace( base64.StdEncoding.EncodeToString([]byte("user:pass")), ) assert.Equal(t, expected, actual) })) defer srv.Close() cred := newMockCredentialHelper() c, err := NewClient(lfshttp.NewContext(git.NewReadOnlyConfig("", ""), nil, map[string]string{ "lfs.url": srv.URL + "/repo/lfs", }, )) require.Nil(t, err) c.Credentials = cred access := c.Endpoints.AccessFor(srv.URL + "/repo/lfs") assert.Equal(t, creds.NoneAccess, (&access).Mode()) req, err := http.NewRequest("POST", srv.URL+"/repo/lfs/foo", nil) require.Nil(t, err) err = MarshalToRequest(req, &authRequest{Test: "Approve"}) require.Nil(t, err) res, err := c.DoAPIRequestWithAuth("", req) require.Nil(t, err) assert.Equal(t, http.StatusOK, res.StatusCode) assert.True(t, cred.IsApproved(creds.Creds(map[string][]string{ "username": []string{"user"}, "password": []string{"pass"}, "protocol": []string{"http"}, "host": []string{srv.Listener.Addr().String()}, }))) access = c.Endpoints.AccessFor(srv.URL + "/repo/lfs") assert.Equal(t, creds.BasicAccess, (&access).Mode()) assert.EqualValues(t, 2, called) } type mockCredentialHelper struct { Approved map[string]creds.Creds } func newMockCredentialHelper() *mockCredentialHelper { return &mockCredentialHelper{ Approved: make(map[string]creds.Creds), } } func (m *mockCredentialHelper) Fill(input creds.Creds) (creds.Creds, error) { if found, ok := m.Approved[credsToKey(input)]; ok { return found, nil } output := make(creds.Creds) for key, value := range input { output[key] = value } if _, ok := output["username"]; !ok { output["username"] = []string{"user"} } output["password"] = []string{"pass"} return output, nil } func (m *mockCredentialHelper) Approve(creds creds.Creds) error { m.Approved[credsToKey(creds)] = creds return nil } func (m *mockCredentialHelper) Reject(creds creds.Creds) error { delete(m.Approved, credsToKey(creds)) return nil } func (m *mockCredentialHelper) IsApproved(creds creds.Creds) bool { if found, ok := m.Approved[credsToKey(creds)]; ok { if len(found["password"]) == 1 && len(creds["password"]) == 1 { return found["password"][0] == creds["password"][0] } else { return len(found["password"]) == 0 && len(creds["password"]) == 0 } } return false } func credsToKey(creds creds.Creds) string { var kvs []string for _, k := range []string{"protocol", "host", "path"} { value := "" if v, ok := creds[k]; ok && len(v) == 1 { value = v[0] } kvs = append(kvs, fmt.Sprintf("%s:%s", k, value)) } return strings.Join(kvs, " ") } func basicAuth(user, pass string) string { value := fmt.Sprintf("%s:%s", user, pass) return fmt.Sprintf("Basic %s", strings.TrimSpace(base64.StdEncoding.EncodeToString([]byte(value)))) } type getCredsExpected struct { Access creds.AccessMode Creds creds.Creds CredsURL string Authorization string } type getCredsTest struct { Remote string Method string Href string Endpoint string Header map[string]string Config map[string]string Expected getCredsExpected } func TestGetCreds(t *testing.T) { tests := map[string]getCredsTest{ "no access": getCredsTest{ Remote: "origin", Method: 
"GET", Href: "https://git-server.com/repo/lfs/locks", Endpoint: "https://git-server.com/repo/lfs", Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", }, Expected: getCredsExpected{ Access: creds.NoneAccess, }, }, "basic access": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/lfs/locks", Endpoint: "https://git-server.com/repo/lfs", Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", }, Expected: getCredsExpected{ Access: creds.BasicAccess, Authorization: basicAuth("git-server.com", "monkey"), CredsURL: "https://git-server.com/repo/lfs", Creds: map[string][]string{ "protocol": []string{"https"}, "host": []string{"git-server.com"}, "username": []string{"git-server.com"}, "password": []string{"monkey"}, }, }, }, "basic access with usehttppath": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/lfs/locks", Endpoint: "https://git-server.com/repo/lfs", Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", "credential.usehttppath": "true", }, Expected: getCredsExpected{ Access: creds.BasicAccess, Authorization: basicAuth("git-server.com", "monkey"), CredsURL: "https://git-server.com/repo/lfs", Creds: map[string][]string{ "protocol": []string{"https"}, "host": []string{"git-server.com"}, "username": []string{"git-server.com"}, "password": []string{"monkey"}, "path": []string{"repo/lfs"}, }, }, }, "basic access with url-specific usehttppath": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/lfs/locks", Endpoint: "https://git-server.com/repo/lfs", Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", "credential.https://git-server.com.usehttppath": "true", }, Expected: getCredsExpected{ Access: creds.BasicAccess, Authorization: basicAuth("git-server.com", "monkey"), CredsURL: "https://git-server.com/repo/lfs", Creds: map[string][]string{ "protocol": []string{"https"}, "host": []string{"git-server.com"}, "username": []string{"git-server.com"}, "password": []string{"monkey"}, "path": []string{"repo/lfs"}, }, }, }, "custom auth": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/lfs/locks", Endpoint: "https://git-server.com/repo/lfs", Header: map[string]string{ "Authorization": "custom", }, Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", }, Expected: getCredsExpected{ Access: creds.BasicAccess, Authorization: "custom", }, }, "username in url": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/lfs/locks", Endpoint: "https://git-server.com/repo/lfs", Config: map[string]string{ "lfs.url": "https://user@git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", }, Expected: getCredsExpected{ Access: creds.BasicAccess, Authorization: basicAuth("user", "monkey"), CredsURL: "https://user@git-server.com/repo/lfs", Creds: map[string][]string{ "protocol": []string{"https"}, "host": []string{"git-server.com"}, "username": []string{"user"}, "password": []string{"monkey"}, }, }, }, "different remote url, basic access": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/lfs/locks", Endpoint: "https://git-server.com/repo/lfs", Config: map[string]string{ "lfs.url": 
"https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", "remote.origin.url": "https://git-server.com/repo", }, Expected: getCredsExpected{ Access: creds.BasicAccess, Authorization: basicAuth("git-server.com", "monkey"), CredsURL: "https://git-server.com/repo", Creds: map[string][]string{ "protocol": []string{"https"}, "host": []string{"git-server.com"}, "username": []string{"git-server.com"}, "password": []string{"monkey"}, }, }, }, "api url auth": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/locks", Endpoint: "https://git-server.com/repo", Config: map[string]string{ "lfs.url": "https://user:pass@git-server.com/repo", "lfs.https://git-server.com/repo.access": "basic", }, Expected: getCredsExpected{ Access: creds.BasicAccess, Authorization: basicAuth("user", "pass"), }, }, "git url auth": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/locks", Endpoint: "https://git-server.com/repo", Config: map[string]string{ "lfs.url": "https://git-server.com/repo", "lfs.https://git-server.com/repo.access": "basic", "remote.origin.url": "https://user:pass@git-server.com/repo", }, Expected: getCredsExpected{ Access: creds.BasicAccess, Authorization: basicAuth("user", "pass"), }, }, "scheme mismatch": getCredsTest{ Remote: "origin", Method: "GET", Href: "http://git-server.com/repo/lfs/locks", Endpoint: "https://git-server.com/repo/lfs", Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", }, Expected: getCredsExpected{ Access: creds.BasicAccess, Authorization: basicAuth("git-server.com", "monkey"), CredsURL: "http://git-server.com/repo/lfs/locks", Creds: map[string][]string{ "protocol": []string{"http"}, "host": []string{"git-server.com"}, "username": []string{"git-server.com"}, "password": []string{"monkey"}, }, }, }, "host mismatch": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://lfs-server.com/repo/lfs/locks", Endpoint: "https://git-server.com/repo/lfs", Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", }, Expected: getCredsExpected{ Access: creds.BasicAccess, Authorization: basicAuth("lfs-server.com", "monkey"), CredsURL: "https://lfs-server.com/repo/lfs/locks", Creds: map[string][]string{ "protocol": []string{"https"}, "host": []string{"lfs-server.com"}, "username": []string{"lfs-server.com"}, "password": []string{"monkey"}, }, }, }, "port mismatch": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com:8080/repo/lfs/locks", Endpoint: "https://git-server.com/repo/lfs", Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", }, Expected: getCredsExpected{ Access: creds.BasicAccess, Authorization: basicAuth("git-server.com:8080", "monkey"), CredsURL: "https://git-server.com:8080/repo/lfs/locks", Creds: map[string][]string{ "protocol": []string{"https"}, "host": []string{"git-server.com:8080"}, "username": []string{"git-server.com:8080"}, "password": []string{"monkey"}, }, }, }, "bare ssh URI": getCredsTest{ Remote: "origin", Method: "POST", Href: "https://git-server.com/repo/lfs/objects/batch", Endpoint: "https://git-server.com/repo/lfs", Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", "remote.origin.url": "git@git-server.com:repo.git", }, Expected: 
getCredsExpected{ Access: creds.BasicAccess, Authorization: basicAuth("git-server.com", "monkey"), CredsURL: "https://git-server.com/repo/lfs", Creds: map[string][]string{ "host": []string{"git-server.com"}, "password": []string{"monkey"}, "protocol": []string{"https"}, "username": []string{"git-server.com"}, }, }, }, } for desc, test := range tests { t.Log(desc) req, err := http.NewRequest(test.Method, test.Href, nil) if err != nil { t.Errorf("[%s] %s", desc, err) continue } for key, value := range test.Header { req.Header.Set(key, value) } ctx := lfshttp.NewContext(git.NewReadOnlyConfig("", ""), nil, test.Config) client, _ := NewClient(ctx) client.Credentials = &fakeCredentialFiller{} client.Endpoints = NewEndpointFinder(ctx) credWrapper, err := client.getCreds(test.Remote, client.Endpoints.AccessFor(test.Endpoint), req) if !assert.Nil(t, err) { continue } assert.Equal(t, test.Expected.Authorization, req.Header.Get("Authorization"), "authorization") if test.Expected.Creds != nil { assert.EqualValues(t, test.Expected.Creds, credWrapper.Creds) } else { assert.Nil(t, credWrapper.Creds, "creds") } if len(test.Expected.CredsURL) > 0 { if assert.NotNil(t, credWrapper.Url, "credURL") { assert.Equal(t, test.Expected.CredsURL, credWrapper.Url.String(), "credURL") } } else { assert.Nil(t, credWrapper.Url) } } } type fakeCredentialFiller struct{} func (f *fakeCredentialFiller) Fill(input creds.Creds) (creds.Creds, error) { output := make(creds.Creds) for key, value := range input { output[key] = value } if _, ok := output["username"]; !ok { output["username"] = input["host"] } output["password"] = []string{"monkey"} return output, nil } func (f *fakeCredentialFiller) Approve(creds creds.Creds) error { return errors.New("Not implemented") } func (f *fakeCredentialFiller) Reject(creds creds.Creds) error { return errors.New("Not implemented") } func TestClientRedirectReauthenticate(t *testing.T) { var srv1, srv2 *httptest.Server var called1, called2 uint32 var creds1, creds2 creds.Creds srv1 = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called1, 1) if hdr := r.Header.Get("Authorization"); len(hdr) > 0 { parts := strings.SplitN(hdr, " ", 2) typ, b64 := parts[0], parts[1] auth, err := base64.URLEncoding.DecodeString(b64) assert.Nil(t, err) assert.Equal(t, "Basic", typ) assert.Equal(t, "user1:pass1", string(auth)) http.Redirect(w, r, srv2.URL+r.URL.Path, http.StatusMovedPermanently) return } w.WriteHeader(http.StatusUnauthorized) })) srv2 = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called2, 1) parts := strings.SplitN(r.Header.Get("Authorization"), " ", 2) typ, b64 := parts[0], parts[1] auth, err := base64.URLEncoding.DecodeString(b64) assert.Nil(t, err) assert.Equal(t, "Basic", typ) assert.Equal(t, "user2:pass2", string(auth)) })) // Change the URL of srv2 to make it appears as if it is a different // host. 
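	// (A note on the rewrite below, added for clarity: swapping 127.0.0.1
	// for 0.0.0.0 makes srv2 look like a different host to the client, so
	// the credentials approved for srv1 cannot be reused after the
	// redirect and a second credential lookup is forced.)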
srv2.URL = strings.Replace(srv2.URL, "127.0.0.1", "0.0.0.0", 1) creds1 = creds.Creds(map[string][]string{ "protocol": []string{"http"}, "host": []string{strings.TrimPrefix(srv1.URL, "http://")}, "username": []string{"user1"}, "password": []string{"pass1"}, }) creds2 = creds.Creds(map[string][]string{ "protocol": []string{"http"}, "host": []string{strings.TrimPrefix(srv2.URL, "http://")}, "username": []string{"user2"}, "password": []string{"pass2"}, }) defer srv1.Close() defer srv2.Close() c, err := NewClient(lfshttp.NewContext(nil, nil, nil)) cred := creds.NewCredentialCacher() cred.Approve(creds1) cred.Approve(creds2) c.Credentials = cred req, err := http.NewRequest("GET", srv1.URL, nil) require.Nil(t, err) _, err = c.DoAPIRequestWithAuth("", req) assert.Nil(t, err) // called1 is 2 since LFS tries an unauthenticated request first assert.EqualValues(t, 2, called1) assert.EqualValues(t, 1, called2) } git-lfs-3.6.1/lfsapi/body.go000066400000000000000000000011711472372047300156410ustar00rootroot00000000000000package lfsapi import ( "bytes" "encoding/json" "io" "net/http" "strconv" ) type ReadSeekCloser interface { io.Seeker io.ReadCloser } func MarshalToRequest(req *http.Request, obj interface{}) error { by, err := json.Marshal(obj) if err != nil { return err } clen := len(by) req.Header.Set("Content-Length", strconv.Itoa(clen)) req.ContentLength = int64(clen) req.Body = NewByteBody(by) return nil } func NewByteBody(by []byte) ReadSeekCloser { return &closingByteReader{Reader: bytes.NewReader(by)} } type closingByteReader struct { *bytes.Reader } func (r *closingByteReader) Close() error { return nil } git-lfs-3.6.1/lfsapi/client.go000066400000000000000000000030411472372047300161600ustar00rootroot00000000000000package lfsapi import ( "io" "net/http" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/creds" "github.com/git-lfs/git-lfs/v3/lfshttp" ) func (c *Client) NewRequest(method string, e lfshttp.Endpoint, suffix string, body interface{}) (*http.Request, error) { return c.client.NewRequest(method, e, suffix, body) } // Do sends an HTTP request to get an HTTP response. It wraps net/http, adding // extra headers, redirection handling, and error reporting. func (c *Client) Do(req *http.Request) (*http.Response, error) { return c.client.Do(req) } // do performs an *http.Request respecting redirects, and handles the response // as defined in c.handleResponse. Notably, it does not alter the headers for // the request argument in any way. 
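// For illustration, a typical call through the exported wrapper might look
// like this (the URL here is hypothetical):
//
//	c, _ := NewClient(lfshttp.NewContext(nil, nil, nil))
//	req, _ := http.NewRequest("GET", "https://git-server.com/repo/lfs/locks", nil)
//	res, err := c.Do(req)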
func (c *Client) do(req *http.Request, remote string, via []*http.Request) (*http.Response, error) { return c.client.Do(req) } func (c *Client) doWithAccess(req *http.Request, remote string, via []*http.Request, mode creds.AccessMode) (*http.Response, error) { return c.client.DoWithAccess(req, mode) } func (c *Client) LogRequest(r *http.Request, reqKey string) *http.Request { return c.client.LogRequest(r, reqKey) } func (c *Client) GitEnv() config.Environment { return c.client.GitEnv() } func (c *Client) OSEnv() config.Environment { return c.client.OSEnv() } func (c *Client) ConcurrentTransfers() int { return c.client.ConcurrentTransfers } func (c *Client) LogHTTPStats(w io.WriteCloser) { c.client.LogHTTPStats(w) } func (c *Client) Close() error { return c.client.Close() } git-lfs-3.6.1/lfsapi/endpoint_finder.go000066400000000000000000000244441472372047300200630ustar00rootroot00000000000000package lfsapi import ( "bufio" "fmt" "net/url" "os" "path" "regexp" "strings" "sync" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/creds" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) const ( defaultRemote = "origin" ) type EndpointFinder interface { NewEndpointFromCloneURL(operation, rawurl string) lfshttp.Endpoint NewEndpoint(operation, rawurl string) lfshttp.Endpoint Endpoint(operation, remote string) lfshttp.Endpoint RemoteEndpoint(operation, remote string) lfshttp.Endpoint GitRemoteURL(remote string, forpush bool) string AccessFor(rawurl string) creds.Access SetAccess(access creds.Access) GitProtocol() string } type endpointGitFinder struct { gitConfig *git.Configuration gitEnv config.Environment gitProtocol string gitDir string aliasMu sync.Mutex aliases map[string]string pushAliases map[string]string remoteList []string accessMu sync.Mutex urlAccess map[string]creds.AccessMode urlConfig *config.URLConfig } func NewEndpointFinder(ctx lfshttp.Context) EndpointFinder { if ctx == nil { ctx = lfshttp.NewContext(nil, nil, nil) } var gitDir string cfg := ctx.GitConfig() if cfg != nil && cfg.GitDir != "" { gitDir = cfg.GitDir } else if dir, err := git.GitDir(); err == nil { gitDir = dir } e := &endpointGitFinder{ gitConfig: ctx.GitConfig(), gitEnv: ctx.GitEnv(), gitProtocol: "https", gitDir: gitDir, aliases: make(map[string]string), pushAliases: make(map[string]string), urlAccess: make(map[string]creds.AccessMode), } remotes, _ := git.RemoteList() e.remoteList = remotes e.urlConfig = config.NewURLConfig(e.gitEnv) if v, ok := e.gitEnv.Get("lfs.gitprotocol"); ok { e.gitProtocol = v } initAliases(e, e.gitEnv) return e } func (e *endpointGitFinder) Endpoint(operation, remote string) lfshttp.Endpoint { ep := e.getEndpoint(operation, remote) ep.Operation = operation return ep } func (e *endpointGitFinder) getEndpoint(operation, remote string) lfshttp.Endpoint { if e.gitEnv == nil { return lfshttp.Endpoint{} } if operation == "upload" { if url, ok := e.gitEnv.Get("lfs.pushurl"); ok { return e.NewEndpoint(operation, url) } } if url, ok := e.gitEnv.Get("lfs.url"); ok { return e.NewEndpoint(operation, url) } if len(remote) > 0 && remote != defaultRemote { if e := e.RemoteEndpoint(operation, remote); len(e.Url) > 0 { return e } } return e.RemoteEndpoint(operation, defaultRemote) } func (e *endpointGitFinder) RemoteEndpoint(operation, remote string) lfshttp.Endpoint { if e.gitEnv == nil { return lfshttp.Endpoint{} } if len(remote) == 0 { remote = defaultRemote } // Support separate push URL if 
// specified and pushing
	if operation == "upload" {
		if url, ok := e.gitEnv.Get("remote." + remote + ".lfspushurl"); ok {
			return e.NewEndpoint(operation, url)
		}
	}

	if url, ok := e.gitEnv.Get("remote." + remote + ".lfsurl"); ok {
		return e.NewEndpoint(operation, url)
	}

	// fall back on git remote url (also supports pushurl)
	if url := e.GitRemoteURL(remote, operation == "upload"); url != "" {
		return e.NewEndpointFromCloneURL(operation, url)
	}

	// Finally, fall back on .git/FETCH_HEAD, but only if it exists and no
	// specific remote was requested, since we can't know which remote
	// FETCH_HEAD is pointing to.
	if e.gitDir != "" && remote == defaultRemote {
		url, err := parseFetchHead(strings.Join([]string{e.gitDir, "FETCH_HEAD"}, "/"))
		if err == nil {
			endpoint := e.NewEndpointFromCloneURL("download", url)
			return endpoint
		} else {
			tracerx.Printf("failed parsing FETCH_HEAD: %s", err)
		}
	}

	return lfshttp.Endpoint{}
}

func parseFetchHead(filePath string) (string, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return "", err
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	if scanner.Scan() {
		line := scanner.Text()
		return ExtractRemoteUrl(line)
	}

	return "", fmt.Errorf("Failed to read content from %s", filePath)
}

func ExtractRemoteUrl(line string) (string, error) {
	// see https://regex101.com/r/lYla7c/1
	re := regexp.MustCompile(`^[a-f0-9]{40,64}\t(not-for-merge)?\t(tag |branch |)'.*' of (?P<url>[\/\.\-\:\_a-zA-Z0-9]+)$`)
	match := re.FindStringSubmatch(line)
	for i, name := range re.SubexpNames() {
		if name == "url" {
			if len(match) < i {
				break
			}
			return strings.TrimSpace(match[i]), nil
		}
	}
	return "", fmt.Errorf("failed to extract remote URL from \"%s\"", line)
}

func (e *endpointGitFinder) GitRemoteURL(remote string, forpush bool) string {
	if e.gitEnv != nil {
		if forpush {
			if u, ok := e.gitEnv.Get("remote." + remote + ".pushurl"); ok {
				return u
			}
		}

		if u, ok := e.gitEnv.Get("remote." + remote + ".url"); ok {
			return u
		}
	}

	if err := git.ValidateRemoteFromList(e.remoteList, remote); err == nil {
		return remote
	}

	return ""
}

func (e *endpointGitFinder) NewEndpointFromCloneURL(operation, rawurl string) lfshttp.Endpoint {
	ep := e.NewEndpoint(operation, rawurl)
	if ep.Url == lfshttp.UrlUnknown {
		return ep
	}

	if strings.HasSuffix(ep.Url, "/") {
		ep.Url = ep.Url[0 : len(ep.Url)-1]
	}

	if strings.HasPrefix(ep.Url, "file://") {
		return ep
	}

	// When using main remote URL for HTTP, append info/lfs
	if path.Ext(ep.Url) == ".git" {
		ep.Url += "/info/lfs"
	} else {
		ep.Url += ".git/info/lfs"
	}

	return ep
}

func (e *endpointGitFinder) NewEndpoint(operation, rawurl string) lfshttp.Endpoint {
	rawurl = e.ReplaceUrlAlias(operation, rawurl)
	if strings.HasPrefix(rawurl, "/") {
		return lfshttp.EndpointFromLocalPath(rawurl)
	}

	u, err := url.Parse(rawurl)
	if err != nil {
		return lfshttp.EndpointFromBareSshUrl(rawurl)
	}

	switch u.Scheme {
	case "ssh", "git+ssh", "ssh+git":
		return lfshttp.EndpointFromSshUrl(u)
	case "http", "https":
		return lfshttp.EndpointFromHttpUrl(u)
	case "git":
		return endpointFromGitUrl(u, e)
	case "file":
		return lfshttp.EndpointFromFileUrl(u)
	case "":
		// If it looks like a local path, it probably is.
		if _, err := os.Stat(rawurl); err == nil {
			return lfshttp.EndpointFromLocalPath(rawurl)
		}
		return lfshttp.EndpointFromBareSshUrl(u.String())
	default:
		if strings.HasPrefix(rawurl, u.Scheme+"::") {
			// Looks like a remote helper; just pass it through.
			return lfshttp.Endpoint{Url: rawurl}
		}
		// If it looks like a local path, it probably is.
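		// (As in the empty-scheme case above, this is a plain stat check
		// on the raw string; a bare word such as "myhost:path/repo.git"
		// that does not exist on disk falls through to the bare SSH
		// interpretation below. The example path is illustrative.)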
if _, err := os.Stat(rawurl); err == nil { return lfshttp.EndpointFromLocalPath(rawurl) } // We probably got here because the "scheme" that was parsed is // a hostname (whether FQDN or single word) and the URL parser // didn't know what to do with it. Do what Git does and treat // it as an SSH URL. This ensures we handle SSH config aliases // properly. return lfshttp.EndpointFromBareSshUrl(u.String()) } } func (e *endpointGitFinder) AccessFor(rawurl string) creds.Access { accessurl := urlWithoutAuth(rawurl) if e.gitEnv == nil { return creds.NewAccess(creds.NoneAccess, accessurl) } e.accessMu.Lock() defer e.accessMu.Unlock() if cached, ok := e.urlAccess[accessurl]; ok { return creds.NewAccess(cached, accessurl) } e.urlAccess[accessurl] = e.fetchGitAccess(accessurl) return creds.NewAccess(e.urlAccess[accessurl], accessurl) } func (e *endpointGitFinder) SetAccess(access creds.Access) { key := fmt.Sprintf("lfs.%s.access", access.URL()) tracerx.Printf("setting repository access to %s", access.Mode()) e.accessMu.Lock() defer e.accessMu.Unlock() switch access.Mode() { case creds.EmptyAccess, creds.NoneAccess: e.gitConfig.UnsetLocalKey(key) e.urlAccess[access.URL()] = creds.NoneAccess default: e.gitConfig.SetLocal(key, string(access.Mode())) e.urlAccess[access.URL()] = access.Mode() } } func urlWithoutAuth(rawurl string) string { if !strings.Contains(rawurl, "@") { return rawurl } u, err := url.Parse(rawurl) if err != nil { fmt.Fprintf(os.Stderr, "Error parsing URL %q: %s", rawurl, err) return rawurl } u.User = nil return u.String() } func (e *endpointGitFinder) fetchGitAccess(rawurl string) creds.AccessMode { if v, _ := e.urlConfig.Get("lfs", rawurl, "access"); len(v) > 0 { access := creds.AccessMode(strings.ToLower(v)) if access == creds.PrivateAccess { return creds.BasicAccess } return access } return creds.NoneAccess } func (e *endpointGitFinder) GitProtocol() string { return e.gitProtocol } // ReplaceUrlAlias returns a url with a prefix from a `url.*.insteadof` git // config setting. If multiple aliases match, use the longest one. // See https://git-scm.com/docs/git-config for Git's docs. func (e *endpointGitFinder) ReplaceUrlAlias(operation, rawurl string) string { e.aliasMu.Lock() defer e.aliasMu.Unlock() if operation == "upload" { if rawurl, replaced := e.replaceUrlAlias(e.pushAliases, rawurl); replaced { return rawurl } } rawurl, _ = e.replaceUrlAlias(e.aliases, rawurl) return rawurl } // replaceUrlAlias is a helper function for ReplaceUrlAlias. It must only be // called while the e.aliasMu mutex is held. func (e *endpointGitFinder) replaceUrlAlias(aliases map[string]string, rawurl string) (string, bool) { var longestalias string for alias, _ := range aliases { if !strings.HasPrefix(rawurl, alias) { continue } if longestalias < alias { longestalias = alias } } if len(longestalias) > 0 { return aliases[longestalias] + rawurl[len(longestalias):], true } return rawurl, false } const ( aliasPrefix = "url." 
) func initAliases(e *endpointGitFinder, git config.Environment) { suffix := ".insteadof" pushSuffix := ".pushinsteadof" for gitkey, gitval := range git.All() { if len(gitval) == 0 || !strings.HasPrefix(gitkey, aliasPrefix) { continue } if strings.HasSuffix(gitkey, suffix) { storeAlias(e.aliases, gitkey, gitval, suffix) } else if strings.HasSuffix(gitkey, pushSuffix) { storeAlias(e.pushAliases, gitkey, gitval, pushSuffix) } } } func storeAlias(aliases map[string]string, key string, values []string, suffix string) { for _, value := range values { url := key[len(aliasPrefix) : len(key)-len(suffix)] if v, ok := aliases[value]; ok && v != url { fmt.Fprintln(os.Stderr, tr.Tr.Get("warning: Multiple 'url.*.%s' keys with the same alias: %q", suffix, value)) } aliases[value] = url } } func endpointFromGitUrl(u *url.URL, e *endpointGitFinder) lfshttp.Endpoint { u.Scheme = e.gitProtocol return lfshttp.Endpoint{Url: u.String()} } git-lfs-3.6.1/lfsapi/endpoint_finder_test.go000066400000000000000000000613511472372047300211200ustar00rootroot00000000000000package lfsapi import ( "os" "regexp" "runtime" "testing" "github.com/git-lfs/git-lfs/v3/creds" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/git-lfs/git-lfs/v3/ssh" "github.com/stretchr/testify/assert" ) func TestEndpointDefaultsToOrigin(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.lfsurl": "abc", })) e := finder.Endpoint("download", "") assert.Equal(t, "abc", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) } func TestEndpointOverridesOrigin(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.url": "abc", "remote.origin.lfsurl": "def", })) e := finder.Endpoint("download", "") assert.Equal(t, "abc", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) } func TestEndpointNoOverrideDefaultRemote(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.lfsurl": "abc", "remote.other.lfsurl": "def", })) e := finder.Endpoint("download", "") assert.Equal(t, "abc", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) } func TestEndpointUseAlternateRemote(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.lfsurl": "abc", "remote.other.lfsurl": "def", })) e := finder.Endpoint("download", "other") assert.Equal(t, "def", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) } func TestEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "https://example.com/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) } func TestBareEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "https://example.com/foo/bar.git", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) } func TestEndpointSeparateClonePushUrl(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": 
"https://example.com/foo/bar.git", "remote.origin.pushurl": "https://readwrite.com/foo/bar.git", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) e = finder.Endpoint("upload", "") assert.Equal(t, "https://readwrite.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) } func TestEndpointOverriddenSeparateClonePushLfsUrl(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "https://example.com/foo/bar.git", "remote.origin.pushurl": "https://readwrite.com/foo/bar.git", "remote.origin.lfsurl": "https://examplelfs.com/foo/bar", "remote.origin.lfspushurl": "https://readwritelfs.com/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://examplelfs.com/foo/bar", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) e = finder.Endpoint("upload", "") assert.Equal(t, "https://readwritelfs.com/foo/bar", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) } func TestEndpointGlobalSeparateLfsPush(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.url": "https://readonly.com/foo/bar", "lfs.pushurl": "https://write.com/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://readonly.com/foo/bar", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) e = finder.Endpoint("upload", "") assert.Equal(t, "https://write.com/foo/bar", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) } func TestSSHEndpointOverridden(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "git@example.com:foo/bar", "remote.origin.lfsurl": "lfs", })) e := finder.Endpoint("download", "") assert.Equal(t, "lfs", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) assert.Equal(t, "", e.SSHMetadata.Port) } func TestSSHEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "ssh://git@example.com/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "git@example.com", e.SSHMetadata.UserAndHost) assert.Equal(t, "/foo/bar", e.SSHMetadata.Path) assert.Equal(t, "", e.SSHMetadata.Port) } func TestSSHCustomPortEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "ssh://git@example.com:9000/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "git@example.com", e.SSHMetadata.UserAndHost) assert.Equal(t, "/foo/bar", e.SSHMetadata.Path) assert.Equal(t, "9000", e.SSHMetadata.Port) } func TestGitSSHEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "git+ssh://git@example.com/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "git@example.com", e.SSHMetadata.UserAndHost) assert.Equal(t, "/foo/bar", e.SSHMetadata.Path) assert.Equal(t, "", 
e.SSHMetadata.Port) } func TestGitSSHCustomPortEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "git+ssh://git@example.com:9000/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "git@example.com", e.SSHMetadata.UserAndHost) assert.Equal(t, "/foo/bar", e.SSHMetadata.Path) assert.Equal(t, "9000", e.SSHMetadata.Port) } func TestSSHGitEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "ssh+git://git@example.com/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "git@example.com", e.SSHMetadata.UserAndHost) assert.Equal(t, "/foo/bar", e.SSHMetadata.Path) assert.Equal(t, "", e.SSHMetadata.Port) } func TestSSHGitCustomPortEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "ssh+git://git@example.com:9000/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "git@example.com", e.SSHMetadata.UserAndHost) assert.Equal(t, "/foo/bar", e.SSHMetadata.Path) assert.Equal(t, "9000", e.SSHMetadata.Port) } func TestBareSSHEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "git@example.com:foo/bar.git", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "git@example.com", e.SSHMetadata.UserAndHost) assert.Equal(t, "foo/bar.git", e.SSHMetadata.Path) assert.Equal(t, "", e.SSHMetadata.Port) } func TestBareSSSHEndpointWithCustomPortInBrackets(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "[git@example.com:2222]:foo/bar.git", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "git@example.com", e.SSHMetadata.UserAndHost) assert.Equal(t, "foo/bar.git", e.SSHMetadata.Path) assert.Equal(t, "2222", e.SSHMetadata.Port) } func TestSSHEndpointFromGlobalLfsUrl(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.url": "git@example.com:foo/bar.git", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git", e.Url) assert.Equal(t, "git@example.com", e.SSHMetadata.UserAndHost) assert.Equal(t, "foo/bar.git", e.SSHMetadata.Path) assert.Equal(t, "", e.SSHMetadata.Port) } func TestHTTPEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "http://example.com/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "http://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) assert.Equal(t, "", e.SSHMetadata.Port) } func TestBareHTTPEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "http://example.com/foo/bar.git", })) e := finder.Endpoint("download", "") assert.Equal(t, "http://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) assert.Equal(t, "", 
e.SSHMetadata.Port) } func TestGitEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "git://example.com/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) assert.Equal(t, "", e.SSHMetadata.Port) } func TestGitEndpointAddsLfsSuffixWithCustomProtocol(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "git://example.com/foo/bar", "lfs.gitprotocol": "http", })) e := finder.Endpoint("download", "") assert.Equal(t, "http://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) assert.Equal(t, "", e.SSHMetadata.Port) } func TestBareGitEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": "git://example.com/foo/bar.git", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) assert.Equal(t, "", e.SSHMetadata.Port) } func TestLocalPathEndpointAddsDotGitForWorkingRepo(t *testing.T) { // Windows will add a drive letter to the paths below since we // canonicalize them. if runtime.GOOS == "windows" { return } path := t.TempDir() + "/local/path" err := os.MkdirAll(path+"/.git", 0755) assert.Nil(t, err) finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": path, })) e := finder.Endpoint("download", "") assert.Equal(t, "file://"+path+"/.git", e.Url) os.RemoveAll(path) } func TestLocalPathEndpointPreservesDotGitForWorkingRepo(t *testing.T) { // Windows will add a drive letter to the paths below since we // canonicalize them. if runtime.GOOS == "windows" { return } path := t.TempDir() + "/local/path/.git" err := os.MkdirAll(path, 0755) assert.Nil(t, err) finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": path, })) e := finder.Endpoint("download", "") assert.Equal(t, "file://"+path, e.Url) os.RemoveAll(path) } func TestLocalPathEndpointPreservesNoDotGitForBareRepo(t *testing.T) { // Windows will add a drive letter to the paths below since we // canonicalize them. if runtime.GOOS == "windows" { return } path := t.TempDir() + "/local/path" err := os.MkdirAll(path, 0755) assert.Nil(t, err) finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": path, })) e := finder.Endpoint("download", "") assert.Equal(t, "file://"+path, e.Url) os.RemoveAll(path) } func TestLocalPathEndpointRemovesDotGitForBareRepo(t *testing.T) { // Windows will add a drive letter to the paths below since we // canonicalize them. 
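	// (Taken together, the four local-path tests pin down this mapping,
	// with illustrative paths:
	//
	//	url = /local/path, /local/path/.git exists -> file:///local/path/.git
	//	url = /local/path/.git, directory exists   -> file:///local/path/.git
	//	url = /local/path, bare directory          -> file:///local/path
	//	url = /local/path/.git, does not exist     -> file:///local/path
	//
	// so ".git" is appended, preserved, or stripped to match what is
	// actually on disk.)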
if runtime.GOOS == "windows" { return } path := t.TempDir() + "/local/path" err := os.MkdirAll(path, 0755) assert.Nil(t, err) finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.origin.url": path + "/.git", })) e := finder.Endpoint("download", "") assert.Equal(t, "file://"+path, e.Url) os.RemoveAll(path) } func TestAccessConfig(t *testing.T) { type accessTest struct { AccessMode string PrivateAccess bool } tests := map[string]accessTest{ "": {"none", false}, "basic": {"basic", true}, "BASIC": {"basic", true}, "private": {"basic", true}, "PRIVATE": {"basic", true}, "invalidauth": {"invalidauth", true}, } for value, expected := range tests { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.url": "http://example.com", "lfs.http://example.com.access": value, "lfs.https://example.com.access": "bad", })) dl := finder.Endpoint("upload", "") ul := finder.Endpoint("download", "") if access := finder.AccessFor(dl.Url); access.Mode() != creds.AccessMode(expected.AccessMode) { t.Errorf("Expected creds.AccessMode() with value %q to be %v, got %v", value, expected.AccessMode, access) } if access := finder.AccessFor(ul.Url); access.Mode() != creds.AccessMode(expected.AccessMode) { t.Errorf("Expected creds.AccessMode() with value %q to be %v, got %v", value, expected.AccessMode, access) } } // Test again but with separate push url for value, expected := range tests { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.url": "http://example.com", "lfs.pushurl": "http://examplepush.com", "lfs.http://example.com.access": value, "lfs.http://examplepush.com.access": value, "lfs.https://example.com.access": "bad", })) dl := finder.Endpoint("upload", "") ul := finder.Endpoint("download", "") if access := finder.AccessFor(dl.Url); access.Mode() != creds.AccessMode(expected.AccessMode) { t.Errorf("Expected creds.AccessMode() with value %q to be %v, got %v", value, expected.AccessMode, access) } if access := finder.AccessFor(ul.Url); access.Mode() != creds.AccessMode(expected.AccessMode) { t.Errorf("Expected creds.AccessMode() with value %q to be %v, got %v", value, expected.AccessMode, access) } } } func TestAccessAbsentConfig(t *testing.T) { finder := NewEndpointFinder(nil) downloadAccess := finder.AccessFor(finder.Endpoint("download", "").Url) assert.Equal(t, creds.NoneAccess, downloadAccess.Mode()) uploadAccess := finder.AccessFor(finder.Endpoint("upload", "").Url) assert.Equal(t, creds.NoneAccess, uploadAccess.Mode()) } func TestSetAccess(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{})) url := "http://example.com" access := finder.AccessFor(url) assert.Equal(t, creds.NoneAccess, access.Mode()) assert.Equal(t, url, access.URL()) finder.SetAccess(access.Upgrade(creds.NegotiateAccess)) newAccess := finder.AccessFor(url) assert.Equal(t, creds.NegotiateAccess, newAccess.Mode()) assert.Equal(t, url, newAccess.URL()) } func TestChangeAccess(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.http://example.com.access": "basic", })) url := "http://example.com" access := finder.AccessFor(url) assert.Equal(t, creds.BasicAccess, access.Mode()) assert.Equal(t, url, access.URL()) finder.SetAccess(access.Upgrade(creds.NegotiateAccess)) newAccess := finder.AccessFor(url) assert.Equal(t, creds.NegotiateAccess, newAccess.Mode()) assert.Equal(t, url, newAccess.URL()) } func TestDeleteAccessWithNone(t *testing.T) { finder := 
NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.http://example.com.access": "basic", })) url := "http://example.com" access := finder.AccessFor(url) assert.Equal(t, creds.BasicAccess, access.Mode()) assert.Equal(t, url, access.URL()) finder.SetAccess(access.Upgrade(creds.NoneAccess)) newAccess := finder.AccessFor(url) assert.Equal(t, creds.NoneAccess, newAccess.Mode()) assert.Equal(t, url, newAccess.URL()) } func TestDeleteAccessWithEmptyString(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.http://example.com.access": "basic", })) url := "http://example.com" access := finder.AccessFor(url) assert.Equal(t, creds.BasicAccess, access.Mode()) assert.Equal(t, url, access.URL()) finder.SetAccess(access.Upgrade(creds.AccessMode(""))) newAccess := finder.AccessFor(url) assert.Equal(t, creds.NoneAccess, newAccess.Mode()) assert.Equal(t, url, newAccess.URL()) } type EndpointParsingTestCase struct { Given string Expected lfshttp.Endpoint } func (c *EndpointParsingTestCase) Assert(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "url.https://github.com/.insteadof": "gh:", })) actual := finder.NewEndpoint("upload", c.Given) assert.Equal(t, c.Expected, actual, "lfsapi: expected endpoint for %q to be %#v (was %#v)", c.Given, c.Expected, actual) } func TestEndpointParsing(t *testing.T) { // Note that many of these tests will produce silly or completely broken // values for the Url, and that's okay: they work nevertheless. for desc, c := range map[string]EndpointParsingTestCase{ "simple bare ssh": { "git@github.com:git-lfs/git-lfs.git", lfshttp.Endpoint{ Url: "https://github.com/git-lfs/git-lfs.git", SSHMetadata: ssh.SSHMetadata{ UserAndHost: "git@github.com", Path: "git-lfs/git-lfs.git", Port: "", }, Operation: "", OriginalUrl: "ssh://git@github.com/git-lfs/git-lfs.git", }, }, "port bare ssh": { "[git@lfshttp.github.com:443]:git-lfs/git-lfs.git", lfshttp.Endpoint{ Url: "https://lfshttp.github.com/git-lfs/git-lfs.git", SSHMetadata: ssh.SSHMetadata{ UserAndHost: "git@lfshttp.github.com", Path: "git-lfs/git-lfs.git", Port: "443", }, Operation: "", OriginalUrl: "ssh://git@lfshttp.github.com:443/git-lfs/git-lfs.git", }, }, "no user bare ssh": { "github.com:git-lfs/git-lfs.git", lfshttp.Endpoint{ Url: "https://github.com/git-lfs/git-lfs.git", SSHMetadata: ssh.SSHMetadata{ UserAndHost: "github.com", Path: "git-lfs/git-lfs.git", Port: "", }, Operation: "", OriginalUrl: "ssh://github.com/git-lfs/git-lfs.git", }, }, "bare word bare ssh": { "github:git-lfs/git-lfs.git", lfshttp.Endpoint{ Url: "https://github/git-lfs/git-lfs.git", SSHMetadata: ssh.SSHMetadata{ UserAndHost: "github", Path: "git-lfs/git-lfs.git", Port: "", }, Operation: "", OriginalUrl: "ssh://github/git-lfs/git-lfs.git", }, }, "insteadof alias": { "gh:git-lfs/git-lfs.git", lfshttp.Endpoint{ Url: "https://github.com/git-lfs/git-lfs.git", SSHMetadata: ssh.SSHMetadata{ UserAndHost: "", Path: "", Port: "", }, Operation: "", OriginalUrl: "https://github.com/git-lfs/git-lfs.git", }, }, "remote helper": { "remote::git-lfs/git-lfs.git", lfshttp.Endpoint{ Url: "remote::git-lfs/git-lfs.git", SSHMetadata: ssh.SSHMetadata{ UserAndHost: "", Path: "", Port: "", }, Operation: "", }, }, } { t.Run(desc, c.Assert) } } type InsteadOfTestCase struct { Given string Operation string Expected lfshttp.Endpoint } func (c *InsteadOfTestCase) Assert(t *testing.T) { finder := NewEndpointFinder(lfshttp.NewContext(nil, nil, map[string]string{ "remote.test.url": 
c.Given, "url.https://example.com/.insteadof": "ex:", "url.ssh://example.com/.pushinsteadof": "ex:", "url.ssh://example.com/.insteadof": "exp:", })) actual := finder.Endpoint(c.Operation, "test") assert.Equal(t, c.Expected, actual, "lfsapi: expected endpoint for %q to be %#v (was %#v)", c.Given, c.Expected, actual) } func TestInsteadOf(t *testing.T) { // Note that many of these tests will produce silly or completely broken // values for the Url, and that's okay: they work nevertheless. for desc, c := range map[string]InsteadOfTestCase{ "insteadof alias (download)": { "ex:git-lfs/git-lfs.git", "download", lfshttp.Endpoint{ Url: "https://example.com/git-lfs/git-lfs.git/info/lfs", SSHMetadata: ssh.SSHMetadata{ UserAndHost: "", Path: "", Port: "", }, Operation: "download", OriginalUrl: "https://example.com/git-lfs/git-lfs.git", }, }, "pushinsteadof alias (upload)": { "ex:git-lfs/git-lfs.git", "upload", lfshttp.Endpoint{ Url: "https://example.com/git-lfs/git-lfs.git/info/lfs", SSHMetadata: ssh.SSHMetadata{ UserAndHost: "example.com", Path: "/git-lfs/git-lfs.git", Port: "", }, Operation: "upload", OriginalUrl: "ssh://example.com/git-lfs/git-lfs.git", }, }, "exp alias (download)": { "exp:git-lfs/git-lfs.git", "download", lfshttp.Endpoint{ Url: "https://example.com/git-lfs/git-lfs.git/info/lfs", SSHMetadata: ssh.SSHMetadata{ UserAndHost: "example.com", Path: "/git-lfs/git-lfs.git", Port: "", }, Operation: "download", OriginalUrl: "ssh://example.com/git-lfs/git-lfs.git", }, }, "exp alias (upload)": { "exp:git-lfs/git-lfs.git", "upload", lfshttp.Endpoint{ Url: "https://example.com/git-lfs/git-lfs.git/info/lfs", SSHMetadata: ssh.SSHMetadata{ UserAndHost: "example.com", Path: "/git-lfs/git-lfs.git", Port: "", }, Operation: "upload", OriginalUrl: "ssh://example.com/git-lfs/git-lfs.git", }, }, } { t.Run(desc, c.Assert) } } func TestNewEndpointFromCloneURLWithConfig(t *testing.T) { expected := "https://foo/bar.git/info/lfs" tests := []string{ "https://foo/bar", "https://foo/bar/", "https://foo/bar.git", "https://foo/bar.git/", } finder := NewEndpointFinder(nil) for _, actual := range tests { e := finder.NewEndpointFromCloneURL("upload", actual) if e.Url != expected { t.Errorf("%s returned bad endpoint url %s", actual, e.Url) } } } func TestExtractRemoteUrlForHTTPS(t *testing.T) { line := "14d0e09d4643d7547267c1cbf9972ac1c4db0b2d not-for-merge branch 'master' of https://example.com/git-lfs/git-lfs" expected := "https://example.com/git-lfs/git-lfs" result, err := ExtractRemoteUrl(line) assert.Nil(t, err) assert.Equal(t, expected, result) } func TestExtractRemoteUrlForSSH(t *testing.T) { line := "cb2ad9f68531e6afe76326d46acf566acf8af4f9 branch 'master' of ssh://example.com/git-lfs/git-lfs" expected := "ssh://example.com/git-lfs/git-lfs" result, err := ExtractRemoteUrl(line) assert.Nil(t, err) assert.Equal(t, expected, result) } func TestExtractRemoteUrlForGit(t *testing.T) { line := "90ed234fb0708235a733bcae0e5b90bd4fac5321 branch 'master' of example.com:git-lfs/git-lfs" expected := "example.com:git-lfs/git-lfs" result, err := ExtractRemoteUrl(line) assert.Nil(t, err) assert.Equal(t, expected, result) } func TestExtractRemoteUrlNoURL(t *testing.T) { invalid := []string{ "text without url", // invalid characters in base64 git hash "qwert34fb0708235a733bcae0e5b90bd4fac5321 branch 'master' of example.com:git-lfs/git-lfs", // invalid git hash length "90ed234fb0708235a733bcae0e5b90bd4fac532 branch 'master' of example.com:git-lfs/git-lfs", // other label present where only `not-for-merge` label allowed 
"90ed234fb0708235a733bcae0e5b90bd4fac532 disallowed-label branch 'master' of example.com:git-lfs/git-lfs", // other type present where only `tag` or `branch` allowed "90ed234fb0708235a733bcae0e5b90bd4fac532 othertype 'master' of example.com:git-lfs/git-lfs", // missing `of` "90ed234fb0708235a733bcae0e5b90bd4fac5321 branch 'master' example.com:git-lfs/git-lfs", // missing `'` "90ed234fb0708235a733bcae0e5b90bd4fac5321 branch of example.com:git-lfs/git-lfs", } for _, line := range invalid { result, err := ExtractRemoteUrl(line) assert.NotNil(t, err) assert.Regexp(t, regexp.MustCompile("^failed to extract remote URL.*$"), err.Error()) assert.Equal(t, "", result) } } git-lfs-3.6.1/lfsapi/kerberos.go000066400000000000000000000007551472372047300165270ustar00rootroot00000000000000package lfsapi import ( "net/http" "github.com/git-lfs/git-lfs/v3/creds" ) func (c *Client) doWithNegotiate(req *http.Request, credWrapper creds.CredentialHelperWrapper) (*http.Response, error) { // There are two possibilities here if we're using Negotiate // authentication. One is that we're using Kerberos, which we try // first. The other is that we're using NTLM, which we no longer // support. Fail in that case. return c.doWithAccess(req, "", nil, creds.NegotiateAccess) } git-lfs-3.6.1/lfsapi/lfsapi.go000066400000000000000000000041041472372047300161610ustar00rootroot00000000000000package lfsapi import ( "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/creds" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/git-lfs/git-lfs/v3/ssh" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) type Client struct { Endpoints EndpointFinder Credentials creds.CredentialHelper credContext *creds.CredentialHelperContext client *lfshttp.Client context lfshttp.Context access []creds.AccessMode } func NewClient(ctx lfshttp.Context) (*Client, error) { if ctx == nil { ctx = lfshttp.NewContext(nil, nil, nil) } gitEnv := ctx.GitEnv() osEnv := ctx.OSEnv() httpClient, err := lfshttp.NewClient(ctx) if err != nil { return nil, errors.Wrap(err, tr.Tr.Get("error creating HTTP client")) } c := &Client{ Endpoints: NewEndpointFinder(ctx), client: httpClient, context: ctx, credContext: creds.NewCredentialHelperContext(gitEnv, osEnv), access: creds.AllAccessModes(), } return c, nil } func (c *Client) Context() lfshttp.Context { return c.context } // SSHTransfer returns either an suitable transfer object or nil if the // server is not using an SSH remote or the git-lfs-transfer style of SSH // remote. 
func (c *Client) SSHTransfer(operation, remote string) *ssh.SSHTransfer { if len(operation) == 0 { return nil } endpoint := c.Endpoints.Endpoint(operation, remote) if len(endpoint.SSHMetadata.UserAndHost) == 0 { return nil } uc := config.NewURLConfig(c.context.GitEnv()) if val, ok := uc.Get("lfs", endpoint.OriginalUrl, "sshtransfer"); ok && val != "negotiate" && val != "always" { tracerx.Printf("skipping pure SSH protocol connection by request (%s, %s)", operation, remote) return nil } ctx := c.Context() tracerx.Printf("attempting pure SSH protocol connection (%s, %s)", operation, remote) sshTransfer, err := ssh.NewSSHTransfer(ctx.OSEnv(), ctx.GitEnv(), &endpoint.SSHMetadata, operation) if err != nil { tracerx.Printf("pure SSH protocol connection failed (%s, %s): %s", operation, remote, err) return nil } return sshTransfer } git-lfs-3.6.1/lfsapi/response_test.go000066400000000000000000000110071472372047300176000ustar00rootroot00000000000000package lfsapi import ( "net/http" "net/http/httptest" "strings" "sync/atomic" "testing" "github.com/git-lfs/git-lfs/v3/errors" "github.com/stretchr/testify/assert" ) func TestAuthErrWithBody(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/test" { w.WriteHeader(http.StatusNotFound) return } atomic.AddUint32(&called, 1) w.Header().Set("Content-Type", "application/json") w.WriteHeader(401) w.Write([]byte(`{"message":"custom auth error"}`)) })) defer srv.Close() req, err := http.NewRequest("GET", srv.URL+"/test", nil) assert.Nil(t, err) c, _ := NewClient(nil) _, err = c.Do(req) assert.NotNil(t, err) assert.True(t, errors.IsAuthError(err)) assert.Equal(t, "Authentication required: custom auth error", err.Error()) assert.EqualValues(t, 1, called) } func TestFatalWithBody(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/test" { w.WriteHeader(http.StatusNotFound) return } atomic.AddUint32(&called, 1) w.Header().Set("Content-Type", "application/json") w.WriteHeader(500) w.Write([]byte(`{"message":"custom fatal error"}`)) })) defer srv.Close() req, err := http.NewRequest("GET", srv.URL+"/test", nil) assert.Nil(t, err) c, _ := NewClient(nil) _, err = c.Do(req) assert.NotNil(t, err) assert.True(t, errors.IsFatalError(err)) assert.Equal(t, "Fatal error: custom fatal error", err.Error()) assert.EqualValues(t, 1, called) } func TestWithNonFatal500WithBody(t *testing.T) { c, _ := NewClient(nil) var called uint32 nonFatalCodes := map[int]string{ 501: "custom 501 error", 507: "custom 507 error", 509: "custom 509 error", } for nonFatalCode, expectedErr := range nonFatalCodes { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/test" { w.WriteHeader(http.StatusNotFound) return } atomic.AddUint32(&called, 1) w.Header().Set("Content-Type", "application/json") w.WriteHeader(nonFatalCode) w.Write([]byte(`{"message":"` + expectedErr + `"}`)) })) req, err := http.NewRequest("GET", srv.URL+"/test", nil) assert.Nil(t, err) _, err = c.Do(req) t.Logf("non fatal code %d", nonFatalCode) assert.NotNil(t, err) assert.Equal(t, expectedErr, err.Error()) srv.Close() } assert.EqualValues(t, 3, called) } func TestAuthErrWithoutBody(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/test" { w.WriteHeader(http.StatusNotFound) return } 
atomic.AddUint32(&called, 1) w.WriteHeader(401) })) defer srv.Close() req, err := http.NewRequest("GET", srv.URL+"/test", nil) assert.Nil(t, err) c, _ := NewClient(nil) _, err = c.Do(req) assert.NotNil(t, err) assert.True(t, errors.IsAuthError(err)) assert.True(t, strings.HasPrefix(err.Error(), "Authentication required: Authorization error:"), err.Error()) assert.EqualValues(t, 1, called) } func TestFatalWithoutBody(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/test" { w.WriteHeader(http.StatusNotFound) return } atomic.AddUint32(&called, 1) w.WriteHeader(500) })) defer srv.Close() req, err := http.NewRequest("GET", srv.URL+"/test", nil) assert.Nil(t, err) c, _ := NewClient(nil) _, err = c.Do(req) assert.NotNil(t, err) assert.True(t, errors.IsFatalError(err)) assert.True(t, strings.HasPrefix(err.Error(), "Fatal error: Server error:"), err.Error()) assert.EqualValues(t, 1, called) } func TestWithNonFatal500WithoutBody(t *testing.T) { c, _ := NewClient(nil) var called uint32 nonFatalCodes := map[int]string{ 501: "Not Implemented:", 507: "Insufficient server storage:", 509: "Bandwidth limit exceeded:", } for nonFatalCode, errPrefix := range nonFatalCodes { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/test" { w.WriteHeader(http.StatusNotFound) return } atomic.AddUint32(&called, 1) w.WriteHeader(nonFatalCode) })) req, err := http.NewRequest("GET", srv.URL+"/test", nil) assert.Nil(t, err) _, err = c.Do(req) t.Logf("non fatal code %d", nonFatalCode) assert.NotNil(t, err) assert.True(t, strings.HasPrefix(err.Error(), errPrefix)) srv.Close() } assert.EqualValues(t, 3, called) } git-lfs-3.6.1/lfshttp/000077500000000000000000000000001472372047300145635ustar00rootroot00000000000000git-lfs-3.6.1/lfshttp/body.go000066400000000000000000000011721472372047300160500ustar00rootroot00000000000000package lfshttp import ( "bytes" "encoding/json" "io" "net/http" "strconv" ) type ReadSeekCloser interface { io.Seeker io.ReadCloser } func MarshalToRequest(req *http.Request, obj interface{}) error { by, err := json.Marshal(obj) if err != nil { return err } clen := len(by) req.Header.Set("Content-Length", strconv.Itoa(clen)) req.ContentLength = int64(clen) req.Body = NewByteBody(by) return nil } func NewByteBody(by []byte) ReadSeekCloser { return &closingByteReader{Reader: bytes.NewReader(by)} } type closingByteReader struct { *bytes.Reader } func (r *closingByteReader) Close() error { return nil } git-lfs-3.6.1/lfshttp/certs.go000066400000000000000000000167721472372047300162470ustar00rootroot00000000000000package lfshttp import ( "crypto/tls" "crypto/x509" "encoding/pem" "fmt" "net/url" "os" "path/filepath" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) // isCertVerificationDisabledForHost returns whether SSL certificate verification // has been disabled for the given host, or globally func isCertVerificationDisabledForHost(c *Client, host string) bool { hostSslVerify, _ := c.uc.Get("http", fmt.Sprintf("https://%v", host), "sslverify") if hostSslVerify == "false" { return true } return c.SkipSSLVerify } // isClientCertEnabledForHost returns whether client certificate // are configured for the given host func isClientCertEnabledForHost(c *Client, host string) bool { _, hostSslKeyOk := c.uc.Get("http", 
fmt.Sprintf("https://%v/", host), "sslKey") _, hostSslCertOk := c.uc.Get("http", fmt.Sprintf("https://%v/", host), "sslCert") return hostSslKeyOk && hostSslCertOk } // decryptPEMBlock decrypts an encrypted PEM block representing a private key, // prompting for credentials using the credential helper, and returns a // decrypted PEM block representing that same private key. func decryptPEMBlock(c *Client, block *pem.Block, path string, key []byte) ([]byte, error) { fileurl := fmt.Sprintf("cert:///%s", filepath.ToSlash(path)) url, err := url.Parse(fileurl) if err != nil { return nil, err } credWrapper := c.credHelperContext.GetCredentialHelper(nil, url) credWrapper.Input["username"] = []string{""} creds, err := credWrapper.CredentialHelper.Fill(credWrapper.Input) if err != nil { tracerx.Printf("Error filling credentials for %q: %v", fileurl, err) return nil, err } pass := creds["password"][0] decrypted, err := x509.DecryptPEMBlock(block, []byte(pass)) if err != nil { credWrapper.CredentialHelper.Reject(creds) return nil, err } credWrapper.CredentialHelper.Approve(creds) // decrypted is a DER blob, but we need a PEM-encoded block. toEncode := &pem.Block{Type: block.Type, Headers: nil, Bytes: decrypted} buf := pem.EncodeToMemory(toEncode) return buf, nil } // getClientCertForHost returns a client certificate for a specific host (which may // be "host:port" loaded from the gitconfig func getClientCertForHost(c *Client, host string) (*tls.Certificate, error) { hostSslKey, _ := c.uc.Get("http", fmt.Sprintf("https://%v/", host), "sslKey") hostSslCert, _ := c.uc.Get("http", fmt.Sprintf("https://%v/", host), "sslCert") hostSslKey, err := tools.ExpandPath(hostSslKey, false) if err != nil { return nil, errors.Wrapf(err, tr.Tr.Get("Error resolving key path %q", hostSslKey)) } hostSslCert, err = tools.ExpandPath(hostSslCert, false) if err != nil { return nil, errors.Wrapf(err, tr.Tr.Get("Error resolving cert path %q", hostSslCert)) } cert, err := os.ReadFile(hostSslCert) if err != nil { tracerx.Printf("Error reading client cert file %q: %v", hostSslCert, err) return nil, errors.Wrapf(err, tr.Tr.Get("Error reading client cert file %q", hostSslCert)) } key, err := os.ReadFile(hostSslKey) if err != nil { tracerx.Printf("Error reading client key file %q: %v", hostSslKey, err) return nil, errors.Wrapf(err, tr.Tr.Get("Error reading client key file %q", hostSslKey)) } block, _ := pem.Decode(key) if block == nil { return nil, errors.New(tr.Tr.Get("Error decoding PEM block from %q", hostSslKey)) } if x509.IsEncryptedPEMBlock(block) { key, err = decryptPEMBlock(c, block, hostSslKey, key) if err != nil { tracerx.Printf("Unable to decrypt client key file %q: %v", hostSslKey, err) return nil, errors.Wrapf(err, tr.Tr.Get("Error reading client key file %q (not a PKCS#1 file?)", hostSslKey)) } } certobj, err := tls.X509KeyPair(cert, key) if err != nil { tracerx.Printf("Error reading client cert/key %v", err) return nil, errors.Wrapf(err, tr.Tr.Get("Error reading client cert/key")) } return &certobj, nil } // getRootCAsForHost returns a certificate pool for that specific host (which may // be "host:port" loaded from either the gitconfig or from a platform-specific // source which is not included by default in the golang certificate search) // May return nil if it doesn't have anything to add, in which case the default // RootCAs will be used if passed to TLSClientConfig.RootCAs func getRootCAsForHost(c *Client, host string) *x509.CertPool { // don't init pool, want to return nil not empty if none found; init only on 
// successful add cert
	var pool *x509.CertPool

	// gitconfig first
	pool = appendRootCAsForHostFromGitconfig(c.osEnv, c.gitEnv, pool, host)
	// Platform specific
	return appendRootCAsForHostFromPlatform(pool, host)
}

func appendRootCAsForHostFromGitconfig(osEnv, gitEnv config.Environment, pool *x509.CertPool, host string) *x509.CertPool {
	url := fmt.Sprintf("https://%v/", host)

	uc := config.NewURLConfig(gitEnv)

	backend, _ := uc.Get("http", url, "sslbackend")
	schannelUseSslCaInfoStrValue, _ := uc.Get("http", url, "schannelusesslcainfo")
	schannelUseSslCaInfo := config.Bool(schannelUseSslCaInfoStrValue, false)
	if backend == "schannel" && !schannelUseSslCaInfo {
		return pool
	}

	// Accumulate certs from all these locations:
	// GIT_SSL_CAINFO first
	if cafile, _ := osEnv.Get("GIT_SSL_CAINFO"); len(cafile) > 0 {
		return appendCertsFromFile(pool, cafile)
	}

	// http.<url>/.sslcainfo or http.<url>.sslcainfo
	if cafile, ok := uc.Get("http", url, "sslcainfo"); ok {
		return appendCertsFromFile(pool, cafile)
	}

	// GIT_SSL_CAPATH
	if cadir, _ := osEnv.Get("GIT_SSL_CAPATH"); len(cadir) > 0 {
		return appendCertsFromFilesInDir(pool, cadir)
	}

	// http.sslcapath
	if cadir, ok := gitEnv.Get("http.sslcapath"); ok {
		return appendCertsFromFilesInDir(pool, cadir)
	}

	return pool
}

func appendCertsFromFilesInDir(pool *x509.CertPool, dir string) *x509.CertPool {
	dirpath, errpath := tools.TranslateCygwinPath(dir)
	if errpath != nil {
		tracerx.Printf("Error reading cert dir %q: %v", dirpath, errpath)
	}
	files, err := os.ReadDir(dirpath)
	if err != nil {
		tracerx.Printf("Error reading cert dir %q: %v", dir, err)
		return pool
	}
	for _, f := range files {
		pool = appendCertsFromFile(pool, filepath.Join(dir, f.Name()))
	}
	return pool
}

func appendCertsFromFile(pool *x509.CertPool, filename string) *x509.CertPool {
	filenamepath, errfile := tools.TranslateCygwinPath(filename)
	if errfile != nil {
		tracerx.Printf("Error reading cert dir %q: %v", filenamepath, errfile)
	}
	data, err := os.ReadFile(filenamepath)
	if err != nil {
		tracerx.Printf("Error reading cert file %q: %v", filename, err)
		return pool
	}
	// Firstly, try parsing as binary certificate
	if certs, err := x509.ParseCertificates(data); err == nil {
		return appendCerts(pool, certs)
	}
	// If not binary certs, try PEM data
	return appendCertsFromPEMData(pool, data)
}

func appendCerts(pool *x509.CertPool, certs []*x509.Certificate) *x509.CertPool {
	if len(certs) == 0 {
		// important to return unmodified (may be nil)
		return pool
	}

	if pool == nil {
		pool = x509.NewCertPool()
	}

	for _, cert := range certs {
		pool.AddCert(cert)
	}

	return pool
}

func appendCertsFromPEMData(pool *x509.CertPool, data []byte) *x509.CertPool {
	if len(data) == 0 {
		return pool
	}

	// Bit of a dance, need to ensure if AppendCertsFromPEM fails we still return
	// nil and not an empty pool, so system roots still get used
	var ret *x509.CertPool
	if pool == nil {
		ret = x509.NewCertPool()
	} else {
		ret = pool
	}
	if !ret.AppendCertsFromPEM(data) {
		// Return unmodified input pool (may be nil, do not replace with empty)
		return pool
	}
	return ret
}
git-lfs-3.6.1/lfshttp/certs_darwin.go000066400000000000000000000042351472372047300176020ustar00rootroot00000000000000package lfshttp

import (
	"crypto/x509"
	"regexp"
	"strings"

	"github.com/git-lfs/git-lfs/v3/subprocess"
	"github.com/rubyist/tracerx"
)

func appendRootCAsForHostFromPlatform(pool *x509.CertPool, host string) *x509.CertPool {
	// Go loads only the system root certificates by default
	// see https://github.com/golang/go/blob/master/src/crypto/x509/root_darwin.go
	// We want to load certs configured in the System
keychain too, this is separate // from the system root certificates. It's also where other tools such as // browsers (e.g. Chrome) will load custom trusted certs from. They often // don't load certs from the login keychain so that's not included here // either, for consistency. // find system.keychain for user-added certs (don't assume location) cmd, err := subprocess.ExecCommand("/usr/bin/security", "list-keychains") if err != nil { tracerx.Printf("Error getting command to list keychains: %v", err) return nil } kcout, err := cmd.Output() if err != nil { tracerx.Printf("Error listing keychains: %v", err) return nil } var systemKeychain string keychains := strings.Split(string(kcout), "\n") for _, keychain := range keychains { lc := strings.ToLower(keychain) if !strings.Contains(lc, "/system.keychain") { continue } systemKeychain = strings.Trim(keychain, " \t\"") break } if len(systemKeychain) == 0 { return nil } pool = appendRootCAsFromKeychain(pool, host, systemKeychain) // Also check host without port portreg := regexp.MustCompile(`([^:]+):\d+`) if match := portreg.FindStringSubmatch(host); match != nil { hostwithoutport := match[1] pool = appendRootCAsFromKeychain(pool, hostwithoutport, systemKeychain) } return pool } func appendRootCAsFromKeychain(pool *x509.CertPool, name, keychain string) *x509.CertPool { cmd, err := subprocess.ExecCommand("/usr/bin/security", "find-certificate", "-a", "-p", "-c", name, keychain) if err != nil { tracerx.Printf("Error getting command to read keychain %q: %v", keychain, err) return pool } data, err := cmd.Output() if err != nil { tracerx.Printf("Error reading keychain %q: %v", keychain, err) return pool } return appendCertsFromPEMData(pool, data) } git-lfs-3.6.1/lfshttp/certs_nix.go000066400000000000000000000003551472372047300171130ustar00rootroot00000000000000//go:build !darwin && !windows // +build !darwin,!windows package lfshttp import "crypto/x509" func appendRootCAsForHostFromPlatform(pool *x509.CertPool, host string) *x509.CertPool { // Do nothing, use golang default return pool } git-lfs-3.6.1/lfshttp/certs_test.go000066400000000000000000000203661472372047300173000ustar00rootroot00000000000000package lfshttp import ( "fmt" "net/http" "net/url" "os" "path/filepath" "testing" "github.com/git-lfs/git-lfs/v3/creds" "github.com/stretchr/testify/assert" ) var testCert = `-----BEGIN CERTIFICATE----- MIIDyjCCArKgAwIBAgIJAMi9TouXnW+ZMA0GCSqGSIb3DQEBBQUAMEwxCzAJBgNV BAYTAlVTMRMwEQYDVQQIEwpTb21lLVN0YXRlMRAwDgYDVQQKEwdnaXQtbGZzMRYw FAYDVQQDEw1naXQtbGZzLmxvY2FsMB4XDTE2MDMwOTEwNTk1NFoXDTI2MDMwNzEw NTk1NFowTDELMAkGA1UEBhMCVVMxEzARBgNVBAgTClNvbWUtU3RhdGUxEDAOBgNV BAoTB2dpdC1sZnMxFjAUBgNVBAMTDWdpdC1sZnMubG9jYWwwggEiMA0GCSqGSIb3 DQEBAQUAA4IBDwAwggEKAoIBAQCXmsI2w44nOsP7n3kL1Lz04U5FMZRErBSXLOE+ dpd4tMpgrjOncJPD9NapHabsVIOnuVvMDuBbWYwU9PwbN4tjQzch8DRxBju6fCp/ Pm+QF6p2Ga+NuSHWoVfNFuF2776aF9gSLC0rFnBekD3HCz+h6I5HFgHBvRjeVyAs PRw471Y28Je609SoYugxaQNzRvahP0Qf43tE74/WN3FTGXy1+iU+uXpfp8KxnsuB gfj+Wi6mPt8Q2utcA1j82dJ0K8ZbHSbllzmI+N/UuRLsbTUEdeFWYdZ0AlZNd/Vc PlOSeoExwvOHIuUasT/cLIrEkdXNud2QLg2GpsB6fJi3NEUhAgMBAAGjga4wgasw HQYDVR0OBBYEFC8oVPRQbekTwfkntgdL7PADXNDbMHwGA1UdIwR1MHOAFC8oVPRQ bekTwfkntgdL7PADXNDboVCkTjBMMQswCQYDVQQGEwJVUzETMBEGA1UECBMKU29t ZS1TdGF0ZTEQMA4GA1UEChMHZ2l0LWxmczEWMBQGA1UEAxMNZ2l0LWxmcy5sb2Nh bIIJAMi9TouXnW+ZMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBACIl /CBLIhC3drrYme4cGArhWyXIyRpMoy9Z+9Dru8rSuOr/RXR6sbYhlE1iMGg4GsP8 4Cj7aIct6Vb9NFv5bGNyFJAmDesm3SZlEcWxU3YBzNPiJXGiUpQHCkp0BH+gvsXc tb58XoiDZPVqrl0jNfX/nHpHR9c3DaI3Tjx0F/No0ZM6mLQ1cNMikFyEWQ4U0zmW 
LvV+vvKuOixRqbcVnB5iTxqMwFG0X3tUql0cftGBgoCoR1+FSBOs0EXLODCck6ql aW6vZwkA+ccj/pDTx8LBe2lnpatrFeIt6znAUJW3G8r6SFHKVBWHwmESZS4kxhjx NpW5Hh0w4/5iIetCkJ0= -----END CERTIFICATE-----` var sslCAInfoConfigHostNames = []string{ "git-lfs.local", "git-lfs.local/", } var sslCAInfoMatchedHostTests = []struct { hostName string shouldMatch bool }{ {"git-lfs.local", true}, {"git-lfs.local:8443", false}, {"wronghost.com", false}, } func clientForHost(c *Client, host string) *http.Client { u, _ := url.Parse(fmt.Sprintf("https://%v", host)) client, _ := c.HttpClient(u, creds.BasicAccess) return client } func TestCertFromSSLCAInfoConfig(t *testing.T) { tempfile, err := os.CreateTemp("", "testcert") assert.Nil(t, err, "Error creating temp cert file") defer os.Remove(tempfile.Name()) _, err = tempfile.WriteString(testCert) assert.Nil(t, err, "Error writing temp cert file") tempfile.Close() // Test http.<url>.sslcainfo for _, hostName := range sslCAInfoConfigHostNames { hostKey := fmt.Sprintf("http.https://%v.sslcainfo", hostName) c, err := NewClient(NewContext(nil, nil, map[string]string{ hostKey: tempfile.Name(), })) assert.Nil(t, err) for _, matchedHostTest := range sslCAInfoMatchedHostTests { pool := getRootCAsForHost(c, matchedHostTest.hostName) var shouldOrShouldnt string if matchedHostTest.shouldMatch { shouldOrShouldnt = "should" } else { shouldOrShouldnt = "should not" } assert.Equal(t, matchedHostTest.shouldMatch, pool != nil, "Cert lookup for \"%v\" %v have succeeded with \"%v\"", matchedHostTest.hostName, shouldOrShouldnt, hostKey) } } // Test http.sslcainfo c, err := NewClient(NewContext(nil, nil, map[string]string{ "http.sslcainfo": tempfile.Name(), })) assert.Nil(t, err) // Should match any host at all for _, matchedHostTest := range sslCAInfoMatchedHostTests { pool := getRootCAsForHost(c, matchedHostTest.hostName) assert.NotNil(t, pool) } } func TestCertFromSSLCAInfoEnv(t *testing.T) { tempfile, err := os.CreateTemp("", "testcert") assert.Nil(t, err, "Error creating temp cert file") defer os.Remove(tempfile.Name()) _, err = tempfile.WriteString(testCert) assert.Nil(t, err, "Error writing temp cert file") tempfile.Close() c, err := NewClient(NewContext(nil, map[string]string{ "GIT_SSL_CAINFO": tempfile.Name(), }, nil)) assert.Nil(t, err) // Should match any host at all for _, matchedHostTest := range sslCAInfoMatchedHostTests { pool := getRootCAsForHost(c, matchedHostTest.hostName) assert.NotNil(t, pool) } } func TestCertFromSSLCAInfoEnvIsIgnoredForSchannelBackend(t *testing.T) { tempfile, err := os.CreateTemp("", "testcert") assert.Nil(t, err, "Error creating temp cert file") defer os.Remove(tempfile.Name()) _, err = tempfile.WriteString(testCert) assert.Nil(t, err, "Error writing temp cert file") tempfile.Close() c, err := NewClient(NewContext(nil, map[string]string{ "GIT_SSL_CAINFO": tempfile.Name(), }, map[string]string{ "http.sslbackend": "schannel", })) assert.Nil(t, err) // GIT_SSL_CAINFO should be ignored here, so no pool for any host for _, matchedHostTest := range sslCAInfoMatchedHostTests { pool := getRootCAsForHost(c, matchedHostTest.hostName) assert.Nil(t, pool) } } func TestCertFromSSLCAInfoEnvWithSchannelBackend(t *testing.T) { tempfile, err := os.CreateTemp("", "testcert") assert.Nil(t, err, "Error creating temp cert file") defer os.Remove(tempfile.Name()) _, err = tempfile.WriteString(testCert) assert.Nil(t, err, "Error writing temp cert file") tempfile.Close() c, err := NewClient(NewContext(nil, map[string]string{ "GIT_SSL_CAINFO": tempfile.Name(), }, map[string]string{ "http.sslbackend": "schannel", 
"http.schannelusesslcainfo": "1", })) assert.Nil(t, err) // Should match any host at all for _, matchedHostTest := range sslCAInfoMatchedHostTests { pool := getRootCAsForHost(c, matchedHostTest.hostName) assert.NotNil(t, pool) } } func TestCertFromSSLCAPathConfig(t *testing.T) { tempdir := t.TempDir() err := os.WriteFile(filepath.Join(tempdir, "cert1.pem"), []byte(testCert), 0644) assert.Nil(t, err, "Error creating cert file") c, err := NewClient(NewContext(nil, nil, map[string]string{ "http.sslcapath": tempdir, })) assert.Nil(t, err) // Should match any host at all for _, matchedHostTest := range sslCAInfoMatchedHostTests { pool := getRootCAsForHost(c, matchedHostTest.hostName) assert.NotNil(t, pool) } } func TestCertFromSSLCAPathEnv(t *testing.T) { tempdir := t.TempDir() err := os.WriteFile(filepath.Join(tempdir, "cert1.pem"), []byte(testCert), 0644) assert.Nil(t, err, "Error creating cert file") c, err := NewClient(NewContext(nil, map[string]string{ "GIT_SSL_CAPATH": tempdir, }, nil)) assert.Nil(t, err) // Should match any host at all for _, matchedHostTest := range sslCAInfoMatchedHostTests { pool := getRootCAsForHost(c, matchedHostTest.hostName) assert.NotNil(t, pool) } } func TestCertVerifyDisabledGlobalEnv(t *testing.T) { empty, _ := NewClient(nil) httpClient := clientForHost(empty, "anyhost.com") tr, ok := httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.False(t, tr.TLSClientConfig.InsecureSkipVerify) } c, err := NewClient(NewContext(nil, map[string]string{ "GIT_SSL_NO_VERIFY": "1", }, nil)) assert.Nil(t, err) httpClient = clientForHost(c, "anyhost.com") tr, ok = httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.True(t, tr.TLSClientConfig.InsecureSkipVerify) } } func TestCertVerifyDisabledGlobalConfig(t *testing.T) { def, _ := NewClient(nil) httpClient := clientForHost(def, "anyhost.com") tr, ok := httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.False(t, tr.TLSClientConfig.InsecureSkipVerify) } c, err := NewClient(NewContext(nil, nil, map[string]string{ "http.sslverify": "false", })) assert.Nil(t, err) httpClient = clientForHost(c, "anyhost.com") tr, ok = httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.True(t, tr.TLSClientConfig.InsecureSkipVerify) } } func TestCertVerifyDisabledHostConfig(t *testing.T) { def, _ := NewClient(nil) httpClient := clientForHost(def, "specifichost.com") tr, ok := httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.False(t, tr.TLSClientConfig.InsecureSkipVerify) } httpClient = clientForHost(def, "otherhost.com") tr, ok = httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.False(t, tr.TLSClientConfig.InsecureSkipVerify) } c, err := NewClient(NewContext(nil, nil, map[string]string{ "http.https://specifichost.com/.sslverify": "false", })) assert.Nil(t, err) httpClient = clientForHost(c, "specifichost.com") tr, ok = httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.True(t, tr.TLSClientConfig.InsecureSkipVerify) } httpClient = clientForHost(c, "otherhost.com") tr, ok = httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.False(t, tr.TLSClientConfig.InsecureSkipVerify) } } git-lfs-3.6.1/lfshttp/certs_windows.go000066400000000000000000000003331472372047300200030ustar00rootroot00000000000000package lfshttp import "crypto/x509" func appendRootCAsForHostFromPlatform(pool *x509.CertPool, host string) *x509.CertPool { // golang already supports Windows Certificate Store for self-signed certs return pool } 
git-lfs-3.6.1/lfshttp/client.go000066400000000000000000000367111472372047300164000ustar00rootroot00000000000000package lfshttp import ( "context" "crypto/tls" goerrors "errors" "fmt" "io" "net" "net/http" "net/textproto" "net/url" "os" "regexp" "strconv" "strings" "sync" "time" spnego "github.com/dpotapov/go-spnego" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/creds" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" "golang.org/x/net/http2" ) const MediaType = "application/vnd.git-lfs+json" const RequestContentType = MediaType + "; charset=utf-8" var ( UserAgent = "git-lfs" httpRE = regexp.MustCompile(`\Ahttps?://`) ) type hostData struct { host string mode creds.AccessMode } type Client struct { SSH SSHResolver DialTimeout int KeepaliveTimeout int TLSTimeout int ConcurrentTransfers int SkipSSLVerify bool Verbose bool DebuggingVerbose bool VerboseOut io.Writer hostClients map[hostData]*http.Client clientMu sync.Mutex httpLogger *syncLogger gitEnv config.Environment osEnv config.Environment uc *config.URLConfig credHelperContext *creds.CredentialHelperContext sshTries int } func NewClient(ctx Context) (*Client, error) { if ctx == nil { ctx = NewContext(nil, nil, nil) } gitEnv := ctx.GitEnv() osEnv := ctx.OSEnv() cacheCreds := gitEnv.Bool("lfs.cachecredentials", true) var sshResolver SSHResolver = &sshAuthClient{os: osEnv, git: gitEnv} if cacheCreds { sshResolver = withSSHCache(sshResolver) } c := &Client{ SSH: sshResolver, DialTimeout: gitEnv.Int("lfs.dialtimeout", 0), KeepaliveTimeout: gitEnv.Int("lfs.keepalive", 0), TLSTimeout: gitEnv.Int("lfs.tlstimeout", 0), ConcurrentTransfers: gitEnv.Int("lfs.concurrenttransfers", 8), SkipSSLVerify: !gitEnv.Bool("http.sslverify", true) || osEnv.Bool("GIT_SSL_NO_VERIFY", false), Verbose: osEnv.Bool("GIT_CURL_VERBOSE", false), DebuggingVerbose: osEnv.Bool("LFS_DEBUG_HTTP", false), gitEnv: gitEnv, osEnv: osEnv, uc: config.NewURLConfig(gitEnv), sshTries: gitEnv.Int("lfs.ssh.retries", 5), credHelperContext: creds.NewCredentialHelperContext(gitEnv, osEnv), } return c, nil } func (c *Client) GitEnv() config.Environment { return c.gitEnv } func (c *Client) OSEnv() config.Environment { return c.osEnv } func (c *Client) URLConfig() *config.URLConfig { return c.uc } func (c *Client) NewRequest(method string, e Endpoint, suffix string, body interface{}) (*http.Request, error) { if strings.HasPrefix(e.Url, "file://") { // Initial `\n` to avoid overprinting `Downloading LFS...`. fmt.Fprint(os.Stderr, "\n", tr.Tr.Get(`hint: The remote resolves to a file:// URL, which can only work with a hint: standalone transfer agent. 
See section "Using a Custom Transfer Type hint: without the API server" in custom-transfers.md for details.`), "\n") } sshRes, err := c.sshResolveWithRetries(e, method) if err != nil { return nil, err } prefix := e.Url if len(sshRes.Href) > 0 { prefix = sshRes.Href } if !httpRE.MatchString(prefix) { urlfragment := strings.SplitN(prefix, "?", 2)[0] return nil, errors.New(tr.Tr.Get("missing protocol: %q", urlfragment)) } req, err := http.NewRequest(method, joinURL(prefix, suffix), nil) if err != nil { return req, err } for key, value := range sshRes.Header { req.Header.Set(key, value) } req.Header.Set("Accept", MediaType) if body != nil { if merr := MarshalToRequest(req, body); merr != nil { return req, merr } req.Header.Set("Content-Type", RequestContentType) } return req, err } const slash = "/" func joinURL(prefix, suffix string) string { if strings.HasSuffix(prefix, slash) { return prefix + suffix } return prefix + slash + suffix } // Do sends an HTTP request to get an HTTP response. It wraps net/http, adding // extra headers, redirection handling, and error reporting. func (c *Client) Do(req *http.Request) (*http.Response, error) { req.Header = c.ExtraHeadersFor(req) return c.do(req, "", nil, creds.NoneAccess) } // DoWithAccess sends an HTTP request to get an HTTP response using the // specified access mode. It wraps net/http, adding extra headers, redirection // handling, and error reporting. func (c *Client) DoWithAccess(req *http.Request, mode creds.AccessMode) (*http.Response, error) { req.Header = c.ExtraHeadersFor(req) return c.do(req, "", nil, mode) } // do performs an *http.Request respecting redirects, and handles the response // as defined in c.handleResponse. Notably, it does not alter the headers for // the request argument in any way. func (c *Client) do(req *http.Request, remote string, via []*http.Request, mode creds.AccessMode) (*http.Response, error) { req.Header.Set("User-Agent", UserAgent) client, err := c.HttpClient(req.URL, mode) if err != nil { return nil, err } res, err := c.doWithRedirects(client, req, remote, via) if err != nil { return res, err } return res, c.handleResponse(res) } // Close closes any resources that this client opened. 
func (c *Client) Close() error { return c.httpLogger.Close() } func (c *Client) sshResolveWithRetries(e Endpoint, method string) (*sshAuthResponse, error) { var sshRes sshAuthResponse var err error uc := config.NewURLConfig(c.gitEnv) if val, ok := uc.Get("lfs", e.OriginalUrl, "sshtransfer"); ok && val != "negotiate" && val != "never" { tracerx.Printf("skipping SSH-HTTPS hybrid protocol connection by request") return nil, errors.New("git-lfs-authenticate has been disabled by request") } requests := tools.MaxInt(0, c.sshTries) + 1 for i := 0; i < requests; i++ { sshRes, err = c.SSH.Resolve(e, method) if err == nil { return &sshRes, nil } tracerx.Printf( "ssh: %s failed, error: %s, message: %s (try: %d/%d)", e.SSHMetadata.UserAndHost, err.Error(), sshRes.Message, i, requests, ) } if len(sshRes.Message) > 0 { return nil, errors.Wrap(err, sshRes.Message) } return nil, err } func (c *Client) ExtraHeadersFor(req *http.Request) http.Header { extraHeaders := c.extraHeaders(req.URL) if len(extraHeaders) == 0 { return req.Header } copy := make(http.Header, len(req.Header)) for k, vs := range req.Header { copy[k] = vs } for k, vs := range extraHeaders { for _, v := range vs { copy[k] = append(copy[k], v) } } return copy } func (c *Client) extraHeaders(u *url.URL) map[string][]string { hdrs := c.uc.GetAll("http", u.String(), "extraHeader") m := make(map[string][]string, len(hdrs)) for _, hdr := range hdrs { parts := strings.SplitN(hdr, ":", 2) if len(parts) < 2 { continue } k, v := parts[0], strings.TrimSpace(parts[1]) // If header keys are given in non-canonicalized form (e.g., // "AUTHORIZATION" as opposed to "Authorization") they will not // be returned in calls to net/http.Header.Get(). // // So, we avoid this problem by first canonicalizing header keys // for extra headers. k = textproto.CanonicalMIMEHeaderKey(k) m[k] = append(m[k], v) } return m } func (c *Client) DoWithRedirect(cli *http.Client, req *http.Request, remote string, via []*http.Request) (*http.Request, *http.Response, error) { tracedReq, err := c.traceRequest(req) if err != nil { return nil, nil, err } var retries int if n, ok := Retries(req); ok { retries = n } else { retries = defaultRequestRetries } var res *http.Response requests := tools.MaxInt(0, retries) + 1 for i := 0; i < requests; i++ { res, err = cli.Do(req) if err == nil { break } if seek, ok := req.Body.(io.Seeker); ok { seek.Seek(0, io.SeekStart) } c.traceResponse(req, tracedReq, nil) } if err != nil { c.traceResponse(req, tracedReq, nil) // SPNEGO (Negotiate) errors are authentication errors. var spnegoErr *spnego.Error if goerrors.As(err, &spnegoErr) { tracerx.Printf("http: got Negotiate error: %s", spnegoErr.Error()) return nil, nil, errors.NewAuthError(err) } return nil, nil, err } if res == nil { return nil, nil, nil } if res.Uncompressed { tracerx.Printf("http: decompressed gzipped response") } c.traceResponse(req, tracedReq, res) if res.StatusCode != 301 && res.StatusCode != 302 && res.StatusCode != 303 && res.StatusCode != 307 && res.StatusCode != 308 { // Above are the list of 3xx status codes that we know // how to handle below. If the status code contained in // the HTTP response was none of them, return the (res, // err) tuple as-is, otherwise handle the redirect. 
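// In the non-redirect case, handleResponse (errors.go) maps 4xx and
// 5xx statuses onto typed Go errors (auth, retriable, fatal, etc.)
// before the response reaches the caller.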
return nil, res, c.handleResponse(res) } redirectTo := res.Header.Get("Location") locurl, err := url.Parse(redirectTo) if err == nil && !locurl.IsAbs() { locurl = req.URL.ResolveReference(locurl) redirectTo = locurl.String() } via = append(via, req) if len(via) >= 3 { return nil, res, errors.New(tr.Tr.Get("too many redirects")) } redirectedReq, err := newRequestForRetry(req, redirectTo) if err != nil { return nil, res, err } res.Body.Close() return redirectedReq, nil, nil } func (c *Client) doWithRedirects(cli *http.Client, req *http.Request, remote string, via []*http.Request) (*http.Response, error) { redirectedReq, res, err := c.DoWithRedirect(cli, req, remote, via) if err != nil || res != nil { return res, err } if redirectedReq == nil { return nil, errors.New(tr.Tr.Get("failed to redirect request")) } return c.doWithRedirects(cli, redirectedReq, remote, via) } func (c *Client) configureProtocols(u *url.URL, transport *http.Transport) error { version, _ := c.uc.Get("http", u.String(), "version") switch version { case "HTTP/1.1": // This disables HTTP/2, according to the documentation. transport.TLSNextProto = make(map[string]func(authority string, c *tls.Conn) http.RoundTripper) case "HTTP/2": if u.Scheme != "https" { return errors.New(tr.Tr.Get("HTTP/2 cannot be used except with TLS")) } http2.ConfigureTransport(transport) delete(transport.TLSNextProto, "http/1.1") case "": http2.ConfigureTransport(transport) default: return errors.New(tr.Tr.Get("Unknown HTTP version %q", version)) } return nil } func (c *Client) Transport(u *url.URL, access creds.AccessMode) (http.RoundTripper, error) { host := u.Host if c.gitEnv == nil { c.gitEnv = make(testEnv) } if c.osEnv == nil { c.osEnv = make(testEnv) } concurrentTransfers := c.ConcurrentTransfers if concurrentTransfers < 1 { concurrentTransfers = 8 } dialtime := c.DialTimeout if dialtime < 1 { dialtime = 30 } keepalivetime := c.KeepaliveTimeout if keepalivetime < 1 { keepalivetime = 1800 } tlstime := c.TLSTimeout if tlstime < 1 { tlstime = 30 } tr := &http.Transport{ Proxy: proxyFromClient(c), TLSHandshakeTimeout: time.Duration(tlstime) * time.Second, MaxIdleConnsPerHost: concurrentTransfers, } activityTimeout := 30 if v, ok := c.uc.Get("lfs", u.String(), "activitytimeout"); ok { if i, err := strconv.Atoi(v); err == nil { activityTimeout = i } else { activityTimeout = 0 } } dialer := &net.Dialer{ Timeout: time.Duration(dialtime) * time.Second, KeepAlive: time.Duration(keepalivetime) * time.Second, DualStack: true, } if activityTimeout > 0 { activityDuration := time.Duration(activityTimeout) * time.Second tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { c, err := dialer.DialContext(ctx, network, addr) if c == nil { return c, err } if tc, ok := c.(*net.TCPConn); ok { tc.SetKeepAlive(true) tc.SetKeepAlivePeriod(dialer.KeepAlive) } return &deadlineConn{Timeout: activityDuration, Conn: c}, err } } else { tr.DialContext = dialer.DialContext } tr.TLSClientConfig = &tls.Config{ Renegotiation: tls.RenegotiateFreelyAsClient, } if isClientCertEnabledForHost(c, host) { tracerx.Printf("http: client cert for %s", host) cert, err := getClientCertForHost(c, host) if err != nil { return nil, err } if cert != nil { tr.TLSClientConfig.Certificates = []tls.Certificate{*cert} tr.TLSClientConfig.BuildNameToCertificate() } } if isCertVerificationDisabledForHost(c, host) { tr.TLSClientConfig.InsecureSkipVerify = true } else { tr.TLSClientConfig.RootCAs = getRootCAsForHost(c, host) } if err := c.configureProtocols(u, tr); err != 
nil { return nil, err } if access == creds.NegotiateAccess { // This technically copies a mutex, but we know since we've just created // the object that this mutex is unlocked. return &spnego.Transport{Transport: *tr}, nil } return tr, nil } func (c *Client) HttpClient(u *url.URL, access creds.AccessMode) (*http.Client, error) { c.clientMu.Lock() defer c.clientMu.Unlock() host := u.Host if c.hostClients == nil { c.hostClients = make(map[hostData]*http.Client) } hd := hostData{host: host, mode: access} if client, ok := c.hostClients[hd]; ok { return client, nil } tr, err := c.Transport(u, access) if err != nil { return nil, err } httpClient := &http.Client{ Transport: tr, CheckRedirect: func(*http.Request, []*http.Request) error { return http.ErrUseLastResponse }, } if isCookieJarEnabledForHost(c, host) { tracerx.Printf("http: cookieFile for %s", host) if cookieJar, err := getCookieJarForHost(c, host); err == nil { httpClient.Jar = cookieJar } else { tracerx.Printf("http: error while reading cookieFile: %s", err.Error()) } } c.hostClients[hd] = httpClient if c.VerboseOut == nil { c.VerboseOut = os.Stderr } return httpClient, nil } func (c *Client) CurrentUser() (string, string) { userName, _ := c.gitEnv.Get("user.name") userEmail, _ := c.gitEnv.Get("user.email") return userName, userEmail } func newRequestForRetry(req *http.Request, location string) (*http.Request, error) { newReq, err := http.NewRequest(req.Method, location, nil) if err != nil { return nil, err } if req.URL.Scheme == "https" && newReq.URL.Scheme == "http" { return nil, errors.New(tr.Tr.Get("refusing insecure redirect: HTTPS to HTTP")) } sameHost := req.URL.Host == newReq.URL.Host for key := range req.Header { if key == "Authorization" { if !sameHost { continue } } newReq.Header.Set(key, req.Header.Get(key)) } oldestURL := strings.SplitN(req.URL.String(), "?", 2)[0] newURL := strings.SplitN(newReq.URL.String(), "?", 2)[0] tracerx.Printf("api: redirect %s %s to %s", req.Method, oldestURL, newURL) // This body will have already been rewound from a call to // lfsapi.Client.traceRequest(). newReq.Body = req.Body newReq.ContentLength = req.ContentLength // Copy the request's context.Context, if any. 
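// This also carries over any cancellation, deadline, and retry-count
// values stored on the context (see WithRetries in retries.go).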
newReq = newReq.WithContext(req.Context()) return newReq, nil } type deadlineConn struct { Timeout time.Duration net.Conn } func (c *deadlineConn) Read(b []byte) (int, error) { if err := c.Conn.SetDeadline(time.Now().Add(c.Timeout)); err != nil { return 0, err } return c.Conn.Read(b) } func (c *deadlineConn) Write(b []byte) (int, error) { if err := c.Conn.SetDeadline(time.Now().Add(c.Timeout)); err != nil { return 0, err } return c.Conn.Write(b) } func init() { UserAgent = config.VersionDesc } type testEnv map[string]string func (e testEnv) Get(key string) (v string, ok bool) { v, ok = e[key] return } func (e testEnv) GetAll(key string) []string { if v, ok := e.Get(key); ok { return []string{v} } return make([]string, 0) } func (e testEnv) Int(key string, def int) int { s, _ := e.Get(key) return config.Int(s, def) } func (e testEnv) Bool(key string, def bool) bool { s, _ := e.Get(key) return config.Bool(s, def) } func (e testEnv) All() map[string][]string { m := make(map[string][]string) for k := range e { m[k] = e.GetAll(k) } return m } git-lfs-3.6.1/lfshttp/client_test.go000066400000000000000000000260771472372047300174410ustar00rootroot00000000000000package lfshttp import ( "crypto/tls" "encoding/json" "fmt" "net" "net/http" "net/http/httptest" "sync/atomic" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type redirectTest struct { Test string } func TestClientRedirect(t *testing.T) { var srv3Https, srv3Http string var called1 uint32 var called2 uint32 var called3 uint32 srv3 := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called3, 1) t.Logf("srv3 req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) switch r.URL.Path { case "/upgrade": assert.Equal(t, "auth", r.Header.Get("Authorization")) assert.Equal(t, "1", r.Header.Get("A")) w.Header().Set("Location", srv3Https+"/upgraded") w.WriteHeader(301) case "/upgraded": // Since srv3 listens on both a TLS-enabled socket and a // TLS-disabled one, they are two different hosts. // Ensure that, even though this is a "secure" upgrade, // the authorization header is stripped. 
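// (newRequestForRetry in client.go only forwards the Authorization
// header when the redirect target has the same host as the original
// request.)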
assert.Equal(t, "", r.Header.Get("Authorization")) assert.Equal(t, "1", r.Header.Get("A")) case "/downgrade": assert.Equal(t, "auth", r.Header.Get("Authorization")) assert.Equal(t, "1", r.Header.Get("A")) w.Header().Set("Location", srv3Http+"/404") w.WriteHeader(301) default: w.WriteHeader(404) } })) srv2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called2, 1) t.Logf("srv2 req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) switch r.URL.Path { case "/ok": assert.Equal(t, "", r.Header.Get("Authorization")) assert.Equal(t, "1", r.Header.Get("A")) body := &redirectTest{} err := json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "External", body.Test) w.WriteHeader(200) default: w.WriteHeader(404) } })) srv1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called1, 1) t.Logf("srv1 req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) switch r.URL.Path { case "/local": w.Header().Set("Location", "/ok") w.WriteHeader(307) case "/external": w.Header().Set("Location", srv2.URL+"/ok") w.WriteHeader(307) case "/ok": assert.Equal(t, "auth", r.Header.Get("Authorization")) assert.Equal(t, "1", r.Header.Get("A")) body := &redirectTest{} err := json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Local", body.Test) w.WriteHeader(200) default: w.WriteHeader(404) } })) defer srv1.Close() defer srv2.Close() defer srv3.Close() srv3InsecureListener, err := net.Listen("tcp", "127.0.0.1:0") require.Nil(t, err) go http.Serve(srv3InsecureListener, srv3.Config.Handler) defer srv3InsecureListener.Close() srv3Https = srv3.URL srv3Http = fmt.Sprintf("http://%s", srv3InsecureListener.Addr().String()) c, err := NewClient(NewContext(nil, nil, map[string]string{ fmt.Sprintf("http.%s.sslverify", srv3Https): "false", fmt.Sprintf("http.%s/.sslverify", srv3Https): "false", fmt.Sprintf("http.%s.sslverify", srv3Http): "false", fmt.Sprintf("http.%s/.sslverify", srv3Http): "false", fmt.Sprintf("http.sslverify"): "false", })) require.Nil(t, err) // local redirect req, err := http.NewRequest("POST", srv1.URL+"/local", nil) require.Nil(t, err) req.Header.Set("Authorization", "auth") req.Header.Set("A", "1") require.Nil(t, MarshalToRequest(req, &redirectTest{Test: "Local"})) res, err := c.Do(req) require.Nil(t, err) assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 2, called1) assert.EqualValues(t, 0, called2) // external redirect req, err = http.NewRequest("POST", srv1.URL+"/external", nil) require.Nil(t, err) req.Header.Set("Authorization", "auth") req.Header.Set("A", "1") require.Nil(t, MarshalToRequest(req, &redirectTest{Test: "External"})) res, err = c.Do(req) require.Nil(t, err) assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 3, called1) assert.EqualValues(t, 1, called2) // http -> https (secure upgrade) req, err = http.NewRequest("POST", srv3Http+"/upgrade", nil) require.Nil(t, err) req.Header.Set("Authorization", "auth") req.Header.Set("A", "1") require.Nil(t, MarshalToRequest(req, &redirectTest{Test: "http->https"})) res, err = c.Do(req) require.Nil(t, err) assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 2, atomic.LoadUint32(&called3)) // https -> http (insecure downgrade) req, err = http.NewRequest("POST", srv3Https+"/downgrade", nil) require.Nil(t, err) req.Header.Set("Authorization", "auth") req.Header.Set("A", "1") require.Nil(t, MarshalToRequest(req, &redirectTest{Test: "https->http"})) _, err = c.Do(req) 
assert.EqualError(t, err, "refusing insecure redirect: HTTPS to HTTP") } func TestNewClient(t *testing.T) { c, err := NewClient(NewContext(nil, nil, map[string]string{ "lfs.dialtimeout": "151", "lfs.keepalive": "152", "lfs.tlstimeout": "153", "lfs.concurrenttransfers": "154", })) require.Nil(t, err) assert.Equal(t, 151, c.DialTimeout) assert.Equal(t, 152, c.KeepaliveTimeout) assert.Equal(t, 153, c.TLSTimeout) assert.Equal(t, 154, c.ConcurrentTransfers) } func TestNewClientWithGitSSLVerify(t *testing.T) { c, err := NewClient(nil) assert.Nil(t, err) assert.False(t, c.SkipSSLVerify) for _, value := range []string{"true", "1", "t"} { c, err = NewClient(NewContext(nil, nil, map[string]string{ "http.sslverify": value, })) t.Logf("http.sslverify: %q", value) assert.Nil(t, err) assert.False(t, c.SkipSSLVerify) } for _, value := range []string{"false", "0", "f"} { c, err = NewClient(NewContext(nil, nil, map[string]string{ "http.sslverify": value, })) t.Logf("http.sslverify: %q", value) assert.Nil(t, err) assert.True(t, c.SkipSSLVerify) } } func TestNewClientWithOSSSLVerify(t *testing.T) { c, err := NewClient(nil) assert.Nil(t, err) assert.False(t, c.SkipSSLVerify) for _, value := range []string{"false", "0", "f"} { c, err = NewClient(NewContext(nil, map[string]string{ "GIT_SSL_NO_VERIFY": value, }, nil)) t.Logf("GIT_SSL_NO_VERIFY: %q", value) assert.Nil(t, err) assert.False(t, c.SkipSSLVerify) } for _, value := range []string{"true", "1", "t"} { c, err = NewClient(NewContext(nil, map[string]string{ "GIT_SSL_NO_VERIFY": value, }, nil)) t.Logf("GIT_SSL_NO_VERIFY: %q", value) assert.Nil(t, err) assert.True(t, c.SkipSSLVerify) } } func TestNewRequest(t *testing.T) { tests := [][]string{ {"https://example.com", "a", "https://example.com/a"}, {"https://example.com/", "a", "https://example.com/a"}, {"https://example.com/a", "b", "https://example.com/a/b"}, {"https://example.com/a/", "b", "https://example.com/a/b"}, } for _, test := range tests { c, err := NewClient(NewContext(nil, nil, nil)) require.Nil(t, err) req, err := c.NewRequest("POST", Endpoint{Url: test[0]}, test[1], nil) require.Nil(t, err) assert.Equal(t, "POST", req.Method) assert.Equal(t, test[2], req.URL.String(), fmt.Sprintf("endpoint: %s, suffix: %s, expected: %s", test[0], test[1], test[2])) } } func TestNewRequestWithBody(t *testing.T) { c, err := NewClient(NewContext(nil, nil, nil)) require.Nil(t, err) body := struct { Test string }{Test: "test"} req, err := c.NewRequest("POST", Endpoint{Url: "https://example.com"}, "body", body) require.Nil(t, err) assert.NotNil(t, req.Body) assert.Equal(t, "15", req.Header.Get("Content-Length")) assert.EqualValues(t, 15, req.ContentLength) } func TestMarshalToRequest(t *testing.T) { req, err := http.NewRequest("POST", "https://foo/bar", nil) require.Nil(t, err) assert.Nil(t, req.Body) assert.Equal(t, "", req.Header.Get("Content-Length")) assert.EqualValues(t, 0, req.ContentLength) body := struct { Test string }{Test: "test"} require.Nil(t, MarshalToRequest(req, body)) assert.NotNil(t, req.Body) assert.Equal(t, "15", req.Header.Get("Content-Length")) assert.EqualValues(t, 15, req.ContentLength) } func TestHttp2(t *testing.T) { var calledSrvTLS uint32 var calledSrv uint32 srvTLS := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&calledSrvTLS, 1) assert.Equal(t, "GET", r.Method) assert.Equal(t, "HTTP/2.0", r.Proto) w.WriteHeader(200) })) srvTLS.TLS = &tls.Config{NextProtos: []string{"h2", "http/1.1"}} srvTLS.StartTLS() srv := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&calledSrv, 1) assert.Equal(t, "GET", r.Method) assert.Equal(t, "HTTP/1.1", r.Proto) w.WriteHeader(200) })) defer srvTLS.Close() defer srv.Close() c, err := NewClient(NewContext(nil, nil, map[string]string{ fmt.Sprintf("http.sslverify"): "false", })) require.Nil(t, err) req, err := http.NewRequest("GET", srvTLS.URL, nil) require.Nil(t, err) res, err := c.Do(req) require.Nil(t, err) assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, calledSrvTLS) req, err = http.NewRequest("GET", srv.URL, nil) require.Nil(t, err) res, err = c.Do(req) require.Nil(t, err) assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, calledSrv) } func TestHttpVersion(t *testing.T) { testcases := []struct { Proto string Setting string TLSOk bool PlaintextOk bool Error string }{ {"HTTP/2.0", "HTTP/2", true, false, "HTTP/2 cannot be used except with TLS"}, {"HTTP/1.1", "HTTP/1.1", true, true, ""}, {"HTTP/2.0", "lalala", false, false, `Unknown HTTP version "lalala"`}, } for _, test := range testcases { var calledSrvTLS uint32 var calledSrv uint32 srvTLS := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&calledSrvTLS, 1) assert.Equal(t, "GET", r.Method) assert.Equal(t, test.Proto, r.Proto) w.WriteHeader(200) })) srvTLS.TLS = &tls.Config{NextProtos: []string{"h2", "http/1.1"}} srvTLS.StartTLS() srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&calledSrv, 1) assert.Equal(t, "GET", r.Method) assert.Equal(t, "HTTP/1.1", r.Proto) w.WriteHeader(200) })) defer srvTLS.Close() defer srv.Close() c, err := NewClient(NewContext(nil, nil, map[string]string{ "http.sslverify": "false", "http.version": test.Setting, })) require.Nil(t, err) req, err := http.NewRequest("GET", srvTLS.URL, nil) require.Nil(t, err) if test.TLSOk { res, err := c.Do(req) require.Nil(t, err) assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, calledSrvTLS) } else { _, err := c.Do(req) require.NotNil(t, err) assert.EqualValues(t, err.Error(), test.Error) assert.EqualValues(t, 0, calledSrv) } req, err = http.NewRequest("GET", srv.URL, nil) require.Nil(t, err) if test.PlaintextOk { res, err := c.Do(req) require.Nil(t, err) assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, calledSrv) } else { _, err := c.Do(req) require.NotNil(t, err) assert.EqualValues(t, err.Error(), test.Error) assert.EqualValues(t, 0, calledSrv) } } } git-lfs-3.6.1/lfshttp/cookies.go000066400000000000000000000011451472372047300165470ustar00rootroot00000000000000package lfshttp import ( "fmt" "net/http" "github.com/git-lfs/git-lfs/v3/tools" "github.com/ssgelm/cookiejarparser" ) func isCookieJarEnabledForHost(c *Client, host string) bool { _, cookieFileOk := c.uc.Get("http", fmt.Sprintf("https://%v", host), "cookieFile") return cookieFileOk } func getCookieJarForHost(c *Client, host string) (http.CookieJar, error) { cookieFile, _ := c.uc.Get("http", fmt.Sprintf("https://%v", host), "cookieFile") cookieFilePath, err := tools.ExpandPath(cookieFile, false) if err != nil { return nil, err } return cookiejarparser.LoadCookieJarFile(cookieFilePath) } git-lfs-3.6.1/lfshttp/endpoint.go000066400000000000000000000057051472372047300167410ustar00rootroot00000000000000package lfshttp import ( "fmt" "net/url" "regexp" "strings" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/ssh" ) const UrlUnknown = "" // An Endpoint describes how to 
access a Git LFS server. type Endpoint struct { Url string SSHMetadata ssh.SSHMetadata Operation string OriginalUrl string } func endpointOperation(e Endpoint, method string) string { if len(e.Operation) > 0 { return e.Operation } switch method { case "GET", "HEAD": return "download" default: return "upload" } } // EndpointFromSshUrl constructs a new endpoint from an ssh:// URL func EndpointFromSshUrl(u *url.URL) Endpoint { var endpoint Endpoint // Pull out port now, we need it separately for SSH regex := regexp.MustCompile(`^([^\:]+)(?:\:(\d+))?$`) match := regex.FindStringSubmatch(u.Host) if match == nil || len(match) < 2 { endpoint.Url = UrlUnknown return endpoint } endpoint.OriginalUrl = u.String() host := match[1] if u.User != nil && u.User.Username() != "" { endpoint.SSHMetadata.UserAndHost = fmt.Sprintf("%s@%s", u.User.Username(), host) } else { endpoint.SSHMetadata.UserAndHost = host } if len(match) > 2 { endpoint.SSHMetadata.Port = match[2] } endpoint.SSHMetadata.Path = u.Path // Fallback URL for using HTTPS while still using SSH for git // u.Host includes host & port so can't use SSH port endpoint.Url = fmt.Sprintf("https://%s%s", host, u.Path) return endpoint } // EndpointFromBareSshUrl constructs a new endpoint from a bare SSH URL: // // user@host.com:path/to/repo.git or // [user@host.com:port]:path/to/repo.git func EndpointFromBareSshUrl(rawurl string) Endpoint { parts := strings.Split(rawurl, ":") partsLen := len(parts) if partsLen < 2 { return Endpoint{Url: rawurl} } // Treat presence of ':' as a bare URL var newPath string if len(parts) > 2 { // port included; really should only ever be 3 parts // Correctly handle [host:port]:path URLs parts[0] = strings.TrimPrefix(parts[0], "[") parts[1] = strings.TrimSuffix(parts[1], "]") newPath = fmt.Sprintf("%v:%v", parts[0], strings.Join(parts[1:], "/")) } else { newPath = strings.Join(parts, "/") } newrawurl := fmt.Sprintf("ssh://%v", newPath) newu, err := url.Parse(newrawurl) if err != nil { return Endpoint{Url: UrlUnknown} } endpoint := EndpointFromSshUrl(newu) if strings.HasPrefix(endpoint.SSHMetadata.Path, "/") { endpoint.SSHMetadata.Path = endpoint.SSHMetadata.Path[1:] } return endpoint } // Construct a new endpoint from a HTTP URL func EndpointFromHttpUrl(u *url.URL) Endpoint { // just pass this straight through return Endpoint{Url: u.String(), OriginalUrl: u.String()} } func EndpointFromLocalPath(path string) Endpoint { url := git.RewriteLocalPathAsURL(path) return Endpoint{Url: url, OriginalUrl: url} } // Construct a new endpoint from a file URL func EndpointFromFileUrl(u *url.URL) Endpoint { // just pass this straight through return Endpoint{Url: u.String(), OriginalUrl: u.String()} } git-lfs-3.6.1/lfshttp/errors.go000066400000000000000000000061021472372047300164250ustar00rootroot00000000000000package lfshttp import ( "fmt" "net/http" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tr" ) type httpError interface { Error() string HTTPResponse() *http.Response } func IsHTTP(err error) (*http.Response, bool) { if httpErr, ok := err.(httpError); ok { return httpErr.HTTPResponse(), true } return nil, false } type ClientError struct { Message string `json:"message"` DocumentationUrl string `json:"documentation_url,omitempty"` RequestId string `json:"request_id,omitempty"` response *http.Response } func (e *ClientError) HTTPResponse() *http.Response { return e.response } func (e *ClientError) Error() string { return e.Message } func (c *Client) handleResponse(res *http.Response) error { if 
res.StatusCode < 400 { return nil } cliErr := &ClientError{response: res} err := DecodeJSON(res, cliErr) if IsDecodeTypeError(err) { err = nil } if err == nil { if len(cliErr.Message) == 0 { err = defaultError(res) } else { err = cliErr } } if res.StatusCode == 401 { return errors.NewAuthError(err) } if res.StatusCode == 422 { return errors.NewUnprocessableEntityError(err) } if res.StatusCode == 429 { // The Retry-After header could be set, check to see if it exists. h := res.Header.Get("Retry-After") retLaterErr := errors.NewRetriableLaterError(err, h) if retLaterErr != nil { return retLaterErr } return errors.NewRetriableError(err) } if res.StatusCode > 499 && res.StatusCode != 501 && res.StatusCode != 507 && res.StatusCode != 509 { return errors.NewFatalError(err) } return err } type statusCodeError struct { response *http.Response } func NewStatusCodeError(res *http.Response) error { return &statusCodeError{response: res} } func (e *statusCodeError) Error() string { req := e.response.Request return tr.Tr.Get("Invalid HTTP status for %s %s: %d", req.Method, strings.SplitN(req.URL.String(), "?", 2)[0], e.response.StatusCode, ) } func (e *statusCodeError) HTTPResponse() *http.Response { return e.response } func defaultError(res *http.Response) error { var msgFmt string defaultErrors := map[int]string{ 400: tr.Tr.Get("Client error: %%s"), 401: tr.Tr.Get("Authorization error: %%s\nCheck that you have proper access to the repository"), 403: tr.Tr.Get("Authorization error: %%s\nCheck that you have proper access to the repository"), 404: tr.Tr.Get("Repository or object not found: %%s\nCheck that it exists and that you have proper access to it"), 422: tr.Tr.Get("Unprocessable entity: %%s"), 429: tr.Tr.Get("Rate limit exceeded: %%s"), 500: tr.Tr.Get("Server error: %%s"), 501: tr.Tr.Get("Not Implemented: %%s"), 507: tr.Tr.Get("Insufficient server storage: %%s"), 509: tr.Tr.Get("Bandwidth limit exceeded: %%s"), } if f, ok := defaultErrors[res.StatusCode]; ok { msgFmt = f } else if res.StatusCode < 500 { msgFmt = tr.Tr.Get("Client error %%s from HTTP %d", res.StatusCode) } else { msgFmt = tr.Tr.Get("Server error %%s from HTTP %d", res.StatusCode) } return errors.Errorf(fmt.Sprintf(msgFmt), res.Request.URL) } git-lfs-3.6.1/lfshttp/lfshttp.go000066400000000000000000000036211472372047300166000ustar00rootroot00000000000000package lfshttp import ( "encoding/json" "net/http" "regexp" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/tr" ) var ( lfsMediaTypeRE = regexp.MustCompile(`\Aapplication/vnd\.git\-lfs\+json(;|\z)`) jsonMediaTypeRE = regexp.MustCompile(`\Aapplication/json(;|\z)`) ) type Context interface { GitConfig() *git.Configuration OSEnv() config.Environment GitEnv() config.Environment } func NewContext(gitConf *git.Configuration, osEnv, gitEnv map[string]string) Context { c := &testContext{gitConfig: gitConf} if c.gitConfig == nil { c.gitConfig = git.NewConfig("", "") } if osEnv != nil { c.osEnv = testEnv(osEnv) } else { c.osEnv = make(testEnv) } if gitEnv != nil { c.gitEnv = testEnv(gitEnv) } else { c.gitEnv = make(testEnv) } return c } type testContext struct { gitConfig *git.Configuration osEnv config.Environment gitEnv config.Environment } func (c *testContext) GitConfig() *git.Configuration { return c.gitConfig } func (c *testContext) OSEnv() config.Environment { return c.osEnv } func (c *testContext) GitEnv() config.Environment { return c.gitEnv } func IsDecodeTypeError(err error) bool { _, ok 
:= err.(*decodeTypeError) return ok } type decodeTypeError struct { Type string } func (e *decodeTypeError) TypeError() {} func (e *decodeTypeError) Error() string { return tr.Tr.Get("Expected JSON type, got: %q", e.Type) } func DecodeJSON(res *http.Response, obj interface{}) error { ctype := res.Header.Get("Content-Type") if !(lfsMediaTypeRE.MatchString(ctype) || jsonMediaTypeRE.MatchString(ctype)) { return &decodeTypeError{Type: ctype} } err := json.NewDecoder(res.Body).Decode(obj) res.Body.Close() if err != nil { return errors.Wrapf(err, tr.Tr.Get("Unable to parse HTTP response for %s %s", res.Request.Method, res.Request.URL)) } return nil } git-lfs-3.6.1/lfshttp/proxy.go000066400000000000000000000037731472372047300163050ustar00rootroot00000000000000package lfshttp import ( "net/http" "net/url" "strings" "github.com/git-lfs/git-lfs/v3/config" "golang.org/x/net/http/httpproxy" ) // Logic is copied, with small changes, from "net/http".ProxyFromEnvironment in the go std lib. func proxyFromClient(c *Client) func(req *http.Request) (*url.URL, error) { return func(req *http.Request) (*url.URL, error) { httpsProxy, httpProxy, noProxy := getProxyServers(req.URL, c.uc, c.osEnv) var proxy string if req.URL.Scheme == "https" { proxy = httpsProxy } if len(proxy) == 0 { proxy = httpProxy } if len(proxy) == 0 { return nil, nil } if strings.HasPrefix(proxy, "socks5h://") { proxy = strings.Replace(proxy, "socks5h://", "socks5://", 1) } cfg := &httpproxy.Config{ HTTPProxy: proxy, HTTPSProxy: proxy, NoProxy: noProxy, CGI: false, } // We want to use the standard logic except that we want to // allow proxies for localhost, which the standard library does // not. Since the proxy code looks only at the URL, we // synthesize a fake URL except that we rewrite "localhost" to // "127.0.0.1" for purposes of looking up the proxy. 
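// Copying the URL by value below keeps the rewrite from mutating the
// caller's request.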
u := *(req.URL) if u.Host == "localhost" { u.Host = "127.0.0.1" } return cfg.ProxyFunc()(&u) } } func getProxyServers(u *url.URL, urlCfg *config.URLConfig, osEnv config.Environment) (httpsProxy string, httpProxy string, noProxy string) { if osEnv == nil { return } if len(httpsProxy) == 0 { httpsProxy, _ = osEnv.Get("HTTPS_PROXY") } if len(httpsProxy) == 0 { httpsProxy, _ = osEnv.Get("https_proxy") } if len(httpProxy) == 0 { httpProxy, _ = osEnv.Get("HTTP_PROXY") } if len(httpProxy) == 0 { httpProxy, _ = osEnv.Get("http_proxy") } if urlCfg != nil { gitProxy, ok := urlCfg.Get("http", u.String(), "proxy") if len(gitProxy) > 0 && ok { if u.Scheme == "https" { httpsProxy = gitProxy } httpProxy = gitProxy } } noProxy, _ = osEnv.Get("NO_PROXY") if len(noProxy) == 0 { noProxy, _ = osEnv.Get("no_proxy") } return } git-lfs-3.6.1/lfshttp/proxy_test.go000066400000000000000000000112401472372047300173300ustar00rootroot00000000000000package lfshttp import ( "net/http" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestHttpsProxyFromGitConfig(t *testing.T) { c, err := NewClient(NewContext(nil, map[string]string{ "HTTPS_PROXY": "https://proxy-from-env:8080", }, map[string]string{ "http.proxy": "https://proxy-from-git-config:8080", })) require.Nil(t, err) req, err := http.NewRequest("GET", "https://some-host.com:123/foo/bar", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) assert.Equal(t, "proxy-from-git-config:8080", proxyURL.Host) assert.Nil(t, err) } func TestProxyForURL(t *testing.T) { c, err := NewClient(NewContext(nil, nil, map[string]string{ "http.proxy": "https://proxy-for-everyone:8080", "http.https://some-host.com:123.proxy": "https://proxy-for-some-host:8080", })) require.Nil(t, err) req, err := http.NewRequest("GET", "https://some-host.com:123/foo/bar", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) assert.Equal(t, "proxy-for-some-host:8080", proxyURL.Host) assert.Nil(t, err) } func TestHttpProxyFromGitConfig(t *testing.T) { c, err := NewClient(NewContext(nil, map[string]string{ "HTTPS_PROXY": "https://proxy-from-env:8080", }, map[string]string{ "http.proxy": "http://proxy-from-git-config:8080", })) require.Nil(t, err) req, err := http.NewRequest("GET", "http://some-host.com:123/foo/bar", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) assert.Equal(t, "proxy-from-git-config:8080", proxyURL.Host) assert.Nil(t, err) } func TestProxyFromEnvironment(t *testing.T) { c, err := NewClient(NewContext(nil, map[string]string{ "HTTPS_PROXY": "https://proxy-from-env:8080", }, nil)) require.Nil(t, err) req, err := http.NewRequest("GET", "https://some-host.com:123/foo/bar", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) assert.Equal(t, "proxy-from-env:8080", proxyURL.Host) assert.Nil(t, err) } func TestHTTPSProxyFromEnvironment(t *testing.T) { c, err := NewClient(NewContext(nil, map[string]string{ "HTTPS_PROXY": "http://proxy-from-env:8080", }, nil)) require.Nil(t, err) req, err := http.NewRequest("GET", "https://some-host.com:123/foo/bar", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) assert.Equal(t, "proxy-from-env:8080", proxyURL.Host) assert.Nil(t, err) } func TestHTTPProxyFromEnvironment(t *testing.T) { c, err := NewClient(NewContext(nil, map[string]string{ "HTTPS_PROXY": "http://proxy-from-env:8080", }, nil)) require.Nil(t, err) req, err := http.NewRequest("GET", "http://some-host.com:123/foo/bar", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) 
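// Only HTTPS_PROXY is set, so a plain-HTTP request should bypass it.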
assert.Nil(t, proxyURL) assert.Nil(t, err) } func TestProxyIsNil(t *testing.T) { c, _ := NewClient(nil) req, err := http.NewRequest("GET", "http://some-host.com:123/foo/bar", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) assert.Nil(t, proxyURL) assert.Nil(t, err) } func TestProxyNoProxy(t *testing.T) { c, err := NewClient(NewContext(nil, map[string]string{ "NO_PROXY": "some-host", }, map[string]string{ "http.proxy": "https://proxy-from-git-config:8080", })) require.Nil(t, err) req, err := http.NewRequest("GET", "https://some-host:8080", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) assert.Nil(t, proxyURL) assert.Nil(t, err) } func TestProxyNoProxyWithWildcard(t *testing.T) { c, err := NewClient(NewContext(nil, map[string]string{ "NO_PROXY": "*.example.com", }, map[string]string{ "http.proxy": "https://proxy-from-git-config:8080", })) require.Nil(t, err) req, err := http.NewRequest("GET", "https://foo.example.com:8080", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) assert.Nil(t, proxyURL) assert.Nil(t, err) } func TestSocksProxyFromEnvironment(t *testing.T) { c, err := NewClient(NewContext(nil, map[string]string{ "HTTPS_PROXY": "socks5://proxy-from-env:3128", }, nil)) require.Nil(t, err) req, err := http.NewRequest("GET", "https://some-host.com:123/foo/bar", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) assert.Equal(t, "socks5", proxyURL.Scheme) assert.Equal(t, "proxy-from-env:3128", proxyURL.Host) assert.Nil(t, err) } func TestSocks5hProxyFromEnvironment(t *testing.T) { c, err := NewClient(NewContext(nil, map[string]string{ "HTTPS_PROXY": "socks5h://proxy-from-env:3128", }, nil)) require.Nil(t, err) req, err := http.NewRequest("GET", "https://some-host.com:123/foo/bar", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) assert.Equal(t, "socks5", proxyURL.Scheme) assert.Equal(t, "proxy-from-env:3128", proxyURL.Host) assert.Nil(t, err) } git-lfs-3.6.1/lfshttp/retries.go000066400000000000000000000017151472372047300165730ustar00rootroot00000000000000package lfshttp import ( "context" "net/http" ) // ckey is a type that wraps a string for package-unique context.Context keys. type ckey string const ( // contextKeyRetries is a context.Context key for storing the desired // number of retries for a given request. contextKeyRetries ckey = "retries" // defaultRequestRetries is the default number of retries to perform on // a given HTTP request. defaultRequestRetries = 0 ) // WithRetries stores the desired number of retries "n" on the given // http.Request, and causes it to be retried "n" times in the case of a non-nil // network related error. func WithRetries(req *http.Request, n int) *http.Request { ctx := req.Context() ctx = context.WithValue(ctx, contextKeyRetries, n) return req.WithContext(ctx) } // Retries returns the number of retries requested for a given http.Request. 
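// The boolean result reports whether a retry count was stored via
// WithRetries; if it is false, callers fall back to
// defaultRequestRetries.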
func Retries(req *http.Request) (int, bool) { n, ok := req.Context().Value(contextKeyRetries).(int) return n, ok } git-lfs-3.6.1/lfshttp/retries_test.go000066400000000000000000000035171472372047300176340ustar00rootroot00000000000000package lfshttp import ( "encoding/json" "net/http" "net/http/httptest" "sync/atomic" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestWithRetries(t *testing.T) { req, _ := http.NewRequest("GET", "/", nil) req = WithRetries(req, 1) n, ok := Retries(req) assert.True(t, ok) assert.Equal(t, 1, n) } func TestRetriesOnUnannotatedRequest(t *testing.T) { req, _ := http.NewRequest("GET", "/", nil) n, ok := Retries(req) assert.False(t, ok) assert.Equal(t, 0, n) } func TestRequestWithRetries(t *testing.T) { type T struct { S string `json:"s"` } var hasRaw bool = true var requests uint32 var berr error srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var payload T if err := json.NewDecoder(r.Body).Decode(&payload); err != nil { berr = err } assert.Equal(t, "Hello, world!", payload.S) if atomic.AddUint32(&requests, 1) < 3 { raw, ok := w.(http.Hijacker) if !ok { hasRaw = false return } conn, _, err := raw.Hijack() require.NoError(t, err) require.NoError(t, conn.Close()) return } })) defer srv.Close() c, err := NewClient(nil) require.NoError(t, err) req, err := http.NewRequest("POST", srv.URL, nil) require.NoError(t, err) require.NoError(t, MarshalToRequest(req, &T{"Hello, world!"})) if !hasRaw { // Skip tests where the implementation of // net/http/httptest.Server does not provide raw access to the // connection. // // Defer the skip outside of the server, since t.Skip halts the // running goroutine. t.Skip("lfsapi: net/http/httptest.Server does not provide raw access") } res, err := c.Do(WithRetries(req, 8)) assert.NoError(t, berr) assert.NoError(t, err) require.NotNil(t, res, "lfsapi: expected response") assert.Equal(t, http.StatusOK, res.StatusCode) } git-lfs-3.6.1/lfshttp/ssh.go000066400000000000000000000055321472372047300157140ustar00rootroot00000000000000package lfshttp import ( "bytes" "encoding/json" "strings" "time" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/ssh" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/git-lfs/git-lfs/v3/tools" "github.com/rubyist/tracerx" ) type SSHResolver interface { Resolve(Endpoint, string) (sshAuthResponse, error) } func withSSHCache(ssh SSHResolver) SSHResolver { return &sshCache{ endpoints: make(map[string]*sshAuthResponse), ssh: ssh, } } type sshCache struct { endpoints map[string]*sshAuthResponse ssh SSHResolver } func (c *sshCache) Resolve(e Endpoint, method string) (sshAuthResponse, error) { if len(e.SSHMetadata.UserAndHost) == 0 { return sshAuthResponse{}, nil } key := strings.Join([]string{e.SSHMetadata.UserAndHost, e.SSHMetadata.Port, e.SSHMetadata.Path, method}, "//") if res, ok := c.endpoints[key]; ok { if _, expired := res.IsExpiredWithin(5 * time.Second); !expired { tracerx.Printf("ssh cache: %s git-lfs-authenticate %s %s", e.SSHMetadata.UserAndHost, e.SSHMetadata.Path, endpointOperation(e, method)) return *res, nil } else { tracerx.Printf("ssh cache expired: %s git-lfs-authenticate %s %s", e.SSHMetadata.UserAndHost, e.SSHMetadata.Path, endpointOperation(e, method)) } } res, err := c.ssh.Resolve(e, method) if err == nil { c.endpoints[key] = &res } return res, err } type sshAuthResponse struct { Message string `json:"-"` Href string `json:"href"` Header map[string]string `json:"header"` ExpiresAt 
time.Time `json:"expires_at"` ExpiresIn int `json:"expires_in"` createdAt time.Time } func (r *sshAuthResponse) IsExpiredWithin(d time.Duration) (time.Time, bool) { return tools.IsExpiredAtOrIn(r.createdAt, d, r.ExpiresAt, time.Duration(r.ExpiresIn)*time.Second) } type sshAuthClient struct { os config.Environment git config.Environment } func (c *sshAuthClient) Resolve(e Endpoint, method string) (sshAuthResponse, error) { res := sshAuthResponse{} if len(e.SSHMetadata.UserAndHost) == 0 { return res, nil } exe, args, _, _ := ssh.GetLFSExeAndArgs(c.os, c.git, &e.SSHMetadata, "git-lfs-authenticate", endpointOperation(e, method), false, "") cmd, err := subprocess.ExecCommand(exe, args...) if err != nil { return res, err } // Save stdout and stderr in separate buffers var outbuf, errbuf bytes.Buffer cmd.Stdout = &outbuf cmd.Stderr = &errbuf now := time.Now() // Execute command err = cmd.Start() if err == nil { err = cmd.Wait() } // Processing result if err != nil { res.Message = strings.TrimSpace(errbuf.String()) } else { err = json.Unmarshal(outbuf.Bytes(), &res) if res.ExpiresIn == 0 && res.ExpiresAt.IsZero() { ttl := c.git.Int("lfs.defaulttokenttl", 0) if ttl < 0 { ttl = 0 } res.ExpiresIn = ttl } res.createdAt = now } return res, err } git-lfs-3.6.1/lfshttp/ssh_test.go000066400000000000000000000126341472372047300167540ustar00rootroot00000000000000package lfshttp import ( "testing" "time" "github.com/git-lfs/git-lfs/v3/errors" sshp "github.com/git-lfs/git-lfs/v3/ssh" "github.com/stretchr/testify/assert" ) func TestSSHCacheResolveFromCache(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) cache.endpoints["userandhost//1//path//post"] = &sshAuthResponse{ Href: "cache", createdAt: time.Now(), } ssh.responses["userandhost"] = sshAuthResponse{Href: "real"} e := Endpoint{ SSHMetadata: sshp.SSHMetadata{ UserAndHost: "userandhost", Port: "1", Path: "path", }, } res, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "cache", res.Href) } func TestSSHCacheResolveFromCacheWithFutureExpiresAt(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) cache.endpoints["userandhost//1//path//post"] = &sshAuthResponse{ Href: "cache", ExpiresAt: time.Now().Add(time.Duration(1) * time.Hour), createdAt: time.Now(), } ssh.responses["userandhost"] = sshAuthResponse{Href: "real"} e := Endpoint{ SSHMetadata: sshp.SSHMetadata{ UserAndHost: "userandhost", Port: "1", Path: "path", }, } res, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "cache", res.Href) } func TestSSHCacheResolveFromCacheWithFutureExpiresIn(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) cache.endpoints["userandhost//1//path//post"] = &sshAuthResponse{ Href: "cache", ExpiresIn: 60 * 60, createdAt: time.Now(), } ssh.responses["userandhost"] = sshAuthResponse{Href: "real"} e := Endpoint{ SSHMetadata: sshp.SSHMetadata{ UserAndHost: "userandhost", Port: "1", Path: "path", }, } res, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "cache", res.Href) } func TestSSHCacheResolveFromCacheWithPastExpiresAt(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) cache.endpoints["userandhost//1//path//post"] = &sshAuthResponse{ Href: "cache", ExpiresAt: time.Now().Add(time.Duration(-1) * time.Hour), createdAt: time.Now(), } ssh.responses["userandhost"] = sshAuthResponse{Href: "real"} e := Endpoint{ SSHMetadata: sshp.SSHMetadata{ UserAndHost: "userandhost", Port: "1", Path: "path", }, } res, err := 
cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "real", res.Href) } func TestSSHCacheResolveFromCacheWithPastExpiresIn(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) cache.endpoints["userandhost//1//path//post"] = &sshAuthResponse{ Href: "cache", ExpiresIn: -60 * 60, createdAt: time.Now(), } ssh.responses["userandhost"] = sshAuthResponse{Href: "real"} e := Endpoint{ SSHMetadata: sshp.SSHMetadata{ UserAndHost: "userandhost", Port: "1", Path: "path", }, } res, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "real", res.Href) } func TestSSHCacheResolveFromCacheWithAmbiguousExpirationInfo(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) cache.endpoints["userandhost//1//path//post"] = &sshAuthResponse{ Href: "cache", ExpiresIn: 60 * 60, ExpiresAt: time.Now().Add(-1 * time.Hour), createdAt: time.Now(), } ssh.responses["userandhost"] = sshAuthResponse{Href: "real"} e := Endpoint{ SSHMetadata: sshp.SSHMetadata{ UserAndHost: "userandhost", Port: "1", Path: "path", }, } res, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "cache", res.Href) } func TestSSHCacheResolveWithoutError(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) assert.Equal(t, 0, len(cache.endpoints)) ssh.responses["userandhost"] = sshAuthResponse{Href: "real"} e := Endpoint{ SSHMetadata: sshp.SSHMetadata{ UserAndHost: "userandhost", Port: "1", Path: "path", }, } res, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "real", res.Href) assert.Equal(t, 1, len(cache.endpoints)) cacheres, ok := cache.endpoints["userandhost//1//path//post"] assert.True(t, ok) assert.NotNil(t, cacheres) assert.Equal(t, "real", cacheres.Href) delete(ssh.responses, "userandhost") res2, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "real", res2.Href) } func TestSSHCacheResolveWithError(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) assert.Equal(t, 0, len(cache.endpoints)) ssh.responses["userandhost"] = sshAuthResponse{Message: "resolve error", Href: "real"} e := Endpoint{ SSHMetadata: sshp.SSHMetadata{ UserAndHost: "userandhost", Port: "1", Path: "path", }, } res, err := cache.Resolve(e, "post") assert.NotNil(t, err) assert.Equal(t, "real", res.Href) assert.Equal(t, 0, len(cache.endpoints)) delete(ssh.responses, "userandhost") res2, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "", res2.Href) } func newFakeResolver() *fakeResolver { return &fakeResolver{responses: make(map[string]sshAuthResponse)} } type fakeResolver struct { responses map[string]sshAuthResponse } func (r *fakeResolver) Resolve(e Endpoint, method string) (sshAuthResponse, error) { res := r.responses[e.SSHMetadata.UserAndHost] var err error if len(res.Message) > 0 { err = errors.New(res.Message) } res.createdAt = time.Now() return res, err } git-lfs-3.6.1/lfshttp/standalone/000077500000000000000000000000001472372047300167135ustar00rootroot00000000000000git-lfs-3.6.1/lfshttp/standalone/standalone.go000066400000000000000000000211251472372047300213730ustar00rootroot00000000000000package standalone import ( "bufio" "encoding/json" "fmt" "net/url" "os" "os/exec" "path/filepath" "regexp" "runtime" "strings" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/lfs" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/git-lfs/git-lfs/v3/tools" 
"github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) // inputMessage represents a message from Git LFS to the standalone transfer // agent. Not all fields will be filled in on all requests. type inputMessage struct { Event string `json:"event"` Operation string `json:"operation"` Remote string `json:"remote"` Oid string `json:"oid"` Size int64 `json:"size"` Path string `json:"path"` } // errorMessage represents an optional error message that may occur in a // completion response. type errorMessage struct { Message string `json:"message"` } // outputErrorMessage represents an error message that may occur during startup. type outputErrorMessage struct { Error errorMessage `json:"error"` } // completeMessage represents a completion response. type completeMessage struct { Event string `json:"event"` Oid string `json:"oid"` Path string `json:"path,omitempty"` Error *errorMessage `json:"error,omitempty"` } type fileHandler struct { remotePath string remoteConfig *config.Configuration output *os.File config *config.Configuration tempdir string } // fileUrlFromRemote looks up the URL depending on the remote. The remote can be // a literal URL or the name of a remote. // // In this situation, we only accept file URLs. func fileUrlFromRemote(cfg *config.Configuration, name string, direction string) (*url.URL, error) { if strings.HasPrefix(name, "file://") { if url, err := url.Parse(name); err == nil { return url, nil } } apiClient, err := lfsapi.NewClient(cfg) if err != nil { return nil, err } for _, remote := range cfg.Remotes() { if remote != name { continue } remoteEndpoint := apiClient.Endpoints.Endpoint(direction, remote) if !strings.HasPrefix(remoteEndpoint.Url, "file://") { return nil, nil } return url.Parse(remoteEndpoint.Url) } return nil, nil } // gitDirAtPath finds the .git directory corresponding to the given path, which // may be the .git directory itself, the working tree, or the root of a bare // repository. // // We filter out the GIT_DIR environment variable to ensure we get the expected // result, and we change directories to ensure that we can make use of // filepath.Abs. Using --absolute-git-dir instead of --git-dir is not an option // because we support Git versions that don't have --absolute-git-dir. func gitDirAtPath(path string) (string, error) { // Filter out all the GIT_* environment variables. env := os.Environ() n := 0 for _, val := range env { if !strings.HasPrefix(val, "GIT_") { env[n] = val n++ } } env = env[:n] // Trim any trailing .git path segment. 
if filepath.Base(path) == ".git" { path = filepath.Dir(path) } curdir, err := os.Getwd() if err != nil { return "", err } err = os.Chdir(path) if err != nil { return "", err } cmd, err := subprocess.ExecCommand("git", "rev-parse", "--git-dir") if err != nil { return "", errors.Wrap(err, tr.Tr.Get("failed to find `git rev-parse --git-dir`")) } cmd.Cmd.Env = env out, err := cmd.Output() if err != nil { if err, ok := err.(*exec.ExitError); ok && len(err.Stderr) > 0 { return "", errors.New(tr.Tr.Get("failed to call `git rev-parse --git-dir`: %s", string(err.Stderr))) } return "", errors.Wrap(err, tr.Tr.Get("failed to call `git rev-parse --git-dir`")) } gitdir, err := tools.TranslateCygwinPath(strings.TrimRight(string(out), "\n")) if err != nil { return "", errors.Wrap(err, tr.Tr.Get("unable to translate path")) } gitdir, err = filepath.Abs(gitdir) if err != nil { return "", errors.Wrap(err, tr.Tr.Get("unable to canonicalize path")) } err = os.Chdir(curdir) if err != nil { return "", err } return tools.CanonicalizeSystemPath(gitdir) } func fixUrlPath(path string) string { if runtime.GOOS != "windows" { return path } // When parsing a file URL, Go produces a path starting with a slash. If // it looks like there's a Windows drive letter at the beginning, strip // off the beginning slash. If this is a Unix-style path from a // Cygwin-like environment, we'll canonicalize it later. re := regexp.MustCompile("/[A-Za-z]:/") if re.MatchString(path) { return path[1:] } return path } // newHandler creates a new handler for the protocol. func newHandler(cfg *config.Configuration, output *os.File, msg *inputMessage) (*fileHandler, error) { url, err := fileUrlFromRemote(cfg, msg.Remote, msg.Operation) if err != nil { return nil, err } if url == nil { return nil, errors.New(tr.Tr.Get("no valid file:// URLs found")) } path, err := tools.TranslateCygwinPath(fixUrlPath(url.Path)) if err != nil { return nil, err } gitdir, err := gitDirAtPath(path) if err != nil { return nil, err } tempdir, err := os.MkdirTemp(cfg.TempDir(), "lfs-standalone-file-*") if err != nil { return nil, err } tracerx.Printf("using %q as remote git directory", gitdir) return &fileHandler{ remotePath: path, remoteConfig: config.NewIn(gitdir, gitdir), output: output, config: cfg, tempdir: tempdir, }, nil } // dispatch dispatches the event depending on the message type. func (h *fileHandler) dispatch(msg *inputMessage) bool { switch msg.Event { case "init": fmt.Fprintln(h.output, "{}") case "upload": h.respond(h.upload(msg.Oid, msg.Size, msg.Path)) case "download": h.respond(h.download(msg.Oid, msg.Size)) case "terminate": return false default: standaloneFailure(tr.Tr.Get("unknown event %q", msg.Event), nil) } return true } // respond sends a response to an upload or download command, using the return // values from those functions. func (h *fileHandler) respond(oid string, path string, err error) { response := &completeMessage{ Event: "complete", Oid: oid, Path: path, } if err != nil { response.Error = &errorMessage{Message: err.Error()} } json.NewEncoder(h.output).Encode(response) } // upload performs the upload action for the given OID, size, and path. It // returns arguments suitable for the respond method. func (h *fileHandler) upload(oid string, size int64, path string) (string, string, error) { if h.remoteConfig.LFSObjectExists(oid, size) { // Already there, nothing to do. 
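// (LFSObjectExists matches on both OID and size, so the upload is
// only skipped when a remote object with the same OID and size is
// already present.)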
return oid, "", nil } dest, err := h.remoteConfig.Filesystem().ObjectPath(oid) if err != nil { return oid, "", err } return oid, "", lfs.LinkOrCopy(h.remoteConfig, path, dest) } // download performs the download action for the given OID and size. It returns // arguments suitable for the respond method. func (h *fileHandler) download(oid string, size int64) (string, string, error) { if !h.remoteConfig.LFSObjectExists(oid, size) { tracerx.Printf("missing object in %q (%s)", h.remotePath, oid) return oid, "", errors.Errorf(tr.Tr.Get("remote missing object %s", oid)) } src, err := h.remoteConfig.Filesystem().ObjectPath(oid) if err != nil { return oid, "", err } tmp, err := os.CreateTemp(h.tempdir, "download") if err != nil { return oid, "", err } tmp.Close() os.Remove(tmp.Name()) path := tmp.Name() return oid, path, lfs.LinkOrCopy(h.config, src, path) } // standaloneFailure reports a fatal error. func standaloneFailure(msg string, err error) { fmt.Fprintf(os.Stderr, "%s: %s\n", msg, err) os.Exit(2) } // ProcessStandaloneData is the primary endpoint for processing data with a // standalone transfer agent. It reads input from the specified input file and // produces output to the specified output file. func ProcessStandaloneData(cfg *config.Configuration, input *os.File, output *os.File) error { var handler *fileHandler scanner := bufio.NewScanner(input) for scanner.Scan() { var msg inputMessage if err := json.NewDecoder(strings.NewReader(scanner.Text())).Decode(&msg); err != nil { return errors.Wrapf(err, tr.Tr.Get("error decoding JSON")) } if handler == nil { var err error handler, err = newHandler(cfg, output, &msg) if err != nil { err := errors.Wrapf(err, tr.Tr.Get("error creating handler")) errMsg := outputErrorMessage{ Error: errorMessage{ Message: err.Error(), }, } json.NewEncoder(output).Encode(errMsg) return err } } if !handler.dispatch(&msg) { break } } if handler != nil { os.RemoveAll(handler.tempdir) } if err := scanner.Err(); err != nil { return errors.Wrapf(err, tr.Tr.Get("error reading input")) } return nil } git-lfs-3.6.1/lfshttp/stats.go000066400000000000000000000103221472372047300162460ustar00rootroot00000000000000package lfshttp import ( "context" "crypto/tls" "fmt" "io" "net/http" "net/http/httptrace" "strings" "sync" "sync/atomic" "time" "github.com/git-lfs/git-lfs/v3/tools" ) type httpTransfer struct { // members managed via sync/atomic must be aligned at the top of this // structure (see: https://github.com/git-lfs/git-lfs/pull/2880). RequestBodySize int64 Start int64 ResponseStart int64 ConnStart int64 ConnEnd int64 DNSStart int64 DNSEnd int64 TLSStart int64 TLSEnd int64 URL string Method string Key string } type statsContextKey string const transferKey = statsContextKey("transfer") func (c *Client) LogHTTPStats(w io.WriteCloser) { fmt.Fprintf(w, "concurrent=%d time=%d version=%s\n", c.ConcurrentTransfers, time.Now().Unix(), UserAgent) c.httpLogger = newSyncLogger(w) } // LogStats is intended to be called after all HTTP operations for the // command have finished. It dumps k/v logs, one line per httpTransfer into // a log file with the current timestamp. // // DEPRECATED: Call LogHTTPStats() before the first HTTP request. func (c *Client) LogStats(out io.Writer) {} // LogRequest tells the client to log the request's stats to the http log // after the response body has been read. 
func (c *Client) LogRequest(r *http.Request, reqKey string) *http.Request { if c.httpLogger == nil { return r } t := &httpTransfer{ URL: strings.SplitN(r.URL.String(), "?", 2)[0], Method: r.Method, Key: reqKey, } ctx := httptrace.WithClientTrace(r.Context(), &httptrace.ClientTrace{ GetConn: func(_ string) { atomic.CompareAndSwapInt64(&t.Start, 0, time.Now().UnixNano()) }, DNSStart: func(_ httptrace.DNSStartInfo) { atomic.CompareAndSwapInt64(&t.DNSStart, 0, time.Now().UnixNano()) }, DNSDone: func(_ httptrace.DNSDoneInfo) { atomic.CompareAndSwapInt64(&t.DNSEnd, 0, time.Now().UnixNano()) }, ConnectStart: func(_, _ string) { atomic.CompareAndSwapInt64(&t.ConnStart, 0, time.Now().UnixNano()) }, ConnectDone: func(_, _ string, _ error) { atomic.CompareAndSwapInt64(&t.ConnEnd, 0, time.Now().UnixNano()) }, TLSHandshakeStart: func() { atomic.CompareAndSwapInt64(&t.TLSStart, 0, time.Now().UnixNano()) }, TLSHandshakeDone: func(_ tls.ConnectionState, _ error) { atomic.CompareAndSwapInt64(&t.TLSEnd, 0, time.Now().UnixNano()) }, GotFirstResponseByte: func() { atomic.CompareAndSwapInt64(&t.ResponseStart, 0, time.Now().UnixNano()) }, }) return r.WithContext(context.WithValue(ctx, transferKey, t)) } // LogResponse sends the current response stats to the http log. // // DEPRECATED: Use LogRequest() instead. func (c *Client) LogResponse(key string, res *http.Response) {} func newSyncLogger(w io.WriteCloser) *syncLogger { ch := make(chan string, 100) wg := &sync.WaitGroup{} wg.Add(1) go func(c chan string, w io.Writer, wg *sync.WaitGroup) { for l := range c { w.Write([]byte(l)) wg.Done() } }(ch, w, wg) return &syncLogger{w: w, ch: ch, wg: wg} } type syncLogger struct { w io.WriteCloser ch chan string wg *sync.WaitGroup } func (l *syncLogger) LogRequest(req *http.Request, bodySize int64) { if l == nil { return } if v := req.Context().Value(transferKey); v != nil { l.logTransfer(v.(*httpTransfer), "request", fmt.Sprintf(" body=%d", bodySize)) } } func (l *syncLogger) LogResponse(req *http.Request, status int, bodySize int64) { if l == nil { return } if v := req.Context().Value(transferKey); v != nil { t := v.(*httpTransfer) now := time.Now().UnixNano() l.logTransfer(t, "response", fmt.Sprintf(" status=%d body=%d conntime=%d dnstime=%d tlstime=%d restime=%d time=%d", status, bodySize, tools.MaxInt64(t.ConnEnd-t.ConnStart, 0), tools.MaxInt64(t.DNSEnd-t.DNSStart, 0), tools.MaxInt64(t.TLSEnd-t.TLSStart, 0), tools.MaxInt64(now-t.ResponseStart, 0), tools.MaxInt64(now-t.Start, 0), )) } } func (l *syncLogger) logTransfer(t *httpTransfer, event, extra string) { l.wg.Add(1) l.ch <- fmt.Sprintf("key=%s event=%s url=%s method=%s%s\n", t.Key, event, t.URL, t.Method, extra, ) } func (l *syncLogger) Close() error { if l == nil { return nil } l.wg.Done() l.wg.Wait() return l.w.Close() } git-lfs-3.6.1/lfshttp/stats_test.go000066400000000000000000000106701472372047300173130ustar00rootroot00000000000000package lfshttp import ( "bytes" "encoding/json" "io" "net/http" "net/http/httptest" "strings" "sync/atomic" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestStatsWithKey(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called, 1) t.Logf("srv req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) body := &verboseTest{} err := json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Verbose", body.Test) 
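// Echo a minimal JSON body so the client records a matching response event.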
w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"Status":"Ok"}`)) })) defer srv.Close() out := &bytes.Buffer{} c, _ := NewClient(nil) c.ConcurrentTransfers = 5 c.LogHTTPStats(nopCloser(out)) req, err := http.NewRequest("POST", srv.URL, nil) req = c.LogRequest(req, "stats-test") req.Header.Set("Authorization", "Basic ABC") req.Header.Set("Content-Type", "application/json") require.Nil(t, err) require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) res, err := c.Do(req) require.Nil(t, err) io.Copy(io.Discard, res.Body) res.Body.Close() assert.Nil(t, c.Close()) assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, called) stats := strings.TrimSpace(out.String()) t.Log(stats) lines := strings.Split(stats, "\n") require.Equal(t, 3, len(lines)) assert.True(t, strings.Contains(lines[0], "concurrent=5")) expected := []string{ "key=stats-test", "event=request", "body=18", "url=" + srv.URL, } for _, substr := range expected { assert.True(t, strings.Contains(lines[1], substr), "missing: "+substr) } expected = []string{ "key=stats-test", "event=response", "url=" + srv.URL, } for _, substr := range expected { assert.True(t, strings.Contains(lines[2], substr), "missing: "+substr) } } func TestStatsWithoutKey(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called, 1) t.Logf("srv req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) body := &verboseTest{} err := json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Verbose", body.Test) w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"Status":"Ok"}`)) })) defer srv.Close() out := &bytes.Buffer{} c, _ := NewClient(nil) c.ConcurrentTransfers = 5 c.LogHTTPStats(nopCloser(out)) req, err := http.NewRequest("POST", srv.URL, nil) req.Header.Set("Authorization", "Basic ABC") req.Header.Set("Content-Type", "application/json") require.Nil(t, err) require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) res, err := c.Do(req) require.Nil(t, err) io.Copy(io.Discard, res.Body) res.Body.Close() assert.Nil(t, c.Close()) assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, called) stats := strings.TrimSpace(out.String()) t.Log(stats) assert.True(t, strings.Contains(stats, "concurrent=5")) assert.Equal(t, 1, len(strings.Split(stats, "\n"))) } func TestStatsDisabled(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called, 1) t.Logf("srv req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) body := &verboseTest{} err := json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Verbose", body.Test) w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"Status":"Ok"}`)) })) defer srv.Close() c, _ := NewClient(nil) c.ConcurrentTransfers = 5 req, err := http.NewRequest("POST", srv.URL, nil) req = c.LogRequest(req, "stats-test") req.Header.Set("Authorization", "Basic ABC") req.Header.Set("Content-Type", "application/json") require.Nil(t, err) require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) res, err := c.Do(req) require.Nil(t, err) io.Copy(io.Discard, res.Body) res.Body.Close() assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, called) out := &bytes.Buffer{} c.LogStats(out) assert.Equal(t, 0, out.Len()) } func 
nopCloser(w io.Writer) io.WriteCloser { return nopWCloser{w} } type nopWCloser struct { io.Writer } func (w nopWCloser) Close() error { return nil } git-lfs-3.6.1/lfshttp/verbose.go000066400000000000000000000070631472372047300165650ustar00rootroot00000000000000package lfshttp import ( "bufio" "bytes" "fmt" "io" "net/http" "net/http/httputil" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) func (c *Client) traceRequest(req *http.Request) (*tracedRequest, error) { tracerx.Printf("HTTP: %s", traceReq(req)) if c.Verbose { if dump, err := httputil.DumpRequest(req, false); err == nil { c.traceHTTPDump(">", dump) } } body, ok := req.Body.(ReadSeekCloser) if body != nil && !ok { return nil, errors.New(tr.Tr.Get("Request body must implement io.ReadCloser and io.Seeker: %T", body)) } if body != nil && ok { body.Seek(0, io.SeekStart) tr := &tracedRequest{ verbose: c.Verbose && isTraceableContent(req.Header), verboseOut: c.VerboseOut, ReadSeekCloser: body, } req.Body = tr return tr, nil } return nil, nil } type tracedRequest struct { BodySize int64 verbose bool verboseOut io.Writer ReadSeekCloser } func (r *tracedRequest) Read(b []byte) (int, error) { n, err := tracedRead(r.ReadSeekCloser, b, r.verboseOut, false, r.verbose) r.BodySize += int64(n) return n, err } func (c *Client) traceResponse(req *http.Request, tracedReq *tracedRequest, res *http.Response) { if tracedReq != nil { c.httpLogger.LogRequest(req, tracedReq.BodySize) } if res == nil { c.httpLogger.LogResponse(req, -1, 0) return } tracerx.Printf("HTTP: %d", res.StatusCode) verboseBody := isTraceableContent(res.Header) res.Body = &tracedResponse{ httpLogger: c.httpLogger, response: res, gitTrace: verboseBody, verbose: verboseBody && c.Verbose, verboseOut: c.VerboseOut, ReadCloser: res.Body, } if !c.Verbose { return } if dump, err := httputil.DumpResponse(res, false); err == nil { if verboseBody { fmt.Fprintf(c.VerboseOut, "\n\n") } else { fmt.Fprintf(c.VerboseOut, "\n") } c.traceHTTPDump("<", dump) } } type tracedResponse struct { BodySize int64 httpLogger *syncLogger response *http.Response verbose bool gitTrace bool verboseOut io.Writer eof bool io.ReadCloser } func (r *tracedResponse) Read(b []byte) (int, error) { n, err := tracedRead(r.ReadCloser, b, r.verboseOut, r.gitTrace, r.verbose) r.BodySize += int64(n) if err == io.EOF && !r.eof { r.httpLogger.LogResponse(r.response.Request, r.response.StatusCode, r.BodySize) r.eof = true } return n, err } func tracedRead(r io.Reader, b []byte, verboseOut io.Writer, gitTrace, verbose bool) (int, error) { n, err := r.Read(b) if err == nil || err == io.EOF { if n > 0 && (gitTrace || verbose) { chunk := string(b[0:n]) if gitTrace { tracerx.Printf("HTTP: %s", chunk) } if verbose { fmt.Fprint(verboseOut, chunk) } } } return n, err } func (c *Client) traceHTTPDump(direction string, dump []byte) { scanner := bufio.NewScanner(bytes.NewBuffer(dump)) for scanner.Scan() { line := scanner.Text() if !c.DebuggingVerbose && strings.HasPrefix(strings.ToLower(line), "authorization: basic") { fmt.Fprintf(c.VerboseOut, "%s Authorization: Basic * * * * *\n", direction) } else { fmt.Fprintf(c.VerboseOut, "%s %s\n", direction, line) } } } var tracedTypes = []string{"json", "text", "xml", "html"} func isTraceableContent(h http.Header) bool { ctype := strings.ToLower(strings.SplitN(h.Get("Content-Type"), ";", 2)[0]) for _, tracedType := range tracedTypes { if strings.Contains(ctype, tracedType) { return true } } return false } func traceReq(req 
*http.Request) string { return fmt.Sprintf("%s %s", req.Method, strings.SplitN(req.URL.String(), "?", 2)[0]) } git-lfs-3.6.1/lfshttp/verbose_test.go000066400000000000000000000134771472372047300176320ustar00rootroot00000000000000package lfshttp import ( "bytes" "encoding/json" "io" "net/http" "net/http/httptest" "strings" "sync/atomic" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type verboseTest struct { Test string } func TestVerboseEnabled(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called, 1) t.Logf("srv req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) body := &verboseTest{} err := json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Verbose", body.Test) w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"Status":"Ok"}`)) })) defer srv.Close() out := &bytes.Buffer{} c, _ := NewClient(nil) c.Verbose = true c.VerboseOut = out req, err := http.NewRequest("POST", srv.URL, nil) req.Header.Set("Authorization", "Basic ABC") req.Header.Set("Content-Type", "application/json") require.Nil(t, err) require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) res, err := c.Do(req) require.Nil(t, err) io.Copy(io.Discard, res.Body) res.Body.Close() assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, called) s := out.String() t.Log(s) expected := []string{ "> Host: 127.0.0.1:", "\n> Authorization: Basic * * * * *\n", "\n> Content-Type: application/json\n", "\n> \n" + `{"Test":"Verbose"}` + "\n\n", "\n< HTTP/1.1 200 OK\n", "\n< Content-Type: application/json\n", "\n< \n" + `{"Status":"Ok"}`, } for _, substr := range expected { if !assert.True(t, strings.Contains(s, substr)) { t.Logf("missing: %q", substr) } } } func TestVerboseWithBinaryBody(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called, 1) t.Logf("srv req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) by, err := io.ReadAll(r.Body) assert.Nil(t, err) assert.Equal(t, "binary-request", string(by)) w.Header().Set("Content-Type", "application/octet-stream") w.Write([]byte(`binary-response`)) })) defer srv.Close() out := &bytes.Buffer{} c, _ := NewClient(nil) c.Verbose = true c.VerboseOut = out buf := bytes.NewBufferString("binary-request") req, err := http.NewRequest("POST", srv.URL, buf) req.Header.Set("Authorization", "Basic ABC") req.Header.Set("Content-Type", "application/octet-stream") require.Nil(t, err) res, err := c.Do(req) require.Nil(t, err) io.Copy(io.Discard, res.Body) res.Body.Close() assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, called) s := out.String() t.Log(s) expected := []string{ "> Host: 127.0.0.1:", "\n> Authorization: Basic * * * * *\n", "\n> Content-Type: application/octet-stream\n", "\n< HTTP/1.1 200 OK\n", "\n< Content-Type: application/octet-stream\n", } for _, substr := range expected { if !assert.True(t, strings.Contains(s, substr)) { t.Logf("missing: %q", substr) } } assert.False(t, strings.Contains(s, "binary"), "contains binary request or response body") } func TestVerboseEnabledWithDebugging(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called, 1) t.Logf("srv req %s %s", 
r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) body := &verboseTest{} err := json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Verbose", body.Test) w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"Status":"Ok"}`)) })) defer srv.Close() out := &bytes.Buffer{} c, _ := NewClient(nil) c.Verbose = true c.VerboseOut = out c.DebuggingVerbose = true req, err := http.NewRequest("POST", srv.URL, nil) req.Header.Set("Authorization", "Basic ABC") req.Header.Set("Content-Type", "application/json") require.Nil(t, err) require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) res, err := c.Do(req) require.Nil(t, err) io.Copy(io.Discard, res.Body) res.Body.Close() assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, called) s := out.String() t.Log(s) expected := []string{ "> Host: 127.0.0.1:", "\n> Authorization: Basic ABC\n", "\n> Content-Type: application/json\n", "\n> \n" + `{"Test":"Verbose"}` + "\n\n", "\n< HTTP/1.1 200 OK\n", "\n< Content-Type: application/json\n", "\n< \n" + `{"Status":"Ok"}`, } for _, substr := range expected { if !assert.True(t, strings.Contains(s, substr)) { t.Logf("missing: %q", substr) } } } func TestVerboseDisabled(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called, 1) t.Logf("srv req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) body := &verboseTest{} err := json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Verbose", body.Test) w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"Status":"Ok"}`)) })) defer srv.Close() out := &bytes.Buffer{} c, _ := NewClient(nil) c.Verbose = false c.VerboseOut = out c.DebuggingVerbose = true req, err := http.NewRequest("POST", srv.URL, nil) req.Header.Set("Authorization", "Basic ABC") req.Header.Set("Content-Type", "application/json") require.Nil(t, err) require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) res, err := c.Do(req) require.Nil(t, err) io.Copy(io.Discard, res.Body) res.Body.Close() assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, called) assert.EqualValues(t, 0, out.Len(), out.String()) } git-lfs-3.6.1/locking/000077500000000000000000000000001472372047300145255ustar00rootroot00000000000000git-lfs-3.6.1/locking/api.go000066400000000000000000000271671472372047300156420ustar00rootroot00000000000000package locking import ( "fmt" "net/http" "strconv" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/git-lfs/git-lfs/v3/tr" ) type lockClient interface { Lock(remote string, lockReq *lockRequest) (*lockResponse, int, error) Unlock(ref *git.Ref, remote, id string, force bool) (*unlockResponse, int, error) Search(remote string, searchReq *lockSearchRequest) (*lockList, int, error) SearchVerifiable(remote string, vreq *lockVerifiableRequest) (*lockVerifiableList, int, error) } type httpLockClient struct { *lfsapi.Client } type lockRef struct { Name string `json:"name,omitempty"` } // LockRequest encapsulates the payload sent across the API when a client would // like to obtain a lock against a particular path on a given remote. type lockRequest struct { // Path is the path that the client would like to obtain a lock against. 
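// The path is relative to the repository root and uses forward
// slashes, e.g. (hypothetical) "design/logo.psd".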
Path string `json:"path"` Ref *lockRef `json:"ref,omitempty"` } // LockResponse encapsulates the information sent over the API in response to // a `LockRequest`. type lockResponse struct { // Lock is the Lock that was optionally created in response to the // payload that was sent (see above). If the lock already exists, then // the existing lock is sent in this field instead, and the author of // that lock remains the same, meaning that the client failed to obtain // that lock. An HTTP status of "409 - Conflict" is used here. // // If the lock was unable to be created, this field will hold the // zero-value of Lock and the Err field will provide a more detailed set // of information. // // If an error was experienced in creating this lock, then the // zero-value of Lock should be sent here instead. Lock *Lock `json:"lock"` // Message is the optional error that was encountered while trying to create // the above lock. Message string `json:"message,omitempty"` DocumentationURL string `json:"documentation_url,omitempty"` RequestID string `json:"request_id,omitempty"` } func (c *httpLockClient) Lock(remote string, lockReq *lockRequest) (*lockResponse, int, error) { e := c.Endpoints.Endpoint("upload", remote) req, err := c.NewRequest("POST", e, "locks", lockReq) if err != nil { return nil, 0, err } req = c.Client.LogRequest(req, "lfs.locks.lock") res, err := c.DoAPIRequestWithAuth(remote, req) if err != nil { if res != nil { return nil, res.StatusCode, err } return nil, 0, err } lockRes := &lockResponse{} err = lfshttp.DecodeJSON(res, lockRes) if err != nil { return nil, res.StatusCode, err } if lockRes.Lock == nil && len(lockRes.Message) == 0 { return nil, res.StatusCode, errors.New(tr.Tr.Get("invalid server response")) } return lockRes, res.StatusCode, nil } // UnlockRequest encapsulates the data sent in an API request to remove a lock. type unlockRequest struct { // Force determines whether or not the lock should be "forcibly" // unlocked; that is to say whether or not a given individual should be // able to break a different individual's lock. Force bool `json:"force"` Ref *lockRef `json:"ref,omitempty"` } // UnlockResponse is the result sent back from the API when asked to remove a // lock. type unlockResponse struct { // Lock is the lock corresponding to the asked-about lock in the // `UnlockPayload` (see above). If no matching lock was found, this // field will take the zero-value of Lock, and Err will be non-nil. Lock *Lock `json:"lock"` // Message is an optional field which holds any error that was experienced // while removing the lock. 
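//
// When the unlock succeeds, Message is empty and the wire response
// might look like (hypothetical values):
//
//	{"lock": {"id": "123", "path": "design/logo.psd"}}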
Message string `json:"message,omitempty"` DocumentationURL string `json:"documentation_url,omitempty"` RequestID string `json:"request_id,omitempty"` } func (c *httpLockClient) Unlock(ref *git.Ref, remote, id string, force bool) (*unlockResponse, int, error) { e := c.Endpoints.Endpoint("upload", remote) suffix := fmt.Sprintf("locks/%s/unlock", id) req, err := c.NewRequest("POST", e, suffix, &unlockRequest{ Force: force, Ref: &lockRef{Name: ref.Refspec()}, }) if err != nil { return nil, 0, err } req = c.Client.LogRequest(req, "lfs.locks.unlock") res, err := c.DoAPIRequestWithAuth(remote, req) if err != nil { if res != nil { return nil, res.StatusCode, err } return nil, 0, err } unlockRes := &unlockResponse{} err = lfshttp.DecodeJSON(res, unlockRes) if err != nil { return nil, res.StatusCode, err } if unlockRes.Lock == nil && len(unlockRes.Message) == 0 { return nil, res.StatusCode, errors.New(tr.Tr.Get("invalid server response")) } return unlockRes, res.StatusCode, nil } // Filter represents a single qualifier to apply against a set of locks. type lockFilter struct { // Property is the property to search against. // Value is the value that the property must take. Property, Value string } // LockSearchRequest encapsulates the request sent to the server when the client // would like a list of locks that match the given criteria. type lockSearchRequest struct { // Filters is the set of filters to query against. If the client wishes // to obtain a list of all locks, an empty array should be passed here. Filters []lockFilter // Cursor is an optional field used to tell the server which lock was // seen last, if scanning through multiple pages of results. // // Servers must return a list of locks sorted in reverse chronological // order, so the Cursor provides a consistent method of viewing all // locks, even if more were created between two requests. Cursor string // Limit is the maximum number of locks to return in a single page. Limit int Refspec string } func (r *lockSearchRequest) QueryValues() map[string]string { q := make(map[string]string) for _, filter := range r.Filters { q[filter.Property] = filter.Value } if len(r.Cursor) > 0 { q["cursor"] = r.Cursor } if r.Limit > 0 { q["limit"] = strconv.Itoa(r.Limit) } if len(r.Refspec) > 0 { q["refspec"] = r.Refspec } return q } // LockList encapsulates a set of Locks. type lockList struct { // Locks is the set of locks returned back, typically matching the query // parameters sent in the LockListRequest call. If no locks were matched // from a given query, then `Locks` will be represented as an empty // array. Locks []Lock `json:"locks"` // NextCursor returns the Id of the Lock the client should update its // cursor to, if there are multiple pages of results for a particular // `LockListRequest`. NextCursor string `json:"next_cursor,omitempty"` // Message populates any error that was encountered during the search. If no // error was encountered and the operation was successful, then a value // of nil will be passed here. 
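//
// A page of search results might look like (hypothetical values):
//
//	{"locks": [{"id": "1", "path": "a.bin"}], "next_cursor": "2"}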
Message string `json:"message,omitempty"` DocumentationURL string `json:"documentation_url,omitempty"` RequestID string `json:"request_id,omitempty"` } func (c *httpLockClient) Search(remote string, searchReq *lockSearchRequest) (*lockList, int, error) { e := c.Endpoints.Endpoint("download", remote) req, err := c.NewRequest("GET", e, "locks", nil) if err != nil { return nil, 0, err } q := req.URL.Query() for key, value := range searchReq.QueryValues() { q.Add(key, value) } req.URL.RawQuery = q.Encode() req = c.Client.LogRequest(req, "lfs.locks.search") res, err := c.DoAPIRequestWithAuth(remote, req) if err != nil { if res != nil { return nil, res.StatusCode, err } return nil, 0, err } locks := &lockList{} if res.StatusCode == http.StatusOK { err = lfshttp.DecodeJSON(res, locks) } return locks, res.StatusCode, err } // lockVerifiableRequest encapsulates the request sent to the server when the // client would like a list of locks to verify a Git push. type lockVerifiableRequest struct { Ref *lockRef `json:"ref,omitempty"` // Cursor is an optional field used to tell the server which lock was // seen last, if scanning through multiple pages of results. // // Servers must return a list of locks sorted in reverse chronological // order, so the Cursor provides a consistent method of viewing all // locks, even if more were created between two requests. Cursor string `json:"cursor,omitempty"` // Limit is the maximum number of locks to return in a single page. Limit int `json:"limit,omitempty"` } // lockVerifiableList encapsulates a set of Locks to verify a Git push. type lockVerifiableList struct { // Ours is the set of locks returned back matching filenames that the user // is allowed to edit. Ours []Lock `json:"ours"` // Their is the set of locks returned back matching filenames that the user // is NOT allowed to edit. Any edits matching these files should reject // the Git push. Theirs []Lock `json:"theirs"` // NextCursor returns the Id of the Lock the client should update its // cursor to, if there are multiple pages of results for a particular // `LockListRequest`. NextCursor string `json:"next_cursor,omitempty"` // Message populates any error that was encountered during the search. If no // error was encountered and the operation was successful, then a value // of nil will be passed here. Message string `json:"message,omitempty"` DocumentationURL string `json:"documentation_url,omitempty"` RequestID string `json:"request_id,omitempty"` } func (c *httpLockClient) SearchVerifiable(remote string, vreq *lockVerifiableRequest) (*lockVerifiableList, int, error) { e := c.Endpoints.Endpoint("upload", remote) req, err := c.NewRequest("POST", e, "locks/verify", vreq) if err != nil { return nil, 0, err } req = c.Client.LogRequest(req, "lfs.locks.verify") res, err := c.DoAPIRequestWithAuth(remote, req) if err != nil { if res != nil { return nil, res.StatusCode, err } return nil, 0, err } locks := &lockVerifiableList{} if res.StatusCode == http.StatusOK { err = lfshttp.DecodeJSON(res, locks) } return locks, res.StatusCode, err } // User represents the owner of a lock. type User struct { // Name is the name of the individual who would like to obtain the // lock, for instance: "Rick Sanchez". Name string `json:"name"` } func NewUser(name string) *User { return &User{Name: name} } // String implements the fmt.Stringer interface. 
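//
// For example, NewUser("Rick Sanchez").String() returns "Rick Sanchez",
// echoing the Name field's example above.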
func (u *User) String() string { return u.Name } type lockClientInfo struct { remote string operation string } type genericLockClient struct { client *lfsapi.Client lclients map[lockClientInfo]lockClient } func newGenericLockClient(client *lfsapi.Client) *genericLockClient { return &genericLockClient{ client: client, lclients: make(map[lockClientInfo]lockClient), } } func (c *genericLockClient) getClient(remote, operation string) lockClient { info := lockClientInfo{ remote: remote, operation: operation, } if client := c.lclients[info]; client != nil { return client } transfer := c.client.SSHTransfer(operation, remote) var lclient lockClient if transfer != nil { lclient = &sshLockClient{transfer: transfer, Client: c.client} } else { lclient = &httpLockClient{Client: c.client} } c.lclients[info] = lclient return lclient } func (c *genericLockClient) Lock(remote string, lockReq *lockRequest) (*lockResponse, int, error) { return c.getClient(remote, "upload").Lock(remote, lockReq) } func (c *genericLockClient) Unlock(ref *git.Ref, remote, id string, force bool) (*unlockResponse, int, error) { return c.getClient(remote, "upload").Unlock(ref, remote, id, force) } func (c *genericLockClient) Search(remote string, searchReq *lockSearchRequest) (*lockList, int, error) { return c.getClient(remote, "download").Search(remote, searchReq) } func (c *genericLockClient) SearchVerifiable(remote string, vreq *lockVerifiableRequest) (*lockVerifiableList, int, error) { return c.getClient(remote, "upload").SearchVerifiable(remote, vreq) } git-lfs-3.6.1/locking/api_test.go000066400000000000000000000171701472372047300166720ustar00rootroot00000000000000package locking import ( "encoding/json" "fmt" "net/http" "net/http/httptest" "os" "path/filepath" "strings" "testing" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/xeipuuv/gojsonschema" ) func TestAPILock(t *testing.T) { require.NotNil(t, createReqSchema) require.NotNil(t, createResSchema) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/api/locks" { w.WriteHeader(404) return } assert.Equal(t, "POST", r.Method) assert.Equal(t, lfshttp.MediaType, r.Header.Get("Accept")) assert.Equal(t, lfshttp.RequestContentType, r.Header.Get("Content-Type")) assert.Equal(t, "53", r.Header.Get("Content-Length")) reqLoader, body := gojsonschema.NewReaderLoader(r.Body) lockReq := &lockRequest{} err := json.NewDecoder(body).Decode(lockReq) r.Body.Close() assert.Nil(t, err) assert.Equal(t, "refs/heads/master", lockReq.Ref.Name) assert.Equal(t, "request", lockReq.Path) assertSchema(t, createReqSchema, reqLoader) w.Header().Set("Content-Type", "application/json") resLoader, resWriter := gojsonschema.NewWriterLoader(w) err = json.NewEncoder(resWriter).Encode(&lockResponse{ Lock: &Lock{ Id: "1", Path: "response", }, }) assert.Nil(t, err) assertSchema(t, createResSchema, resLoader) })) defer srv.Close() c, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.url": srv.URL + "/api", })) require.Nil(t, err) lc := &httpLockClient{Client: c} lockRes, status, err := lc.Lock("", &lockRequest{Path: "request", Ref: &lockRef{Name: "refs/heads/master"}}) require.Nil(t, err) assert.Equal(t, 200, status) assert.Equal(t, "1", lockRes.Lock.Id) assert.Equal(t, "response", lockRes.Lock.Path) } func TestAPIUnlock(t *testing.T) { require.NotNil(t, delReqSchema) 
require.NotNil(t, createResSchema) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/api/locks/123/unlock" { w.WriteHeader(404) return } assert.Equal(t, "POST", r.Method) assert.Equal(t, lfshttp.MediaType, r.Header.Get("Accept")) assert.Equal(t, lfshttp.RequestContentType, r.Header.Get("Content-Type")) reqLoader, body := gojsonschema.NewReaderLoader(r.Body) unlockReq := &unlockRequest{} err := json.NewDecoder(body).Decode(unlockReq) r.Body.Close() assert.Nil(t, err) assert.True(t, unlockReq.Force) assertSchema(t, delReqSchema, reqLoader) w.Header().Set("Content-Type", "application/json") resLoader, resWriter := gojsonschema.NewWriterLoader(w) err = json.NewEncoder(resWriter).Encode(&unlockResponse{ Lock: &Lock{ Id: "123", Path: "response", }, }) assert.Nil(t, err) assertSchema(t, createResSchema, resLoader) })) defer srv.Close() c, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.url": srv.URL + "/api", })) require.Nil(t, err) lc := &httpLockClient{Client: c} unlockRes, status, err := lc.Unlock(&git.Ref{ Name: "master", Sha: "6161616161616161616161616161616161616161", Type: git.RefTypeLocalBranch, }, "", "123", true) require.Nil(t, err) assert.Equal(t, 200, status) assert.Equal(t, "123", unlockRes.Lock.Id) assert.Equal(t, "response", unlockRes.Lock.Path) } func TestAPISearch(t *testing.T) { require.NotNil(t, listResSchema) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/api/locks" { w.WriteHeader(404) return } assert.Equal(t, "GET", r.Method) assert.Equal(t, lfshttp.MediaType, r.Header.Get("Accept")) assert.Equal(t, "", r.Header.Get("Content-Type")) q := r.URL.Query() assert.Equal(t, "A", q.Get("a")) assert.Equal(t, "cursor", q.Get("cursor")) assert.Equal(t, "5", q.Get("limit")) w.Header().Set("Content-Type", "application/json") resLoader, resWriter := gojsonschema.NewWriterLoader(w) err := json.NewEncoder(resWriter).Encode(&lockList{ Locks: []Lock{ {Id: "1"}, {Id: "2"}, }, }) assert.Nil(t, err) assertSchema(t, listResSchema, resLoader) })) defer srv.Close() c, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.url": srv.URL + "/api", })) require.Nil(t, err) lc := &httpLockClient{Client: c} locks, status, err := lc.Search("", &lockSearchRequest{ Filters: []lockFilter{ {Property: "a", Value: "A"}, }, Cursor: "cursor", Limit: 5, }) require.Nil(t, err) assert.Equal(t, 200, status) assert.Equal(t, 2, len(locks.Locks)) assert.Equal(t, "1", locks.Locks[0].Id) assert.Equal(t, "2", locks.Locks[1].Id) } func TestAPISearchVerifiable(t *testing.T) { require.NotNil(t, verifyResSchema) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/api/locks/verify" { w.WriteHeader(404) return } assert.Equal(t, "POST", r.Method) assert.Equal(t, lfshttp.MediaType, r.Header.Get("Accept")) assert.Equal(t, lfshttp.RequestContentType, r.Header.Get("Content-Type")) body := lockVerifiableRequest{} if assert.Nil(t, json.NewDecoder(r.Body).Decode(&body)) { assert.Equal(t, "cursor", body.Cursor) assert.Equal(t, 5, body.Limit) } w.Header().Set("Content-Type", "application/json") resLoader, resWriter := gojsonschema.NewWriterLoader(w) err := json.NewEncoder(resWriter).Encode(&lockVerifiableList{ Ours: []Lock{ {Id: "1"}, {Id: "2"}, }, Theirs: []Lock{ {Id: "3"}, }, }) assert.Nil(t, err) assertSchema(t, verifyResSchema, resLoader) })) defer srv.Close() c, err := lfsapi.NewClient(lfshttp.NewContext(nil, 
nil, map[string]string{ "lfs.url": srv.URL + "/api", })) require.Nil(t, err) lc := &httpLockClient{Client: c} locks, status, err := lc.SearchVerifiable("", &lockVerifiableRequest{ Cursor: "cursor", Limit: 5, }) require.Nil(t, err) assert.Equal(t, 200, status) assert.Equal(t, 2, len(locks.Ours)) assert.Equal(t, "1", locks.Ours[0].Id) assert.Equal(t, "2", locks.Ours[1].Id) assert.Equal(t, 1, len(locks.Theirs)) assert.Equal(t, "3", locks.Theirs[0].Id) } var ( createReqSchema *sourcedSchema createResSchema *sourcedSchema delReqSchema *sourcedSchema listResSchema *sourcedSchema verifyResSchema *sourcedSchema ) func init() { wd, err := os.Getwd() if err != nil { fmt.Println("getwd error:", err) return } createReqSchema = getSchema(wd, "schemas/http-lock-create-request-schema.json") createResSchema = getSchema(wd, "schemas/http-lock-create-response-schema.json") delReqSchema = getSchema(wd, "schemas/http-lock-delete-request-schema.json") listResSchema = getSchema(wd, "schemas/http-lock-list-response-schema.json") verifyResSchema = getSchema(wd, "schemas/http-lock-verify-response-schema.json") } type sourcedSchema struct { Source string *gojsonschema.Schema } func getSchema(wd, relpath string) *sourcedSchema { abspath := filepath.ToSlash(filepath.Join(wd, relpath)) s, err := gojsonschema.NewSchema(gojsonschema.NewReferenceLoader(fmt.Sprintf("file:///%s", abspath))) if err != nil { fmt.Printf("schema load error for %q: %+v\n", relpath, err) } return &sourcedSchema{Source: relpath, Schema: s} } func assertSchema(t *testing.T, schema *sourcedSchema, dataLoader gojsonschema.JSONLoader) { res, err := schema.Validate(dataLoader) if assert.Nil(t, err) { if res.Valid() { return } resErrors := res.Errors() valErrors := make([]string, 0, len(resErrors)) for _, resErr := range resErrors { valErrors = append(valErrors, resErr.String()) } t.Errorf("Schema: %s\n%s", schema.Source, strings.Join(valErrors, "\n")) } } git-lfs-3.6.1/locking/cache.go000066400000000000000000000043341472372047300161230ustar00rootroot00000000000000package locking import ( "strings" "github.com/git-lfs/git-lfs/v3/tools/kv" ) const ( // We want to use a single cache file for integrity, but to make it easy to // list all locks, prefix the id->path map in a way we can identify (something // that won't be in a path) idKeyPrefix string = "*id*://" ) type LockCache struct { kv *kv.Store } func NewLockCache(filepath string) (*LockCache, error) { kv, err := kv.NewStore(filepath) if err != nil { return nil, err } return &LockCache{kv}, nil } // Cache a successful lock for faster local lookup later func (c *LockCache) Add(l Lock) error { // Store reference in both directions // Path -> Lock c.kv.Set(l.Path, &l) // EncodedId -> Lock (encoded so we can easily identify) c.kv.Set(c.encodeIdKey(l.Id), &l) return nil } // Remove a cached lock by path because it's been relinquished func (c *LockCache) RemoveByPath(filePath string) error { ilock := c.kv.Get(filePath) if lock, ok := ilock.(*Lock); ok && lock != nil { c.kv.Remove(lock.Path) // Id as key is encoded c.kv.Remove(c.encodeIdKey(lock.Id)) } return nil } // Remove a cached lock by id because it's been relinquished func (c *LockCache) RemoveById(id string) error { // Id as key is encoded idkey := c.encodeIdKey(id) ilock := c.kv.Get(idkey) if lock, ok := ilock.(*Lock); ok && lock != nil { c.kv.Remove(idkey) c.kv.Remove(lock.Path) } return nil } // Get the list of cached locked files func (c *LockCache) Locks() []Lock { var locks []Lock c.kv.Visit(func(key string, val interface{}) bool { // Only 
report file->id entries not reverse if !c.isIdKey(key) { lock := val.(*Lock) locks = append(locks, *lock) } return true // continue }) return locks } // Clear the cache func (c *LockCache) Clear() { c.kv.RemoveAll() } // Save the cache func (c *LockCache) Save() error { return c.kv.Save() } func (c *LockCache) encodeIdKey(id string) string { // Safety against accidents if !c.isIdKey(id) { return idKeyPrefix + id } return id } func (c *LockCache) decodeIdKey(key string) string { // Safety against accidents if c.isIdKey(key) { return key[len(idKeyPrefix):] } return key } func (c *LockCache) isIdKey(key string) bool { return strings.HasPrefix(key, idKeyPrefix) } git-lfs-3.6.1/locking/cache_test.go000066400000000000000000000022531472372047300171600ustar00rootroot00000000000000package locking import ( "os" "testing" "github.com/stretchr/testify/assert" ) func TestLockCache(t *testing.T) { var err error tmpf, err := os.CreateTemp("", "testCacheLock") assert.Nil(t, err) defer func() { os.Remove(tmpf.Name()) }() tmpf.Close() cache, err := NewLockCache(tmpf.Name()) assert.Nil(t, err) testLocks := []Lock{ Lock{Path: "folder/test1.dat", Id: "101"}, Lock{Path: "folder/test2.dat", Id: "102"}, Lock{Path: "root.dat", Id: "103"}, } for _, l := range testLocks { err = cache.Add(l) assert.Nil(t, err) } locks := cache.Locks() for _, l := range testLocks { assert.Contains(t, locks, l) } assert.Equal(t, len(testLocks), len(locks)) err = cache.RemoveByPath("folder/test2.dat") assert.Nil(t, err) locks = cache.Locks() // delete item 1 from test locks testLocks = append(testLocks[:1], testLocks[2:]...) for _, l := range testLocks { assert.Contains(t, locks, l) } assert.Equal(t, len(testLocks), len(locks)) err = cache.RemoveById("101") assert.Nil(t, err) locks = cache.Locks() testLocks = testLocks[1:] for _, l := range testLocks { assert.Contains(t, locks, l) } assert.Equal(t, len(testLocks), len(locks)) } git-lfs-3.6.1/locking/lockable.go000066400000000000000000000153311472372047300166330ustar00rootroot00000000000000package locking import ( "os" "path/filepath" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/git/gitattr" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" ) // GetLockablePatterns returns a list of patterns in .gitattributes which are // marked as lockable func (c *Client) GetLockablePatterns() []string { c.ensureLockablesLoaded() return c.lockablePatterns } // getLockableFilter returns the internal filter used to check if a file is lockable func (c *Client) getLockableFilter() *filepathfilter.Filter { c.ensureLockablesLoaded() return c.lockableFilter } func (c *Client) ensureLockablesLoaded() { c.lockableMutex.Lock() defer c.lockableMutex.Unlock() // Only load once if c.lockablePatterns == nil { c.refreshLockablePatterns() } } // Internal function to repopulate lockable patterns // You must have locked the c.lockableMutex in the caller func (c *Client) refreshLockablePatterns() { paths := git.GetAttributePaths(gitattr.NewMacroProcessor(), c.LocalWorkingDir, c.LocalGitDir) // Always make non-nil even if empty c.lockablePatterns = make([]string, 0, len(paths)) for _, p := range paths { if p.Lockable { c.lockablePatterns = append(c.lockablePatterns, filepath.ToSlash(p.Path)) } } c.lockableFilter = filepathfilter.New(c.lockablePatterns, nil, filepathfilter.GitAttributes, filepathfilter.DefaultValue(false)) } // IsFileLockable returns whether a specific file path is
marked as Lockable, // ie has the 'lockable' attribute in .gitattributes // Lockable patterns are cached once for performance, unless you call RefreshLockablePatterns // path should be relative to repository root func (c *Client) IsFileLockable(path string) bool { return c.getLockableFilter().Allows(path) } // FixAllLockableFileWriteFlags recursively scans the repo looking for files which // are lockable, and makes sure their write flags are set correctly based on // whether they are currently locked or unlocked. // Files which are unlocked are made read-only, files which are locked are made // writeable. // This function can be used after a clone or checkout to ensure that file // state correctly reflects the locking state func (c *Client) FixAllLockableFileWriteFlags() error { return c.fixFileWriteFlags(c.LocalWorkingDir, c.LocalWorkingDir, c.getLockableFilter(), nil) } // FixFileWriteFlagsInDir scans dir (which can either be a relative dir // from the root of the repo, or an absolute dir within the repo) looking for // files to change permissions for. // If lockablePatterns is non-nil, then any file matching those patterns will be // checked to see if it is currently locked by the current committer, and if so // it will be writeable, and if not locked it will be read-only. // If unlockablePatterns is non-nil, then any file matching those patterns will // be made writeable if it is not already. This can be used to reset files to // writeable when their 'lockable' attribute is turned off. func (c *Client) FixFileWriteFlagsInDir(dir string, lockablePatterns, unlockablePatterns []string) error { // early-out if no patterns if len(lockablePatterns) == 0 && len(unlockablePatterns) == 0 { return nil } absPath := dir if !filepath.IsAbs(dir) { absPath = filepath.Join(c.LocalWorkingDir, dir) } stat, err := os.Stat(absPath) if err != nil { return err } if !stat.IsDir() { return errors.New(tr.Tr.Get("%q is not a valid directory", dir)) } var lockableFilter *filepathfilter.Filter var unlockableFilter *filepathfilter.Filter if lockablePatterns != nil { lockableFilter = filepathfilter.New(lockablePatterns, nil, filepathfilter.GitAttributes) } if unlockablePatterns != nil { unlockableFilter = filepathfilter.New(unlockablePatterns, nil, filepathfilter.GitAttributes) } return c.fixFileWriteFlags(absPath, c.LocalWorkingDir, lockableFilter, unlockableFilter) } // Internal implementation of fixing file write flags with precompiled filters func (c *Client) fixFileWriteFlags(absPath, workingDir string, lockable, unlockable *filepathfilter.Filter) error { // Build a list of files lsFiles, err := git.NewLsFiles(workingDir, !c.ModifyIgnoredFiles, false) if err != nil { return err } for f := range lsFiles.Files { err = c.fixSingleFileWriteFlags(f, lockable, unlockable) if err != nil { return err } } return nil } // FixLockableFileWriteFlags checks each file in the provided list, and for // those which are lockable, makes sure their write flags are set correctly // based on whether they are currently locked or unlocked. Files which are // unlocked are made read-only, files which are locked are made writeable. // Files which are not lockable are ignored. 
// This function can be used after a clone or checkout to ensure that file // state correctly reflects the locking state, and is more efficient than // FixAllLockableFileWriteFlags when you know which files changed func (c *Client) FixLockableFileWriteFlags(files []string) error { // early-out if no lockable patterns if len(c.GetLockablePatterns()) == 0 { return nil } var errs []error for _, f := range files { err := c.fixSingleFileWriteFlags(f, c.getLockableFilter(), nil) if err != nil { errs = append(errs, err) } } return errors.Combine(errs) } // fixSingleFileWriteFlags fixes write flags on a single file // If lockablePatterns is non-nil, then any file matching those patterns will be // checked to see if it is currently locked by the current committer, and if so // it will be writeable, and if not locked it will be read-only. // If unlockablePatterns is non-nil, then any file matching those patterns will // be made writeable if it is not already. This can be used to reset files to // writeable when their 'lockable' attribute is turned off. func (c *Client) fixSingleFileWriteFlags(file string, lockable, unlockable *filepathfilter.Filter) error { // Convert to git-style forward slash separators if necessary // Necessary to match attributes if filepath.Separator == '\\' { file = strings.Replace(file, "\\", "/", -1) } if lockable != nil && lockable.Allows(file) { // Lockable files are writeable only if they're currently locked err := tools.SetFileWriteFlag(file, c.IsFileLockedByCurrentCommitter(file)) // Ignore not exist errors if err != nil && !os.IsNotExist(err) { return err } } else if unlockable != nil && unlockable.Allows(file) { // Unlockable files are always writeable // We only check files which match the incoming patterns to avoid // checking every file in the system all the time, and only do it // when a file has had its lockable attribute removed err := tools.SetFileWriteFlag(file, true) if err != nil && !os.IsNotExist(err) { return err } } return nil } git-lfs-3.6.1/locking/locks.go000066400000000000000000000340261472372047300161740ustar00rootroot00000000000000package locking import ( "encoding/json" "io" "net/http" "os" "path/filepath" "sync" "time" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tools/kv" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) var ( // ErrNoMatchingLocks is an error returned when no matching locks were // able to be resolved ErrNoMatchingLocks = errors.New(tr.Tr.Get("no matching locks found")) // ErrLockAmbiguous is an error returned when multiple matching locks // were found ErrLockAmbiguous = errors.New(tr.Tr.Get("multiple locks found; ambiguous")) ) type LockCacher interface { Add(l Lock) error RemoveByPath(filePath string) error RemoveById(id string) error Locks() []Lock Clear() Save() error } // Client is the main interface object for the locking package type Client struct { Remote string RemoteRef *git.Ref client lockClient cache LockCacher cacheDir string cfg *config.Configuration lockablePatterns []string lockableFilter *filepathfilter.Filter lockableMutex sync.Mutex LocalWorkingDir string LocalGitDir string SetLockableFilesReadOnly bool ModifyIgnoredFiles bool } // NewClient creates a new locking client with the given configuration // You must call the returned object's `Close` method when you are finished 
with // it func NewClient(remote string, lfsClient *lfsapi.Client, cfg *config.Configuration) (*Client, error) { return &Client{ Remote: remote, client: newGenericLockClient(lfsClient), cache: &nilLockCacher{}, cfg: cfg, ModifyIgnoredFiles: lfsClient.GitEnv().Bool("lfs.lockignoredfiles", false), LocalWorkingDir: cfg.LocalWorkingDir(), LocalGitDir: cfg.LocalGitDir(), }, nil } func (c *Client) SetupFileCache(path string) error { stat, err := os.Stat(path) if err != nil { return errors.Wrap(err, tr.Tr.Get("lock cache initialization")) } lockFile := path if stat.IsDir() { lockFile = filepath.Join(path, "lockcache.db") } cache, err := NewLockCache(lockFile) if err != nil { return errors.Wrap(err, tr.Tr.Get("lock cache initialization")) } c.cache = cache c.cacheDir = filepath.Join(path, "cache") return nil } // Close this client instance; must be called to dispose of resources func (c *Client) Close() error { return c.cache.Save() } // LockFile attempts to lock a file on the current remote // path must be relative to the root of the repository // Returns the lock id if successful, or an error func (c *Client) LockFile(path string) (Lock, error) { lockRes, _, err := c.client.Lock(c.Remote, &lockRequest{ Path: path, Ref: &lockRef{Name: c.RemoteRef.Refspec()}, }) if err != nil { return Lock{}, errors.Wrap(err, tr.Tr.Get("locking API")) } if len(lockRes.Message) > 0 { if len(lockRes.RequestID) > 0 { tracerx.Printf("Server Request ID: %s", lockRes.RequestID) } return Lock{}, errors.New(tr.Tr.Get("server unable to create lock: %s", lockRes.Message)) } lock := *lockRes.Lock if err := c.cache.Add(lock); err != nil { return Lock{}, errors.Wrap(err, tr.Tr.Get("lock cache")) } abs, err := c.getAbsolutePath(path) if err != nil { return Lock{}, errors.Wrap(err, tr.Tr.Get("make lock path absolute")) } // If the file exists, ensure that it's writeable on return if tools.FileExists(abs) { if err := tools.SetFileWriteFlag(abs, true); err != nil { return Lock{}, errors.Wrap(err, tr.Tr.Get("set file write flag")) } } return lock, nil } // getAbsolutePath takes a repository-relative path and makes it absolute. 
// // For instance, given a repository in /usr/local/src/my-repo and a file called // dir/foo/bar.txt, getAbsolutePath will return: // // /usr/local/src/my-repo/dir/foo/bar.txt func (c *Client) getAbsolutePath(p string) (string, error) { return filepath.Join(c.LocalWorkingDir, p), nil } // UnlockFile attempts to unlock a file on the current remote // path must be relative to the root of the repository // Force causes the file to be unlocked from other users as well func (c *Client) UnlockFile(path string, force bool) error { id, err := c.lockIdFromPath(path) if err != nil { return errors.New(tr.Tr.Get("unable to get lock ID: %v", err)) } return c.UnlockFileById(id, force) } // UnlockFileById attempts to unlock a lock with a given id on the current remote // Force causes the file to be unlocked from other users as well func (c *Client) UnlockFileById(id string, force bool) error { unlockRes, _, err := c.client.Unlock(c.RemoteRef, c.Remote, id, force) if err != nil { return errors.Wrap(err, tr.Tr.Get("locking API")) } if len(unlockRes.Message) > 0 { if len(unlockRes.RequestID) > 0 { tracerx.Printf("Server Request ID: %s", unlockRes.RequestID) } return errors.New(tr.Tr.Get("server unable to unlock: %s", unlockRes.Message)) } if err := c.cache.RemoveById(id); err != nil { return errors.New(tr.Tr.Get("error caching unlock information: %v", err)) } if unlockRes.Lock != nil { abs, err := c.getAbsolutePath(unlockRes.Lock.Path) if err != nil { return errors.Wrap(err, tr.Tr.Get("make lock path absolute")) } // Make non-writeable if required if c.SetLockableFilesReadOnly && c.IsFileLockable(unlockRes.Lock.Path) { return tools.SetFileWriteFlag(abs, false) } } return nil } // Lock is a record of a locked file type Lock struct { // Id is the unique identifier corresponding to this particular Lock. It // must be consistent with the local copy, and the server's copy. Id string `json:"id"` // Path is an absolute path to the file that is locked as a part of this // lock. Path string `json:"path"` // Owner is the identity of the user that created this lock. Owner *User `json:"owner,omitempty"` // LockedAt is the time at which this lock was acquired. 
LockedAt time.Time `json:"locked_at"` } // SearchLocks returns a list of locks which match the given name/value filter // If limit > 0 then search stops at that number of locks // If localOnly = true, don't query the server and report only our own local locks func (c *Client) SearchLocks(filter map[string]string, limit int, localOnly bool, cached bool) ([]Lock, error) { if localOnly { return c.searchLocalLocks(filter, limit) } else if cached { if len(filter) > 0 || limit != 0 { return []Lock{}, errors.New(tr.Tr.Get("can't search cached locks when filter or limit is set")) } locks := []Lock{} err := c.readLocksFromCacheFile("remote", func(decoder *json.Decoder) error { return decoder.Decode(&locks) }) return locks, err } else { locks, err := c.searchRemoteLocks(filter, limit) if err != nil { return locks, err } if len(filter) == 0 && limit == 0 { err = c.writeLocksToCacheFile("remote", func(writer io.Writer) error { return c.EncodeLocks(locks, writer) }) } return locks, err } } func (c *Client) SearchLocksVerifiable(limit int, cached bool) (ourLocks, theirLocks []Lock, err error) { ourLocks = make([]Lock, 0, limit) theirLocks = make([]Lock, 0, limit) if cached { if limit != 0 { return []Lock{}, []Lock{}, errors.New(tr.Tr.Get("can't search cached locks when limit is set")) } locks := &lockVerifiableList{} err := c.readLocksFromCacheFile("verifiable", func(decoder *json.Decoder) error { return decoder.Decode(&locks) }) return locks.Ours, locks.Theirs, err } else { var requestRef *lockRef if c.RemoteRef != nil { requestRef = &lockRef{Name: c.RemoteRef.Refspec()} } body := &lockVerifiableRequest{ Ref: requestRef, Limit: limit, } c.cache.Clear() for { list, status, err := c.client.SearchVerifiable(c.Remote, body) switch status { case http.StatusNotFound, http.StatusNotImplemented: return ourLocks, theirLocks, errors.NewNotImplementedError(err) case http.StatusForbidden: return ourLocks, theirLocks, errors.NewAuthError(err) } if err != nil { return ourLocks, theirLocks, err } if list.Message != "" { if len(list.RequestID) > 0 { tracerx.Printf("Server Request ID: %s", list.RequestID) } return ourLocks, theirLocks, errors.New(tr.Tr.Get("server error searching locks: %s", list.Message)) } for _, l := range list.Ours { c.cache.Add(l) ourLocks = append(ourLocks, l) if limit > 0 && (len(ourLocks)+len(theirLocks)) >= limit { return ourLocks, theirLocks, nil } } for _, l := range list.Theirs { c.cache.Add(l) theirLocks = append(theirLocks, l) if limit > 0 && (len(ourLocks)+len(theirLocks)) >= limit { return ourLocks, theirLocks, nil } } if list.NextCursor != "" { body.Cursor = list.NextCursor } else { break } } if limit == 0 { err = c.writeLocksToCacheFile("verifiable", func(writer io.Writer) error { return c.EncodeLocksVerifiable(ourLocks, theirLocks, writer) }) } return ourLocks, theirLocks, err } } func (c *Client) searchLocalLocks(filter map[string]string, limit int) ([]Lock, error) { cachedlocks := c.cache.Locks() path, filterByPath := filter["path"] id, filterById := filter["id"] lockCount := 0 locks := make([]Lock, 0, len(cachedlocks)) for _, l := range cachedlocks { // Manually filter by Path/Id if (filterByPath && path != l.Path) || (filterById && id != l.Id) { continue } locks = append(locks, l) lockCount++ if limit > 0 && lockCount >= limit { break } } return locks, nil } func (c *Client) searchRemoteLocks(filter map[string]string, limit int) ([]Lock, error) { locks := make([]Lock, 0, limit) apifilters := make([]lockFilter, 0, len(filter)) for k, v := range filter { apifilters =
append(apifilters, lockFilter{Property: k, Value: v}) } query := &lockSearchRequest{ Filters: apifilters, Limit: limit, Refspec: c.RemoteRef.Refspec(), } for { list, _, err := c.client.Search(c.Remote, query) if err != nil { return locks, errors.Wrap(err, tr.Tr.Get("locking")) } if list.Message != "" { if len(list.RequestID) > 0 { tracerx.Printf("Server Request ID: %s", list.RequestID) } return locks, errors.New(tr.Tr.Get("server error searching for locks: %s", list.Message)) } for _, l := range list.Locks { locks = append(locks, l) if limit > 0 && len(locks) >= limit { // Exit outer loop too return locks, nil } } if list.NextCursor != "" { query.Cursor = list.NextCursor } else { break } } return locks, nil } // lockIdFromPath makes a call to the LFS API and resolves the ID for the lock // at the given path. // // If the API call failed, an error will be returned. If multiple locks matched // the given path (should not happen during real-world usage), an error will be // returned. If no locks matched the given path, an error will be returned. // // If the API call is successful, and only one lock matches the given filepath, // then its ID will be returned, along with a value of "nil" for the error. func (c *Client) lockIdFromPath(path string) (string, error) { list, _, err := c.client.Search(c.Remote, &lockSearchRequest{ Filters: []lockFilter{ {Property: "path", Value: path}, }, Refspec: c.RemoteRef.Refspec(), }) if err != nil { return "", err } switch len(list.Locks) { case 0: return "", ErrNoMatchingLocks case 1: return list.Locks[0].Id, nil default: return "", ErrLockAmbiguous } } // IsFileLockedByCurrentCommitter returns whether a file is locked by the // current user, as cached locally func (c *Client) IsFileLockedByCurrentCommitter(path string) bool { filter := map[string]string{"path": path} locks, err := c.searchLocalLocks(filter, 1) if err != nil { tracerx.Printf("Error searching cached locks: %s\nForcing remote search", err) locks, _ = c.searchRemoteLocks(filter, 1) } return len(locks) > 0 } func init() { kv.RegisterTypeForStorage(&Lock{}) } func (c *Client) prepareCacheDirectory(kind string) (string, error) { cacheDir := filepath.Join(c.cacheDir, "locks") if c.RemoteRef != nil { cacheDir = filepath.Join(cacheDir, c.RemoteRef.Refspec()) } stat, err := os.Stat(cacheDir) if err == nil { if !stat.IsDir() { return cacheDir, errors.New(tr.Tr.Get("initialization of cache directory %s failed: already exists, but is not a directory", cacheDir)) } } else if os.IsNotExist(err) { err = tools.MkdirAll(cacheDir, c.cfg) if err != nil { return cacheDir, errors.Wrap(err, tr.Tr.Get("initialization of cache directory %s failed: directory creation failed", cacheDir)) } } else { return cacheDir, errors.Wrap(err, tr.Tr.Get("initialization of cache directory %s failed", cacheDir)) } return filepath.Join(cacheDir, kind), nil } func (c *Client) readLocksFromCacheFile(kind string, decoder func(*json.Decoder) error) error { cacheFile, err := c.prepareCacheDirectory(kind) if err != nil { return err } _, err = os.Stat(cacheFile) if err != nil { if os.IsNotExist(err) { return errors.New(tr.Tr.Get("no cached locks present")) } return err } file, err := os.Open(cacheFile) if err != nil { return err } defer file.Close() return decoder(json.NewDecoder(file)) } func (c *Client) EncodeLocks(locks []Lock, writer io.Writer) error { return json.NewEncoder(writer).Encode(locks) } func (c *Client) EncodeLocksVerifiable(ourLocks, theirLocks []Lock, writer io.Writer) error { return
json.NewEncoder(writer).Encode(&lockVerifiableList{ Ours: ourLocks, Theirs: theirLocks, }) } func (c *Client) writeLocksToCacheFile(kind string, writer func(io.Writer) error) error { cacheFile, err := c.prepareCacheDirectory(kind) if err != nil { return err } file, err := os.Create(cacheFile) if err != nil { return err } defer file.Close() return writer(file) } type nilLockCacher struct{} func (c *nilLockCacher) Add(l Lock) error { return nil } func (c *nilLockCacher) RemoveByPath(filePath string) error { return nil } func (c *nilLockCacher) RemoveById(id string) error { return nil } func (c *nilLockCacher) Locks() []Lock { return nil } func (c *nilLockCacher) Clear() {} func (c *nilLockCacher) Save() error { return nil } git-lfs-3.6.1/locking/locks_test.go000066400000000000000000000230301472372047300172240ustar00rootroot00000000000000package locking import ( "encoding/json" "net/http" "net/http/httptest" "os" "sort" "testing" "time" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type LocksById []Lock func (a LocksById) Len() int { return len(a) } func (a LocksById) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a LocksById) Less(i, j int) bool { return a[i].Id < a[j].Id } func TestRemoteLocksWithCache(t *testing.T) { var err error tempDir := t.TempDir() remoteQueries := 0 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { remoteQueries++ assert.Equal(t, "GET", r.Method) assert.Equal(t, "/api/locks", r.URL.Path) w.Header().Set("Content-Type", "application/json") err = json.NewEncoder(w).Encode(&lockList{ Locks: []Lock{ Lock{Id: "100", Path: "folder/test1.dat", Owner: &User{Name: "Alice"}}, Lock{Id: "101", Path: "folder/test2.dat", Owner: &User{Name: "Charles"}}, Lock{Id: "102", Path: "folder/test3.dat", Owner: &User{Name: "Fred"}}, }, }) assert.Nil(t, err) })) defer func() { srv.Close() }() lfsclient, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.url": srv.URL + "/api", "user.name": "Fred", "user.email": "fred@bloggs.com", })) require.Nil(t, err) client, err := NewClient("", lfsclient, config.New()) assert.Nil(t, err) assert.Nil(t, client.SetupFileCache(tempDir)) client.RemoteRef = &git.Ref{Name: "refs/heads/master"} cacheFile, err := client.prepareCacheDirectory("remote") assert.Nil(t, err) // Cache file should not exist fi, err := os.Stat(cacheFile) assert.True(t, os.IsNotExist(err)) // Querying non-existing cache file will report nothing locks, err := client.SearchLocks(nil, 0, false, true) assert.NotNil(t, err) assert.Empty(t, locks) assert.Equal(t, 0, remoteQueries) // Need to include zero time in structure for equal to work zeroTime := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) // REMOTE QUERY: No cache file will be created when querying with a filter locks, err = client.SearchLocks(map[string]string{ "key": "value", }, 0, false, false) assert.Nil(t, err) // Just make sure we have received something, content doesn't matter assert.Equal(t, 3, len(locks)) assert.Equal(t, 1, remoteQueries) fi, err = os.Stat(cacheFile) assert.True(t, os.IsNotExist(err)) // REMOTE QUERY: No cache file will be created when querying with a limit locks, err = client.SearchLocks(nil, 1, false, false) assert.Nil(t, err) // Just make sure we have received something, content doesn't matter assert.Equal(t, 1, len(locks)) assert.Equal(t, 2, remoteQueries)
fi, err = os.Stat(cacheFile) assert.True(t, os.IsNotExist(err)) // REMOTE QUERY: locks will be reported and cache file should be created locks, err = client.SearchLocks(nil, 0, false, false) assert.Nil(t, err) assert.Equal(t, 3, remoteQueries) fi, err = os.Stat(cacheFile) assert.Nil(t, err) const size int64 = 300 assert.Equal(t, size, fi.Size()) expectedLocks := []Lock{ Lock{Path: "folder/test1.dat", Id: "100", Owner: &User{Name: "Alice"}, LockedAt: zeroTime}, Lock{Path: "folder/test2.dat", Id: "101", Owner: &User{Name: "Charles"}, LockedAt: zeroTime}, Lock{Path: "folder/test3.dat", Id: "102", Owner: &User{Name: "Fred"}, LockedAt: zeroTime}, } sort.Sort(LocksById(locks)) assert.Equal(t, expectedLocks, locks) // Querying cache file should report same locks locks, err = client.SearchLocks(nil, 0, false, true) assert.Nil(t, err) assert.Equal(t, 3, remoteQueries) sort.Sort(LocksById(locks)) assert.Equal(t, expectedLocks, locks) } func TestRefreshCache(t *testing.T) { var err error tempDir := t.TempDir() srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "POST", r.Method) assert.Equal(t, "/api/locks/verify", r.URL.Path) w.Header().Set("Content-Type", "application/json") err = json.NewEncoder(w).Encode(lockVerifiableList{ Theirs: []Lock{ Lock{Id: "99", Path: "folder/test3.dat", Owner: &User{Name: "Alice"}}, Lock{Id: "199", Path: "other/test1.dat", Owner: &User{Name: "Charles"}}, }, Ours: []Lock{ Lock{Id: "101", Path: "folder/test1.dat", Owner: &User{Name: "Fred"}}, Lock{Id: "102", Path: "folder/test2.dat", Owner: &User{Name: "Fred"}}, Lock{Id: "103", Path: "root.dat", Owner: &User{Name: "Fred"}}, }, }) assert.Nil(t, err) })) defer func() { srv.Close() }() lfsclient, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.url": srv.URL + "/api", "user.name": "Fred", "user.email": "fred@bloggs.com", })) require.Nil(t, err) client, err := NewClient("", lfsclient, config.New()) assert.Nil(t, err) assert.Nil(t, client.SetupFileCache(tempDir)) // Should start with no cached items locks, err := client.SearchLocks(nil, 0, true, false) assert.Nil(t, err) assert.Empty(t, locks) client.RemoteRef = &git.Ref{Name: "refs/heads/master"} _, _, err = client.SearchLocksVerifiable(100, false) assert.Nil(t, err) locks, err = client.SearchLocks(nil, 0, true, false) assert.Nil(t, err) // Need to include zero time in structure for equal to work zeroTime := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) // Sort locks for stable comparison sort.Sort(LocksById(locks)) assert.Equal(t, []Lock{ Lock{Path: "folder/test1.dat", Id: "101", Owner: &User{Name: "Fred"}, LockedAt: zeroTime}, Lock{Path: "folder/test2.dat", Id: "102", Owner: &User{Name: "Fred"}, LockedAt: zeroTime}, Lock{Path: "root.dat", Id: "103", Owner: &User{Name: "Fred"}, LockedAt: zeroTime}, Lock{Path: "other/test1.dat", Id: "199", Owner: &User{Name: "Charles"}, LockedAt: zeroTime}, Lock{Path: "folder/test3.dat", Id: "99", Owner: &User{Name: "Alice"}, LockedAt: zeroTime}, }, locks) } func TestSearchLocksVerifiableWithCache(t *testing.T) { var err error tempDir := t.TempDir() remoteQueries := 0 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { remoteQueries++ assert.Equal(t, "POST", r.Method) assert.Equal(t, "/api/locks/verify", r.URL.Path) body := lockVerifiableRequest{} if assert.Nil(t, json.NewDecoder(r.Body).Decode(&body)) { w.Header().Set("Content-Type", "application/json") list := lockVerifiableList{} if body.Cursor == "1" { list.Ours = []Lock{ 
Lock{Path: "folder/1/test1.dat", Id: "111"}, } list.Theirs = []Lock{ Lock{Path: "folder/1/test2.dat", Id: "112"}, Lock{Path: "folder/1/test3.dat", Id: "113"}, } } else { list.Ours = []Lock{ Lock{Path: "folder/0/test1.dat", Id: "101"}, Lock{Path: "folder/0/test2.dat", Id: "102"}, } list.Theirs = []Lock{ Lock{Path: "folder/0/test3.dat", Id: "103"}, } list.NextCursor = "1" } err := json.NewEncoder(w).Encode(&list) assert.Nil(t, err) } else { w.WriteHeader(500) } })) defer srv.Close() lfsclient, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.url": srv.URL + "/api", "user.name": "Fred", "user.email": "fred@bloggs.com", })) require.Nil(t, err) client, err := NewClient("", lfsclient, config.New()) assert.Nil(t, client.SetupFileCache(tempDir)) client.RemoteRef = &git.Ref{Name: "refs/heads/master"} cacheFile, err := client.prepareCacheDirectory("verifiable") assert.Nil(t, err) // Cache file should not exist fi, err := os.Stat(cacheFile) assert.True(t, os.IsNotExist(err)) // Querying non-existing cache file will report nothing ourLocks, theirLocks, err := client.SearchLocksVerifiable(0, true) assert.NotNil(t, err) assert.Empty(t, ourLocks) assert.Empty(t, theirLocks) assert.Equal(t, 0, remoteQueries) // REMOTE QUERY: No cache file will be created when querying with a limit ourLocks, theirLocks, err = client.SearchLocksVerifiable(1, false) assert.Nil(t, err) // Just make sure we have have received anything, content doesn't matter assert.Equal(t, 1, len(ourLocks)) assert.Equal(t, 0, len(theirLocks)) assert.Equal(t, 1, remoteQueries) fi, err = os.Stat(cacheFile) assert.True(t, os.IsNotExist(err)) // REMOTE QUERY: locks will be reported and cache file should be created ourLocks, theirLocks, err = client.SearchLocksVerifiable(0, false) assert.Nil(t, err) assert.Equal(t, 3, remoteQueries) fi, err = os.Stat(cacheFile) assert.Nil(t, err) const size int64 = 478 assert.Equal(t, size, fi.Size()) // Need to include zero time in structure for equal to work zeroTime := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) // Sort locks for stable comparison expectedOurLocks := []Lock{ Lock{Path: "folder/0/test1.dat", Id: "101", LockedAt: zeroTime}, Lock{Path: "folder/0/test2.dat", Id: "102", LockedAt: zeroTime}, Lock{Path: "folder/1/test1.dat", Id: "111", LockedAt: zeroTime}, } expectedTheirLocks := []Lock{ Lock{Path: "folder/0/test3.dat", Id: "103", LockedAt: zeroTime}, Lock{Path: "folder/1/test2.dat", Id: "112", LockedAt: zeroTime}, Lock{Path: "folder/1/test3.dat", Id: "113", LockedAt: zeroTime}, } sort.Sort(LocksById(ourLocks)) assert.Equal(t, expectedOurLocks, ourLocks) sort.Sort(LocksById(theirLocks)) assert.Equal(t, expectedTheirLocks, theirLocks) // Querying cache file should report same locks ourLocks, theirLocks, err = client.SearchLocksVerifiable(0, true) assert.Nil(t, err) assert.Equal(t, 3, remoteQueries) sort.Sort(LocksById(ourLocks)) assert.Equal(t, expectedOurLocks, ourLocks) sort.Sort(LocksById(theirLocks)) assert.Equal(t, expectedTheirLocks, theirLocks) } git-lfs-3.6.1/locking/schemas/000077500000000000000000000000001472372047300161505ustar00rootroot00000000000000git-lfs-3.6.1/locking/schemas/http-lock-create-request-schema.json000066400000000000000000000005731472372047300251420ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-04/schema", "title": "Git LFS HTTPS Lock Creation API Request", "type": "object", "properties": { "path": { "type": "string" }, "ref": { "type": "object", "properties": { "name": { "type": "string" } }, "required": ["name"] } }, 
"required": ["path"] } git-lfs-3.6.1/locking/schemas/http-lock-create-response-schema.json000066400000000000000000000014241472372047300253040ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-04/schema", "title": "Git LFS HTTPS Lock Creation API Response", "type": "object", "properties": { "lock": { "type": "object", "properties": { "id": { "type": "string" }, "path": { "type": "string" }, "locked_at": { "type": "string" }, "owner": { "type": "object", "properties": { "name": { "type": "string" } } } }, "required": ["id", "path", "locked_at"] }, "message": { "type": "string" }, "request_id": { "type": "string" }, "documentation_url": { "type": "string" } }, "required": ["lock"] } git-lfs-3.6.1/locking/schemas/http-lock-delete-request-schema.json000066400000000000000000000005451472372047300251400ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-04/schema", "title": "Git LFS HTTPS Lock Deletion API Request", "type": "object", "properties": { "force": { "type": "boolean" }, "ref": { "type": "object", "properties": { "name": { "type": "string" } }, "required": ["name"] } } } git-lfs-3.6.1/locking/schemas/http-lock-list-response-schema.json000066400000000000000000000013241472372047300250130ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-04/schema", "title": "Git LFS HTTPS Lock List API Response", "type": "object", "properties": { "locks": { "type": "array", "items": { "type": "object", "properties": { "id": { "type": "string" }, "path": { "type": "string" }, "locked_at": { "type": "string" }, "owner": { "type": "object", "properties": { "name": { "type": "string" } } } } } }, "next_cursor": { "type": "string" } }, "required": ["locks"] } git-lfs-3.6.1/locking/schemas/http-lock-verify-response-schema.json000066400000000000000000000016271472372047300253520ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-04/schema", "title": "Git LFS HTTPS Lock Verify API Response", "type": "object", "definitions": { "lock": { "type": "object", "properties": { "id": { "type": "string" }, "path": { "type": "string" }, "locked_at": { "type": "string" }, "owner": { "type": "object", "properties": { "name": { "type": "string" } } } }, "required": ["id", "path"] } }, "properties": { "ours": { "type": "array", "items": { "$ref": "#/definitions/lock" } }, "theirs": { "type": "array", "items": { "$ref": "#/definitions/lock" } }, "next_cursor": { "type": "string" } }, "required": ["ours", "theirs"] } git-lfs-3.6.1/locking/ssh.go000066400000000000000000000162571472372047300156640ustar00rootroot00000000000000package locking import ( "fmt" "strings" "time" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/ssh" "github.com/git-lfs/git-lfs/v3/tr" ) type sshLockClient struct { transfer *ssh.SSHTransfer *lfsapi.Client } func (c *sshLockClient) connection() (*ssh.PktlineConnection, error) { return c.transfer.Connection(0) } func (c *sshLockClient) parseLockResponse(status int, args []string, lines []string) (*Lock, string, error) { var lock *Lock var message string var err error seen := make(map[string]struct{}) if status >= 200 && status <= 299 || status == 409 { lock = &Lock{} for _, entry := range args { if strings.HasPrefix(entry, "id=") { lock.Id = entry[3:] seen["id"] = struct{}{} } else if strings.HasPrefix(entry, "path=") { lock.Path = entry[5:] seen["path"] = struct{}{} } else if strings.HasPrefix(entry, "ownername=") { lock.Owner = &User{} 
lock.Owner.Name = entry[10:] seen["ownername"] = struct{}{} } else if strings.HasPrefix(entry, "locked-at=") { lock.LockedAt, err = time.Parse(time.RFC3339, entry[10:]) if err != nil { return lock, "", errors.New(tr.Tr.Get("lock response: invalid locked-at: %s", entry)) } seen["locked-at"] = struct{}{} } } if len(seen) != 4 { return nil, "", errors.New(tr.Tr.Get("incomplete fields for lock")) } } if status > 299 && len(lines) > 0 { message = lines[0] } return lock, message, nil } type owner string const ( ownerOurs = owner("ours") ownerTheirs = owner("theirs") ownerUnknown = owner("") ) type lockData struct { lock Lock who owner } func (c *sshLockClient) lockDataIsIncomplete(data *lockData) bool { return data.lock.Path == "" || data.lock.Owner == nil || data.lock.LockedAt.IsZero() } func (c *sshLockClient) parseListLockResponse(status int, args []string, lines []string) (all []Lock, ours []Lock, theirs []Lock, nextCursor string, message string, err error) { locks := make(map[string]*lockData) var last *lockData if status >= 200 && status <= 299 { for _, entry := range args { if strings.HasPrefix(entry, "next-cursor=") { if len(nextCursor) > 0 { return nil, nil, nil, "", "", errors.New(tr.Tr.Get("lock response: multiple next-cursor responses")) } nextCursor = entry[12:] } } for _, entry := range lines { values := strings.SplitN(entry, " ", 3) var cmd string if len(values) > 0 { cmd = values[0] } if cmd == "lock" { if len(values) != 2 { return nil, nil, nil, "", "", errors.New(tr.Tr.Get("lock response: invalid response: %q", entry)) } else if last != nil && c.lockDataIsIncomplete(last) { return nil, nil, nil, "", "", errors.New(tr.Tr.Get("lock response: incomplete lock data")) } id := values[1] last = &lockData{who: ownerUnknown} last.lock.Id = id locks[id] = last } else if len(values) != 3 { return nil, nil, nil, "", "", errors.New(tr.Tr.Get("lock response: invalid response: %q", entry)) } else if last == nil || last.lock.Id != values[1] { return nil, nil, nil, "", "", errors.New(tr.Tr.Get("lock response: interspersed response: %q", entry)) } else { switch cmd { case "path": last.lock.Path = values[2] case "owner": last.who = owner(values[2]) case "ownername": last.lock.Owner = &User{} last.lock.Owner.Name = values[2] case "locked-at": last.lock.LockedAt, err = time.Parse(time.RFC3339, values[2]) if err != nil { return nil, nil, nil, "", "", errors.New(tr.Tr.Get("lock response: invalid locked-at: %s", entry)) } } } } if last != nil && c.lockDataIsIncomplete(last) { return nil, nil, nil, "", "", errors.New(tr.Tr.Get("lock response: incomplete lock data")) } for _, lock := range locks { all = append(all, lock.lock) if lock.who == ownerOurs { ours = append(ours, lock.lock) } else if lock.who == ownerTheirs { theirs = append(theirs, lock.lock) } } } else if status > 299 && len(lines) > 0 { message = lines[0] } return all, ours, theirs, nextCursor, message, nil } func (c *sshLockClient) Lock(remote string, lockReq *lockRequest) (*lockResponse, int, error) { args := make([]string, 0, 3) args = append(args, fmt.Sprintf("path=%s", lockReq.Path)) if lockReq.Ref != nil { args = append(args, fmt.Sprintf("refname=%s", lockReq.Ref.Name)) } conn, err := c.connection() if err != nil { return nil, 0, err } conn.Lock() defer conn.Unlock() err = conn.SendMessage("lock", args) if err != nil { return nil, 0, err } status, args, lines, err := conn.ReadStatusWithLines() if err != nil { return nil, status, err } var lock lockResponse lock.Lock, lock.Message, err = c.parseLockResponse(status, args, lines) return 
&lock, status, err } func (c *sshLockClient) Unlock(ref *git.Ref, remote, id string, force bool) (*unlockResponse, int, error) { args := make([]string, 0, 3) if ref != nil { args = append(args, fmt.Sprintf("refname=%s", ref.Name)) } conn, err := c.connection() if err != nil { return nil, 0, err } conn.Lock() defer conn.Unlock() err = conn.SendMessage(fmt.Sprintf("unlock %s", id), args) if err != nil { return nil, 0, err } status, args, lines, err := conn.ReadStatusWithLines() if err != nil { return nil, status, err } var lock unlockResponse lock.Lock, lock.Message, err = c.parseLockResponse(status, args, lines) return &lock, status, err } func (c *sshLockClient) Search(remote string, searchReq *lockSearchRequest) (*lockList, int, error) { values := searchReq.QueryValues() args := make([]string, 0, len(values)) for key, value := range values { args = append(args, fmt.Sprintf("%s=%s", key, value)) } conn, err := c.connection() if err != nil { return nil, 0, err } conn.Lock() defer conn.Unlock() err = conn.SendMessage("list-lock", args) if err != nil { return nil, 0, err } status, args, lines, err := conn.ReadStatusWithLines() if err != nil { return nil, status, err } locks, _, _, nextCursor, message, err := c.parseListLockResponse(status, args, lines) if err != nil { return nil, status, err } list := &lockList{ Locks: locks, NextCursor: nextCursor, Message: message, } return list, status, nil } func (c *sshLockClient) SearchVerifiable(remote string, vreq *lockVerifiableRequest) (*lockVerifiableList, int, error) { args := make([]string, 0, 3) if vreq.Ref != nil { args = append(args, fmt.Sprintf("refname=%s", vreq.Ref.Name)) } if len(vreq.Cursor) > 0 { args = append(args, fmt.Sprintf("cursor=%s", vreq.Cursor)) } if vreq.Limit > 0 { args = append(args, fmt.Sprintf("limit=%d", vreq.Limit)) } conn, err := c.connection() if err != nil { return nil, 0, err } conn.Lock() defer conn.Unlock() err = conn.SendMessage("list-lock", args) if err != nil { return nil, 0, err } status, args, lines, err := conn.ReadStatusWithLines() if err != nil { return nil, status, err } _, ours, theirs, nextCursor, message, err := c.parseListLockResponse(status, args, lines) if err != nil { return nil, status, err } list := &lockVerifiableList{ Ours: ours, Theirs: theirs, NextCursor: nextCursor, Message: message, } return list, status, nil } git-lfs-3.6.1/po/000077500000000000000000000000001472372047300135155ustar00rootroot00000000000000git-lfs-3.6.1/po/es.po000066400000000000000000000005721472372047300144700ustar00rootroot00000000000000msgid "" msgstr "" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Language: \n" "X-Generator: xgotext\n" #: command_filter_process.go:40 msgid "This command should be run by the Git filter process" msgstr "Este comando debe ser invocado por el proceso de filtración de Git"git-lfs-3.6.1/rpm/000077500000000000000000000000001472372047300136755ustar00rootroot00000000000000git-lfs-3.6.1/rpm/INSTALL.md000066400000000000000000000056351472372047300153360ustar00rootroot00000000000000# Building RPMs # All of the code to build the RPM is stored in a SPECS/git-lfs.spec file. The source code tarball needs to be put in a SOURCES directory. The BUILD and BUILDROOT directories are used during the build process. The final RPM ends up in the RPMS directory and a source-rpm in SRPMS. 
In order to expedite installing all dependencies (mainly asciidoctor and golang) and download any needed files, a build_rpms.bsh script is included. This is the **RECOMMENDED** way to build the rpms. It will install all yum packages in order to build the rpm. Simply run: ``` ./clean.bsh ./build_rpms.bsh ``` The clean.bsh script removes previous rpms, etc., and removes the source tar.gz file. Otherwise you might end up creating an rpm with pieces from different versions. Standard practice is to run rpmbuild as a non-root user. This prevents inadvertently installing files in the operating system. The intent is to run build_rpms.bsh as a non-root user with sudo privileges. If you have a different command for sudo, set the SUDO environment variable to the other command. When all is done, install (or distribute) RPMS/git-lfs.rpm ``` yum install RPMS/x86_64/git-lfs*.rpm ``` ### Alternative build method ### If you want to use your own ruby/golang without using the version from build_rpms.bsh, you will have to disable dependencies on the rpms. It's pretty easy, just make sure asciidoctor and go are in the path, and run ``` NODEPS=1 ./build_rpms.bsh ``` ### Manual build method ### If you want to use your own ruby/golang without using build_rpms.bsh, it's a little more complicated. You have to make sure asciidoctor and go are in the path, create the build structure, and download/create the tar.gz file used. This is not recommended, but it is possible. ``` mkdir -p {BUILD,BUILDROOT,SOURCES,RPMS,SRPMS} #download file to SOURCES/v{version number}.tar.gz rpmbuild --define "_topdir `pwd`" -bb SPECS/git-lfs.spec --nodeps #(and optionally) rpmbuild --define "_topdir `pwd`" -bs SPECS/git-lfs.spec --nodeps ``` ### Releases ### It is no longer necessary to update SPECS/git-lfs.spec for every version. As long as lfs/lfs.go is updated, build_rpms.bsh parses the version number using the pattern ```s|const Version = "\([0-9.]*\)"|\1|``` and updates SPECS/git-lfs.spec. The version number is then used to download: https://github.com/git-lfs/git-lfs/archive/v%{version}.tar.gz This way, when a new version is archived, it will get downloaded and built against. When developing, it is advantageous to use the currently checked out version to test against. In order to do that, after running ```./clean.bsh```, set the environment variable BUILD_LOCAL to 1 ``` ./clean.bsh BUILD_LOCAL=1 ./build_rpms.bsh ``` ### Troubleshooting ### **Q**) I ran build_rpms.bsh as root and now there are root-owned files in the rpm dir **A**) That happens.
Either run build_rpms.bsh as a user with sudo permissions or ```chown -R username:groupname rpm``` as root after building.git-lfs-3.6.1/rpm/SPECS/000077500000000000000000000000001472372047300145525ustar00rootroot00000000000000git-lfs-3.6.1/rpm/SPECS/git-lfs.spec000066400000000000000000000061161472372047300167770ustar00rootroot00000000000000Name: git-lfs Version: 3.6.1 Release: 1%{?dist} Summary: Git extension for versioning large files Group: Applications/Archiving License: MIT URL: https://git-lfs.com/ Source0: https://github.com/git-lfs/git-lfs/archive/v%{version}/%{name}-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) BuildRequires: perl-Digest-SHA BuildRequires: golang, tar, rubygem-asciidoctor, which, git >= 1.8.2, gettext-devel Requires: git >= 1.8.2 %define debug_package %{nil} #I think this is because go links with --build-id=none for linux %description Git Large File Storage (LFS) replaces large files such as audio samples, videos, datasets, and graphics with text pointers inside Git, while storing the file contents on a remote server like GitHub.com or GitHub Enterprise. %prep %setup -q -n %{name}-%{version} mkdir -p src/github.com/git-lfs ln -s $(pwd) src/github.com/git-lfs/%{name} %build %if 0%{?rhel} == 5 export CGO_ENABLED=0 %endif pushd src/github.com/git-lfs/%{name} %if "%{_arch}" == "i386" GOARCH=386 FORCE_LOCALIZE=true make %else GOARCH=amd64 FORCE_LOCALIZE=true make %endif popd make man %install [ "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT install -D bin/git-lfs ${RPM_BUILD_ROOT}/usr/bin/git-lfs mkdir -p -m 755 ${RPM_BUILD_ROOT}/usr/share/man/man1 mkdir -p -m 755 ${RPM_BUILD_ROOT}/usr/share/man/man5 mkdir -p -m 755 ${RPM_BUILD_ROOT}/usr/share/man/man7 install -D man/man1/*.1 ${RPM_BUILD_ROOT}/usr/share/man/man1 install -D man/man5/*.5 ${RPM_BUILD_ROOT}/usr/share/man/man5 install -D man/man7/*.7 ${RPM_BUILD_ROOT}/usr/share/man/man7 %post # The --skip-repo option prevents failure if / is a Git repository with existing # non-git-lfs hooks. git lfs install --skip-repo --system %preun # The --skip-repo option avoids mutating / if it is a Git repository. (Maybe the # user wants to replace this package with a different installation.) git lfs uninstall --skip-repo --system %check export GIT_LFS_TEST_DIR=$(mktemp -d) # test/git-lfs-server-api/main.go does not compile because github.com/spf13/cobra # cannot be found in vendor, for some reason. It's not needed for installs, so # skip it. 
export SKIPAPITESTCOMPILE=1 pushd src/github.com/git-lfs/%{name} make test make -C t PROVE_EXTRA_ARGS=-j9 test popd rm -rf ${GIT_LFS_TEST_DIR} %clean rm -rf %{buildroot} %files %defattr(-,root,root,-) %doc LICENSE.md README.md /usr/bin/git-lfs /usr/share/man/man1/*.1.gz /usr/share/man/man5/*.5.gz /usr/share/man/man7/*.7.gz %changelog * Sun Dec 6 2015 Andrew Neff - 1.1.0-1 - Added Requires and version for git back in * Sat Oct 31 2015 Andrew Neff - 1.0.3-1 - Added GIT_LFS_TEST_DIR to prevent future test race condition * Sun Aug 2 2015 Andrew Neff - 0.5.4-1 - Added tests back in * Sat Jul 18 2015 Andrew Neff - 0.5.2-1 - Changed Source0 filename * Mon May 18 2015 Andrew Neff - 0.5.1-1 - Initial Spec git-lfs-3.6.1/rpm/SPECS/rubygem-asciidoctor.spec000066400000000000000000000033361472372047300214060ustar00rootroot00000000000000#global gemdir %(ruby -rubygems -e 'puts Gem::dir' 2>/dev/null) %global gemdir %(IFS=: R=($(gem env gempath)); echo ${R[${#R[@]}-1]}) %define gem_name asciidoctor Name: rubygem-%{gem_name} Version: 2.0.17 Release: 1%{?dist} Summary: Builds manuals Group: Applications/Programming License: N/A URL: https://rubygems.org/gems/%{gem_name} Source0: https://rubygems.org/downloads/%{gem_name}-%{version}.gem BuildRoot: %(echo %{_topdir}/BUILDROOT/%{gem_name}-%{version}) %if 0%{?el7} BuildRequires: rh-ruby30-ruby, rh-ruby30-build Requires: rh-ruby30-ruby %else BuildRequires: gem Requires: ruby %endif BuildArch: noarch %description Builds Manuals %prep %if 0%{?el7} %setup -q -c -T %else %setup -q -n %{gem_name}-%{version} %endif %if 0%{?el7} mkdir -p ./usr/local gem install -V --local --force --install-dir ./%{gemdir} --wrappers --bindir ./usr/local/bin %{SOURCE0} %endif %build %if 0%{?el8}%{?el9} gem build ../%{gem_name}-%{version}.gemspec gem install -V --local --build-root . --force --no-document %{gem_name}-%{version}.gem %endif %install mkdir -p ${RPM_BUILD_ROOT} cp -a ./usr ${RPM_BUILD_ROOT}/usr %if 0%{?el7} cp -a ./opt ${RPM_BUILD_ROOT}/opt %endif %clean rm -rf %{buildroot} %files %defattr(-,root,root,-) %if 0%{?el8}%{?el9} %dir %{gem_instdir} %{gem_libdir} %exclude %{gem_cache} %{gem_spec} /usr/share/gems /usr/bin/%{gem_name} %else %{gemdir}/gems/%{gem_name}-%{version} /opt/rh/rh-ruby30/root/usr/local/share/gems/cache/%{gem_name}-%{version}.gem /opt/rh/rh-ruby30/root/usr/local/share/gems/doc/%{gem_name}-%{version} /opt/rh/rh-ruby30/root/usr/local/share/gems/specifications/%{gem_name}-%{version}.gemspec /usr/local/bin %endif %changelog git-lfs-3.6.1/rpm/build_rpms.bsh000077500000000000000000000046341472372047300165450ustar00rootroot00000000000000#!/usr/bin/env bash set -eu CURDIR=$(cd $(dirname ${BASH_SOURCE[0]}); pwd) VERSION_ID=$(source /etc/os-release; echo ${VERSION_ID%%.*}) OS_NAME=$(source /etc/os-release; echo ${NAME}) OS_NAME=${OS_NAME,,} case "${OS_NAME}" in centos*|red*|almalinux|rocky*) RPM_DIST=".el${VERSION_ID}" ;; fedora) RPM_DIST=".fc${VERSION_ID}" ;; sles) RPM_DIST=".sles${VERSION_ID}" ;; opensuse) RPM_DIST=".opensuse${VERSION_ID}" ;; *) RPM_DIST="%{nil}" ;; esac RPMBUILD=(rpmbuild --define "_topdir ${CURDIR}" --define "dist ${RPM_DIST}") SUDO=${SUDO=`if command -v sudo > /dev/null 2>&1; then echo sudo; fi`} export PATH=${PATH}:/usr/local/bin set -vx SPEC=${CURDIR}/SPECS/git-lfs.spec $SUDO yum install -y rpm-build mkdir -p ${CURDIR}/{BUILD,BUILDROOT,SOURCES,RPMS,SRPMS} if ! command -v asciidoctor; then echo "Downloading Asciidoctor ruby gem..." 
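# Fetch the gem into SOURCES so the rubygem-asciidoctor.spec build below can package it, then install the resulting RPM so asciidoctor is on the PATH for the rest of the build.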
pushd ${CURDIR}/SOURCES curl -L -O https://rubygems.org/downloads/asciidoctor-2.0.17.gem popd echo "Building Asciidoctor ruby gem..." "${RPMBUILD[@]}" -ba ${CURDIR}/SPECS/rubygem-asciidoctor.spec echo "Installing Asciidoctor ruby gem..." $SUDO yum install -y --nogpgcheck $(ls ${CURDIR}/RPMS/noarch/rubygem-*.rpm | grep -v debuginfo) fi rm -fr ${CURDIR}/{BUILD,BUILDROOT} mkdir -p ${CURDIR}/{BUILD,BUILDROOT} pushd ${CURDIR}/.. #Yes, compile lfs before compiling lfs... FORCE_LOCALIZE=true make #Use the version output to grab the version number and short sha #(that yes, I could have gotten from git myself) LFS_VERSION=$(./bin/git-lfs version | sed -r 's|.*/([0-9.]*).*|\1|') sed -i 's|\(^Version:\s*\).*|\1'"${LFS_VERSION}"'|' "$SPEC" popd #Prep the SOURCES dir for git-lfs echo "Zipping up current checkout of git-lfs..." echo "Cleaning ${CURDIR}/tmptar" rm -rf ${CURDIR}/tmptar mkdir -p ${CURDIR}/tmptar/git-lfs-${LFS_VERSION} pushd ${CURDIR}/.. #I started running out of space in the docker, so I needed to copy a little less waste tar -c --exclude tmptar --exclude repos . | tar -x -C ${CURDIR}/tmptar/git-lfs-${LFS_VERSION}/ popd pushd ${CURDIR}/tmptar tar -zcf ${CURDIR}/SOURCES/git-lfs-${LFS_VERSION}.tar.gz git-lfs-${LFS_VERSION} popd echo "Cleaning ${CURDIR}/tmptar again" rm -rf ${CURDIR}/tmptar echo "Build git-lfs RPMs..." #--no-deps added for now so you can compile without official rpms installed "${RPMBUILD[@]}" --nodeps -ba "$SPEC" "${RPMBUILD[@]}" --nodeps --target=i686 -bb "$SPEC" echo "All Done!" git-lfs-3.6.1/script/000077500000000000000000000000001472372047300144035ustar00rootroot00000000000000git-lfs-3.6.1/script/build-git000077500000000000000000000020171472372047300162110ustar00rootroot00000000000000#!/bin/sh -ex DIR="$1" case $(uname -s) in Darwin) brew install curl zlib pcre2 openssl brew link --force curl zlib pcre2 openssl CURLDIR="$(curl-config --prefix)";; Linux) export DEBIAN_FRONTEND=noninteractive if test -f /etc/apt/sources.list.d/ubuntu.sources; then # Ubuntu 24.04 sed -e 's/^Types: deb$/Types: deb deb-src/' /etc/apt/sources.list.d/ubuntu.sources | sudo tee /etc/apt/sources.list.d/ubuntu.sources else # Ubuntu 22.04 and earlier sed -e 's/^deb/deb-src/' /etc/apt/sources.list | sudo tee /etc/apt/sources.list.d/src.list fi sudo apt-get update sudo apt-get install build-essential sudo apt-get -y build-dep git;; esac GIT_INSTALL_PATH="${GIT_INSTALL_DIR:-"/usr/local"}" cd "$DIR" printf "%s\n" \ "NO_GETTEXT=YesPlease" \ "NO_OPENSSL=YesPlease" \ "prefix=$GIT_INSTALL_PATH" \ > config.mak if test -n "$CURLDIR"; then printf "%s\n" \ "CURLDIR=$CURLDIR" \ >> config.mak fi make -j2 sudo make install echo "Git version:" git --version git-lfs-3.6.1/script/changelog000077500000000000000000000044441472372047300162660ustar00rootroot00000000000000#!/usr/bin/env bash # # Interactively generates a changelog over a range of commits: commit_summary() { local hash="$1" pr=$(git show $hash | grep -o "#\([0-9]*\)" | cut -c 2-) prjson="$(curl -n https://api.github.com/repos/git-lfs/git-lfs/pulls/$pr 2>/dev/null)" title="$(echo $prjson | jq -r -e ".title")" id="$(echo $prjson | jq -r -e ".number")" author="$(echo $prjson | jq -r -e ".user.login")" # If the title begins with "Backport", then strip everything until the actual # pull-request title. 
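# e.g. a hypothetical title "Backport #1234 to release-3.5: Fix checkout crash" becomes "Fix checkout crash".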
if grep -q "Backport" <(echo $title); then title="$(echo $title | sed 's/^[^:]*: //g')" fi echo "* $title #$id (@$author)" } revisions_in () { if [ "$patch" -eq 1 ]; then git rev-list --first-parent "$1" else git rev-list --merges --first-parent "$1" fi } noninteractive () { local range="$1" printf '### Uncategorized\n' for rev in $(revisions_in "$range"); do commit_summary $rev done cat <<-EOF ### Features ### Bugs ### Misc EOF } if [ "$1" = "--noninteractive" ]; then noninteractive=1 shift fi if [ "$1" = "--patch" ]; then patch=1 shift else patch=0 fi range="$1" if [ "$range" = "" ]; then echo "Usage: $0 [options] base..next" exit 1 fi if [ -n "$noninteractive" ] then noninteractive "$range" exit fi features="" bugs="" misc="" for rev in $(revisions_in "$range"); do git show -s $rev processed=0 while [ $processed -eq 0 ]; do echo "Categorize this change: [f,b,m,s,?] ?" read -n 1 opt echo "" case $opt in [fbms]) processed=1 ;; ?) echo "f - mark this merge as a feature" echo "b - mark this merge as a bugfix" echo "m - make this merge as a misc. change" echo "s - skip this merge, excluding it from the changelog" echo "? - display this help message" ;; *) echo "Unknown option: $opt, try again." ;; esac done if [ $opt != "s" ]; then summary="$(commit_summary $rev)" fi case $opt in f) features="$(printf "%s\n%s\n" "$features" "$summary")" ;; b) bugs="$(printf "%s\n%s\n" "$bugs" "$summary")" ;; m) misc="$(printf "%s\n%s\n" "$misc" "$summary")" ;; esac done echo "" >&2 cat <<- EOF ### Features $features ### Bugs $bugs ### Misc $misc EOF git-lfs-3.6.1/script/cibuild000077500000000000000000000023571472372047300157530ustar00rootroot00000000000000#!/usr/bin/env bash set -e # Strip out CI environment variables which cause tests to fail. unset $(env | grep -E '^GIT(HUB)?_' | grep -v '^GIT_DEFAULT_HASH=' | sed -e 's/=.*$//') UNAME=$(uname -s) X="" if [[ $UNAME == MINGW* || $UNAME == MSYS* || $UNAME == CYGWIN* ]]; then X=".exe" WINDOWS=1 fi # Build git-lfs-transfer from scutiger. cargo install --root t/scutiger scutiger-lfs # Set GOPATH if it isn't already set. eval "$(go env | grep GOPATH)" go install golang.org/x/tools/cmd/goimports@latest GOIMPORTS="$GOPATH/bin/goimports" make GOIMPORTS="$GOIMPORTS" && make GOIMPORTS="$GOIMPORTS" test # re-run test to ensure GIT_TRACE output doesn't leak into the git package GIT_TRACE=1 make GOIMPORTS="$GOIMPORTS" PKGS=git test pushd t >/dev/null PROVE="prove" PROVE_EXTRA_ARGS="-j9" VERBOSE_LOGS=1 make X="$X" clean VERBOSE_LOGS=1 make X="$X" PROVE="$PROVE" PROVE_EXTRA_ARGS="$PROVE_EXTRA_ARGS" popd >/dev/null echo "Looking for trailing whitespace..." if git grep -lE '[[:space:]]+$' | \ grep -vE '(^vendor/|\.git/(objects/|index)|\.(bat|ico|bmp)$)' then exit 1 fi echo "Formatting files..." make GOIMPORTS="$GOIMPORTS" fmt echo "Looking for files that are not formatted correctly..." 
git status -s [ -z "$(git status --porcelain)" ] git-lfs-3.6.1/script/compile-win-installer-unsigned.bat000066400000000000000000000001541472372047300231230ustar00rootroot00000000000000"%ProgramFiles(x86)%\Inno Setup 5\iscc.exe" /Qp "%~dp0\windows-installer\inno-setup-git-lfs-installer.iss" git-lfs-3.6.1/script/distro-tool000077700000000000000000000000001472372047300211772lib/distro.rbustar00rootroot00000000000000git-lfs-3.6.1/script/gen-i-reverse000077500000000000000000000041701472372047300170030ustar00rootroot00000000000000#!/usr/bin/env ruby if ARGV.length < 2 $stderr.puts "Usage: gen-i-reverse INPUT-FILE OUTPUT-FILE" exit 1 end input = File.open(ARGV[0]) output = File.open(ARGV[1], "w") $state = :idle $singular = nil $plural = nil def reset_state $state = :idle $singular = nil $plural = nil end def translate(s) items = s.split(/ /) items = items.map do |chunk| case chunk when /^%/ chunk else chunk.split(/(\\n|\W+)/).map do |c| c =~ /^\w/ ? c.reverse : c end.join end end items.join(" ").gsub("\n", "\\n") end while line = input.gets line.chomp! case $state when :idle case line when /^msgid ""$/ $state = :copy output.puts line when /^msgid "(.*)"$/ $state = :msgid $singular = $1 output.puts line when /^msgid `(.*)$/ $state = :msgid_multi $singular = $1.gsub('"', "\\\"") + "\n" end when :copy if line == "" reset_state end output.puts line when :msgid_multi case line # Note that PO files are not supposed to contain backtick-delimited strings, # but xgotext emits them anyway, so we fix them up until it gets fixed. when /^(.*)`$/ $state = :msgid $singular += $1.gsub('"', "\\\"") output.puts "msgid \"#{$singular.gsub("\n", "\\n")}\"" else $singular += line.gsub('"', "\\\"") + "\n" end when :msgid_plural_multi case line when /^(.*)`$/ $state = :msgid $plural += $1.gsub('"', "\\\"") output.puts "msgid_plural \"#{$plural.gsub("\n", "\\n")}\"" else $plural += line.gsub('"', "\\\"") + "\n" end when :msgid case line when /^msgid_plural ""$/ output.puts line when /^msgid_plural "(.*)"$/ $plural = $1 output.puts line when /^msgid_plural `(.*)$/ $state = :msgid_plural_multi $plural = $1.gsub('"', "\\\"") + "\n" output.puts line when /^msgstr(\[0\])? ""$/ output.puts "msgstr#{$1} \"#{translate($singular)}\"" when /^msgstr\[1\] ""$/ output.puts "msgstr[1] \"#{translate($plural)}\"" when "" reset_state output.puts line end end end git-lfs-3.6.1/script/hash-files000077500000000000000000000026161472372047300163610ustar00rootroot00000000000000#!/usr/bin/env ruby require "openssl" # This maps the OpenSSL name to the name used in the output file. # The order used is the order they should appear in the output file. DIGESTS = { 'BLAKE2b512' => 'BLAKE2b', 'BLAKE2s256' => 'BLAKE2s', 'SHA256' => 'SHA256', 'SHA384' => 'SHA384', 'SHA512' => 'SHA512', 'SHA512-256' => 'SHA512/256', 'SHA3-256' => 'SHA3-256', 'SHA3-384' => 'SHA3-384', 'SHA3-512' => 'SHA3-512', } class Hasher def initialize(file) @file = file @hashers = DIGESTS.map do |openssl, output| [output, OpenSSL::Digest.new(openssl)] end.to_h end def update(s) @hashers.values.each { |h| h.update(s) } end def to_a @hashers.map do |name, ctx| "#{name} (#{@file}) = #{ctx.digest.unpack("H*")[0]}\n" end.to_a end end results = [] ARGV.each do |file| f = File.open(file) h = Hasher.new(file) while chunk = f.read(65536) do h.update(chunk) end results += h.to_a end # Sort entries first by order of algorithm name in DIGESTS, then by filename, # then print them. # Create a mapping of output name digest to order in the hash. 
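# e.g. {"BLAKE2b"=>0, "BLAKE2s"=>1, ..., "SHA3-512"=>8}, following the DIGESTS order above.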
names = DIGESTS.values.each_with_index.to_a.to_h results.sort_by do |s| # Split into digest name and remainder. The remainder starts with the # filename. pair = s.split(' ', 2).to_a # Order by the index of the digest and then the filename. [names[pair[0]], pair[1]] end.each { |l| puts l } git-lfs-3.6.1/script/install-git-source000077500000000000000000000002261472372047300200560ustar00rootroot00000000000000#!/usr/bin/env bash # Builds git from a given git ref. Used for CircleCI OSX builds cd git-source git checkout "$1" make --jobs=2 make install cd .. git-lfs-3.6.1/script/install.sh000077500000000000000000000014431472372047300164120ustar00rootroot00000000000000#!/usr/bin/env bash set -eu prefix="/usr/local" if [ "${PREFIX:-}" != "" ] ; then prefix=${PREFIX:-} elif [ "${BOXEN_HOME:-}" != "" ] ; then prefix=${BOXEN_HOME:-} fi while [[ $# -gt 0 ]]; do case "$1" in --local) prefix="$HOME/.local" shift ;; *) echo "Unknown option: $1" exit 1 ;; esac done # Check if the user has permission to install in the specified prefix if [ ! -w "$prefix" ]; then echo "Error: Insufficient permissions to install in $prefix. Try running with sudo or choose a different prefix.">&2 exit 1 fi mkdir -p "$prefix/bin" rm -rf "$prefix/bin/git-lfs*" pushd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null for g in git*; do install "$g" "$prefix/bin/$g" done popd > /dev/null PATH+=:"$prefix/bin" git lfs install git-lfs-3.6.1/script/lib/000077500000000000000000000000001472372047300151515ustar00rootroot00000000000000git-lfs-3.6.1/script/lib/distro.rb000077500000000000000000000127661472372047300170210ustar00rootroot00000000000000#!/usr/bin/env ruby require 'optionparser' class DistroMap attr_reader :entries def initialize(map = nil) @entries = map || self.class.builtin_map end # Returns the map for our distros. # # The key in each case is a string containing a lowercase OS name, a slash, # and a version number. The value is a map containing the following fields: # # name:: a human-readable name for this distro. # component:: a component suitable for a packagecloud.io URL. # image:: a Docker image name from build_dockers without any extension. # equivalent:: packagecloud.io components for which we can upload the same # package. # package_type:: the extension for the package format on this OS. # package_tag:: the trailing component after the version number on this OS. 
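# For example, the "debian/12" entry below carries the name "Debian 12" and package_type "deb", and lists the Debian and Ubuntu releases that reuse its package.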
def self.builtin_map { # RHEL EOL https://access.redhat.com/support/policy/updates/errata # Fedora EOL https://docs.fedoraproject.org/en-US/releases/ # SLES EOL https://www.suse.com/lifecycle/ # opensuse https://en.opensuse.org/Lifetime # or https://en.wikipedia.org/wiki/OpenSUSE_version_history "centos/7" => { name: "RPM RHEL 7/CentOS 7", component: "el/7", image: "centos_7", package_type: "rpm", package_tag: "-1.el7", equivalent: [ "el/7", # EOL June 2024 "sles/12.5", # EOL October 2024 ], }, "centos/8" => { name: "RPM RHEL 8/Rocky Linux 8", component: "el/8", image: "centos_8", package_type: "rpm", package_tag: "-1.el8", equivalent: [ "el/8", # EOL May 2029 "opensuse/15.5", # EOL December 2024 "sles/15.5", # EOL December 2024 ], }, "rocky/9" => { name: "RPM RHEL 9/Rocky Linux 9", component: "el/9", image: "rocky_9", package_type: "rpm", package_tag: "-1.el9", equivalent: [ "el/9", # EOL May 2032 "fedora/39", # EOL November 2024 "fedora/40", # EOL May 2025 "fedora/41", # EOL November 2025 "opensuse/15.6", # EOL December 2025 "sles/15.6", # Current ], }, # Debian EOL https://wiki.debian.org/LTS/ # Ubuntu EOL https://wiki.ubuntu.com/Releases # Mint EOL https://linuxmint.com/download_all.php "debian/10" => { name: "Debian 10", component: "debian/buster", image: "debian_10", package_type: "deb", package_tag: "", equivalent: [ "debian/buster", # EOL June 2024 "linuxmint/ulyana", # EOL April 2025 "linuxmint/ulyssa", # EOL April 2025 "linuxmint/uma", # EOL April 2025 "linuxmint/una", # EOL April 2025 "ubuntu/focal", # EOL April 2025 ], }, "debian/11" => { name: "Debian 11", component: "debian/bullseye", image: "debian_11", package_type: "deb", package_tag: "", equivalent: [ "debian/bullseye", # EOL August 2026 "linuxmint/vanessa", # EOL April 2027 "linuxmint/vera", # EOL April 2027 "linuxmint/victoria", # EOL April 2027 "linuxmint/virginia", # EOL April 2027 "ubuntu/jammy", # EOL April 2027 ], }, "debian/12" => { name: "Debian 12", component: "debian/bookworm", image: "debian_12", package_type: "deb", package_tag: "", equivalent: [ "debian/bookworm", # EOL June 2028 "debian/trixie", # Current testing (Debian 13) "linuxmint/wilma", # EOL April 2029 "ubuntu/noble", # EOL June 2029 "ubuntu/oracular", # EOL July 2025 ] }, } end def distro_name_map entries.map { |k, v| [k, v[:equivalent]] }.to_h end def image_names entries.values.map { |v| v[:image] }.to_a end end class DistroMapProgram def initialize(stdout, stderr, dmap = nil) @dmap = DistroMap.new(dmap) @stdout = stdout @stderr = stderr end def image_names @stdout.puts @dmap.image_names.join(" ") end def distro_markdown arch = { "rpm" => ".x86_64", "deb" => "_amd64", } separator = { "rpm" => "-", "deb" => "_", } result = @dmap.entries.map do |_k, v| type = v[:package_type] "[#{v[:name]}](https://packagecloud.io/github/git-lfs/packages/#{v[:component]}/git-lfs#{separator[type]}VERSION#{v[:package_tag]}#{arch[type]}.#{type}/download)\n" end.join @stdout.puts result end def run(args) options = {} OptionParser.new do |parser| parser.on("--image-names", "Print the names of all images") do options[:mode] = :image_names end parser.on("--distro-markdown", "Print links to packages for all distros") do options[:mode] = :distro_markdown end end.parse!(args) case options[:mode] when nil @stderr.puts "A mode option is required" 2 when :image_names image_names 0 when :distro_markdown distro_markdown 0 end end end if $PROGRAM_NAME == __FILE__ exit DistroMapProgram.new($stdout, $stderr).run(ARGV) end 
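# This file doubles as a library (exercised by script/spec/distro_spec.rb) and as a command-line tool via the script/distro-tool symlink, e.g. "script/distro-tool --image-names".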
git-lfs-3.6.1/script/macos/000077500000000000000000000000001472372047300155055ustar00rootroot00000000000000git-lfs-3.6.1/script/macos/manifest.json000066400000000000000000000002501472372047300202030ustar00rootroot00000000000000{ "apple_id": { "password": "@env:DARWIN_DEV_PASS" }, "notarize": { "path": ["git-lfs"], "bundle_id": "com.github.git-lfs", "staple": false } } git-lfs-3.6.1/script/notarize000077500000000000000000000007031472372047300161640ustar00rootroot00000000000000#!/bin/sh # # Notarizes the file given on the command line with the Apple ID in # $DARWIN_DEV_USER, the password in $DARWIN_DEV_PASS, and the team ID (usually # ten characters) in $DARWIN_DEV_TEAM. # # This script exists to not echo these variables into the log. Don't run this # on a multi-user system, only in CI. xcrun notarytool submit "$1" \ --apple-id "$DARWIN_DEV_USER" --password "$DARWIN_DEV_PASS" --team-id "$DARWIN_DEV_TEAM" \ --wait git-lfs-3.6.1/script/packagecloud.rb000077500000000000000000000043361472372047300173630ustar00rootroot00000000000000#!/usr/bin/env ruby # Pushes all deb and rpm files from ./repos to PackageCloud. packagecloud_user = ENV["PACKAGECLOUD_USER"] || "github" packagecloud_token = ENV["PACKAGECLOUD_TOKEN"] || begin puts "PACKAGECLOUD_TOKEN env required" exit 1 end require "json" require_relative 'lib/distro' packagecloud_ruby_minimum_version = "1.0.4" begin gem "packagecloud-ruby", ">=#{packagecloud_ruby_minimum_version}" require "packagecloud" puts "Using packagecloud-ruby:#{Gem.loaded_specs["packagecloud-ruby"].version}" rescue LoadError puts "Requires packagecloud-ruby >=#{packagecloud_ruby_minimum_version}" puts %(gem install packagecloud-ruby) exit 1 end credentials = Packagecloud::Credentials.new(packagecloud_user, packagecloud_token) $client = Packagecloud::Client.new(credentials) # matches package directories built by docker to one or more packagecloud distros # https://packagecloud.io/docs#os_distro_version $distro_name_map = DistroMap.new.distro_name_map # caches distro id lookups $distro_id_map = {} def distro_names_for(filename) $distro_name_map.each do |pattern, distros| return distros if filename.include?(pattern) end raise "no distro for #{filename.inspect}" end package_files = Dir.glob("repos/**/*.rpm") + Dir.glob("repos/**/*.deb") package_files.each do |full_path| next if full_path =~ /repo-release/ pkg = Packagecloud::Package.new(:file => full_path) distro_names = distro_names_for(full_path) distro_names.map do |distro_name| distro_id = $distro_id_map[distro_name] ||= $client.find_distribution_id(distro_name) if !distro_id raise "no distro id for #{distro_name.inspect}" end puts "pushing #{full_path} to #{$distro_id_map.key(distro_id).inspect}" result = $client.put_package("git-lfs", pkg, distro_id) result.succeeded || begin # We've already uploaded this package in an earlier invocation of this # script and our attempt to upload over the existing package failed # because PackageCloud doesn't allow that. Ignore the failure since we # already have the package uploaded. 
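# PackageCloud signals the duplicate upload with this exact JSON error body, which is matched verbatim below.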
if result.response != '{"filename":["has already been taken"]}' raise "packagecloud put_package failed, error: #{result.response}" end end end end git-lfs-3.6.1/script/spec/000077500000000000000000000000001472372047300153355ustar00rootroot00000000000000git-lfs-3.6.1/script/spec/distro_spec.rb000066400000000000000000000051531472372047300202040ustar00rootroot00000000000000require_relative "../lib/distro" def test_map { "centos/7" => { name: "RPM RHEL 7/CentOS 7", component: "el/7", image: "centos_7", package_type: "rpm", package_tag: "-1.el7", equivalent: [ "el/7", "scientific/7", "opensuse/15.4", "sles/12.5", "sles/15.4", ], }, "centos/8" => { name: "RPM RHEL 8/Rocky Linux 8", component: "el/8", image: "centos_8", package_type: "rpm", package_tag: "-1.el8", equivalent: [ "el/8", ], }, "debian/12" => { name: "Debian 12", component: "debian/bookworm", image: "debian_12", package_type: "deb", package_tag: "", equivalent: [ "debian/bookworm", "debian/trixie", ] }, } end context DistroMapProgram do it "should print image names" do stdout = StringIO.new stderr = StringIO.new expect(DistroMapProgram.new(stdout, stderr, test_map).run(["--image-names"])).to eq 0 expect(stderr.string).to be_empty expect(stdout.string).to eq "centos_7 centos_8 debian_12\n" end it "should print distro markdown" do stdout = StringIO.new stderr = StringIO.new expect(DistroMapProgram.new(stdout, stderr, test_map).run(["--distro-markdown"])).to eq 0 expect(stderr.string).to be_empty expected = <<~EOM [RPM RHEL 7/CentOS 7](https://packagecloud.io/github/git-lfs/packages/el/7/git-lfs-VERSION-1.el7.x86_64.rpm/download) [RPM RHEL 8/Rocky Linux 8](https://packagecloud.io/github/git-lfs/packages/el/8/git-lfs-VERSION-1.el8.x86_64.rpm/download) [Debian 12](https://packagecloud.io/github/git-lfs/packages/debian/bookworm/git-lfs_VERSION_amd64.deb/download) EOM expect(stdout.string).to eq expected end it "should whine when no options were given" do stdout = StringIO.new stderr = StringIO.new expect(DistroMapProgram.new(stdout, stderr, test_map).run([])).to eq 2 expect(stdout.string).to be_empty expect(stderr.string).to eq "A mode option is required\n" end end context DistroMap do it "should produce the correct distro names" do map = { "centos/7" => [ "el/7", "scientific/7", "opensuse/15.4", "sles/12.5", "sles/15.4", ], "centos/8" => [ "el/8", ], "debian/12" => [ "debian/bookworm", "debian/trixie", ], } expect(DistroMap.new(test_map).distro_name_map).to eq map end it "should produce the correct image names" do expect(DistroMap.new(test_map).image_names).to eq %w[centos_7 centos_8 debian_12] end end git-lfs-3.6.1/script/update-version000077500000000000000000000033651472372047300173050ustar00rootroot00000000000000#!/bin/sh -e rfc822_datestamp () { # All the other changelog entries use this exact timestamp, so let's do so as # well. Use -0000 to indicate that our timestamp is in UTC, even though we # ourselves may not be. LC_ALL=C date +'%a, %d %b %Y 14:29:00 -0000' } user_id () { git var GIT_COMMITTER_IDENT | sed -e 's/^\(.*<[^>]*>\).*$/\1/' } update_go () { local version="$1" sed -i -e "s/\(Version = \)\"[0-9.]*\"/\\1\"$version\"/" config/version.go } update_debian () { local version="$1" # Return if already updated. ! 
grep -qs -F "git-lfs ($version)" debian/changelog || return local tmpdir=$(mktemp -d) local tmpfile="$tmpdir/changelog" printf 'git-lfs (%s) stable; urgency=low * New upstream version -- %s %s\n\n' "$version" "$(user_id)" "$(rfc822_datestamp)" >"$tmpfile" cat debian/changelog >>"$tmpfile" mv "$tmpfile" debian/changelog } update_rpm () { local version="$1" ruby -pi -e "\$_.gsub!(/^(Version:\\s+)[0-9.]+$/, '\\1$version')" \ rpm/SPECS/git-lfs.spec } update_versioninfo () { local version="$1" ruby -pi -e "ver = '$version'; pieces = ver.split('.')" \ -e '$_.gsub!(/("Major": )\d+/, %Q(\\1#{pieces[0]}))' \ -e '$_.gsub!(/("Minor": )\d+/, %Q(\\1#{pieces[1]}))' \ -e '$_.gsub!(/("Patch": )\d+/, %Q(\\1#{pieces[2]}))' \ -e '$_.gsub!(/("ProductVersion": )"[\d.]+"/, %Q(\\1"#{ver}"))' \ versioninfo.json } main () { local version="$1" if [ -z "$version" ] || [ "$version" = "--help" ] then cat <&2 } abort () { local format="$1" shift printf "$format\n" "$@" >&2 exit 2 } uri_encode () { ruby -e 'print ARGV[0].gsub(/[^A-Za-z0-9_.-]/) { |x| "%%%02x" % x.ord }' "$1" } curl () { if [ -n "$GITHUB_TOKEN" ] then command curl -u "token:$GITHUB_TOKEN" -fSs "$@" else command curl -nfSs "$@" fi } categorize_os () { local os="$1" if [ "$os" = "freebsd" ] then echo FreeBSD else ruby -e 'puts ARGV[0].capitalize' "$os" fi } categorize_arch () { local arch="$1" if [ "$arch" = "ppc64le" ] then echo "Little-endian 64-bit PowerPC" elif [ "$arch" = "riscv64" ] then echo "64-bit RISC-V" elif [ "$arch" = "loong64" ] then echo "64-bit LoongArch" else echo "$arch" | tr a-z A-Z fi } # Categorize a release asset and print its human readable name to standard # output. categorize_asset () { local file="$1" local os=$(echo "$file" | sed -e 's/^git-lfs-//' -e 's/[-.].*$//') local arch=$(echo "$file" | ruby -pe '$_.gsub!(/\Agit-lfs-[^-]+-([^-]+)[-.].*/, "\\1")') case "$file" in git-lfs-v*.*.*.tar.gz) echo "Source";; git-lfs-windows-v*.*.*.exe) echo "Windows Installer";; sha256sums) echo "Unsigned SHA-256 Hashes";; sha256sums.asc) echo "Signed SHA-256 Hashes";; hashes) echo "Unsigned Hashes";; hashes.asc) echo "Signed Hashes";; *) printf "%s %s\n" "$(categorize_os "$os")" "$(categorize_arch "$arch")";; esac } # Provide a content type for the asset based on its file name. content_type () { local file="$1" case "$file" in *.zip) echo "application/zip";; *.tar.gz) echo "application/gzip";; *.exe) echo "application/octet-stream";; *.asc|sha256sums*|hashes*) echo "text/plain";; esac } # Format the JSON for creating the release and print it to standard output. format_release_json () { local version="$1" local bodyfile="$2" ruby -rjson -e 'puts JSON.generate({ tag_name: ARGV[0], name: ARGV[0], draft: true, body: File.read(ARGV[1]), })' "$version" "$bodyfile" } # Create a draft release and print the upload URL for release assets to the # standard output. If a release with that version already exists, do nothing # instead. create_release () { local version="$1" local bodyfile="$2" # Check to see if we already have such a release. If so, don't create it. curl https://api.github.com/repos/$REPO/releases | \ jq -r '.[].name' | grep -qsF "$version" && { say "Found an existing release for this version." curl https://api.github.com/repos/$REPO/releases | \ jq -r '.[] | select(.name == "'"$version"'") | .upload_url' | \ sed -e 's/{.*}//g' return } # This can be large, so pass it in a file. 
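  # (curl then reads the JSON payload from that file via its @file syntax
  # rather than receiving the whole document on the command line.)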
format_release_json "$version" "$bodyfile" >> "$WORKDIR/release-json" curl -H'Content-Type: application/json' -d"@$WORKDIR/release-json" \ https://api.github.com/repos/$REPO/releases | \ jq -r '.upload_url' | sed -e 's/{.*}//g' } # Update the draft release with a new body and print the upload URL for release assets to the # standard output. A release with the given version must already exist. patch_release () { local version="$1" local bodyfile="$2" # Find the URL of this release. local url=$(curl https://api.github.com/repos/$REPO/releases | \ jq -r '.[] | select(.name == "'"$version"'") | .url') [ -n "$url" ] || abort "No existing release found for version $version." say "Found the existing release for this version." # This can be large, so pass it in a file. format_release_json "$version" "$bodyfile" >> "$WORKDIR/release-json" curl -XPATCH -H'Content-Type: application/json' -d"@$WORKDIR/release-json" \ $url | \ jq -r '.upload_url' | sed -e 's/{.*}//g' } # Find the release files for the given version. release_files () { local version="$1" local assets="${2:-bin/releases}" [ -n "$version" ] || return 1 find "$assets" -name '*.tar.gz' -o \ -name '*386*.zip' -o \ -name '*amd64*.zip' -o \ -name '*arm64*.zip' -o \ -name '*.exe' -o \ -name 'sha256sums.asc' -o \ -name 'hashes.asc' | \ grep -E "$version|sha256sums.asc|hashes.asc" | \ grep -v "assets" | \ LC_ALL=C sort } # Format the body message and print the file which contains it to the standard # output. finalize_body_message () { local version="$1" local changelog="$2" local assets="$3" version=$(echo "$version" | sed -e 's/^v//') cat "$changelog" > "$WORKDIR/body-template" cat <> "$WORKDIR/body-template" ## Packages Up to date packages are available on [PackageCloud](https://packagecloud.io/github/git-lfs) and [Homebrew](http://brew.sh/). $(script/distro-tool --distro-markdown) ## SHA-256 hashes: EOM shasum -a256 $(release_files "$version" "$assets") | \ ruby -pe '$_.chomp!' \ -e '$_.gsub!(/^([0-9a-f]+)\s+.*\/([^\/]+)$/, "**\\2**\n\\1\n\n")' | \ ruby -0777 -pe '$_.gsub!(/\n+\z/, "\n")' >> "$WORKDIR/body-template" sed -e "s/VERSION/$version/g" < "$WORKDIR/body-template" > "$WORKDIR/body" echo "$WORKDIR/body" } # Filter a list of files from standard input, removing entries found in the file # provided. filter_files () { local filter="$1" # If the filter file is empty (that is, no assets have been uploaded), grep # will produce no output, and therefore nothing will be uploaded. That's not # what we want, so handle this case specially. if [ -s "$filter" ] then grep -vF -f "$filter" else cat fi } # Upload assets from the release directory to GitHub. Only assets that are not # already existing should be uploaded. upload_assets () { local version="$1" local upload_url="$2" local src="$3" local file desc base ct encdesc encbase curl https://api.github.com/repos/$REPO/releases | \ jq -r '.[] | select(.name == "'"$version"'") | .assets | .[] | .name' \ > "$WORKDIR/existing-assets" for file in $(release_files "$version" "$src" | filter_files "$WORKDIR/existing-assets") do base=$(basename "$file") desc=$(categorize_asset "$base") ct=$(content_type "$base") encbase=$(uri_encode "$base") encdesc=$(uri_encode "$desc") say "\tUploading %s as \"%s\" (Content-Type %s)..." "$base" "$desc" "$ct" curl --data-binary "@$file" -H'Accept: application/vnd.github.v3+json' \ -H"Content-Type: $ct" "$upload_url?name=$encbase&label=$encdesc" \ >"$WORKDIR/response" download=$(jq -r '.url' "$WORKDIR/response") done say "Assets uploaded." 
} # Download assets from GitHub to the specified directory. download_assets () { local version="$1" local dir="$2" curl https://api.github.com/repos/$REPO/releases | \ jq -rc '.[] | select(.name == "'"$version"'") | .assets | .[] | [.name,.url]' | \ ruby -rjson -ne 'puts JSON.parse($_).join(" ")' \ > "$WORKDIR/assets" cat "$WORKDIR/assets" | (while read base url do say "\tDownloading %s..." "$base" ( cd "$dir" && curl -Lo "$base" -H"Accept: application/octet-stream" "$url" ) done) } # Download the assets and verify the signature made on them. verify_assets () { local version="$1" local dir="$WORKDIR/verify" mkdir "$dir" download_assets "$version" "$dir" # If the OpenPGP data is not valid, gpg -d will output nothing to stdout, and # shasum will then fail. say "Checking assets for integrity with SHA-256..." (cd "$dir" && gpg -d sha256sums.asc | shasum -a 256 -c) say "Checking assets for integrity with SHA-2..." (cd "$dir" && gpg -d hashes.asc | grep 'SHA[0-9][^-]' | shasum -c) if command -v sha3sum >/dev/null 2>&1 then say "Checking assets for integrity with SHA-3..." (cd "$dir" && gpg -d hashes.asc | grep 'SHA3-' | sha3sum -c) fi if command -v b2sum >/dev/null 2>&1 then say "Checking assets for integrity with BLAKE2b..." # b2sum on Linux does not handle BLAKE2s, only BLAKE2b. (cd "$dir" && gpg -d hashes.asc | grep 'BLAKE2b' | b2sum -c) fi say "\nAssets look good!" } # Extract the changelog for the given version from the history and save it in a # file. Print the filename of the changelog to standard output. extract_changelog () { local version="$1" git cat-file blob "$version:CHANGELOG.md" | \ ruby -ne "version=%Q($version)[1..-1]; state ||= :silent; text ||= [];" \ -e 'if state == :print && $_.start_with?("## "); puts text.join.strip; exit; end;' \ -e 'text << $_ if state == :print;' \ -e 'state = :print if $_.start_with?("## #{version}")' \ > "$WORKDIR/changelog" echo "$WORKDIR/changelog" } # Perform the final steps to verify a release finalize () { local version="$1" local inspect="$2" local downloads="$WORKDIR/finalize" local uploads="$WORKDIR/finalize-uploads" say "Finalizing the release process..." say "Downloading assets..." mkdir "$downloads" mkdir "$uploads" download_assets "$version" "$downloads" if [ -n "$inspect" ] then say "Dropping you to a shell to inspect the assets." say "Type 'exit 0' to continue, or 'exit 1' to abort." (cd "$downloads" && $SHELL) fi say "Signing asset manifest..." ( root="$(git rev-parse --show-toplevel)" && cd "$downloads" && \ shasum -a256 -b * | grep -vE '(assets|sha256sums|hashes)' | \ gpg --digest-algo SHA256 --clearsign >sha256sums.asc && "$root/script/hash-files" * | grep -vE '(assets|sha256sums|hashes)' | \ gpg --digest-algo SHA512 --clearsign >hashes.asc ) say "Formatting the final body of the GitHub release now..." local changelog=$(extract_changelog "$version") local bodyfile=$(finalize_body_message "$version" "$changelog" "$downloads") say "Uploading final release body..." local upload_url=$(patch_release "$version" "$bodyfile") say "Uploading final versions of assets..." cp "$downloads/sha256sums.asc" "$downloads/hashes.asc" "$uploads" upload_assets "$version" "$upload_url" "$uploads" # Verification occurs in caller below. } # Provide a helpful usage message and exit. usage () { local status="$1" cat <&2 sanity_check "$version" "$FINALIZE" if [ -n "$FINALIZE" ] then finalize "$version" "$inspect" else say "Formatting the body of the GitHub release now..." 
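    # Draft path: extract this version's notes from CHANGELOG.md, expand
    # them into the full release body, create (or reuse) a draft GitHub
    # release, and upload the built assets from bin/releases.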
local changelog=$(extract_changelog "$version") local bodyfile=$(finalize_body_message "$version" "$changelog") say "Creating a GitHub release for %s..." "$version" local upload_url=$(create_release "$version" "$bodyfile") say "Uploading assets to GitHub..." upload_assets "$version" "$upload_url" bin/releases fi if [ -z "$SKIP_VERIFY" ] then say "Verifying assets..." verify_assets "$version" fi say "Okay, done. Sanity-check the release and publish it." } main "$@" git-lfs-3.6.1/script/windows-installer/000077500000000000000000000000001472372047300200705ustar00rootroot00000000000000git-lfs-3.6.1/script/windows-installer/git-lfs-logo.bmp000066400000000000000000000231061472372047300230750ustar00rootroot00000000000000BMF&6(7:&  kyZa1I1I1=!1kuBU1I1I1I1=!1!1)5Ze1I1I1I1I1I)1!1!1!1!1:Is1I1I1I1I1I1I1I)5!1!1!1!1!1!1R]:I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1{}BU1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1)9Zm1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1BM{1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1ce:M1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1)1J]1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11=cq1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1JQ1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1km:Q1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1)11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1Iky1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1IRa1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I:Q1I1I1I1I1I1I1I1I1IBQ)1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IBU:I!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IJY19!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I:IJY)1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IBQBU!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I:M1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IJY:E!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1Is1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IR]1=!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1IRe1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1MR])1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1IBQ1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IBQJU!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1IBQ1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IJ]BI!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1IJaky1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IRa)=!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1IkuRa1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I:IRa)1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I:Q1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IBUJY!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IBQ1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IRaBI:M1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IRecu1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I:Qs1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1Ik}J]1I1I1I1I1I1I1I1I1I1I1I1IRe1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I:M:M1I1I1I1I1I1I1I1IcqBQ1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IBU{1I1I1I1I1IJ]1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IZiZm1I:Mky1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IsRa1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I:M:Q1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IJY1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1Icqcq1I1I1I1I1I1I1I1I1I1I1I1I1I1IsJ]1I1I1I1I1I1I1I1I1I1IZe:M1I1I1I1I1I1IBU{1I1I1I1IZmkygit-lfs-3.6.1/script/windows-installer/git-lfs-logo.ico0000664000
[binary data: remainder of the git-lfs-logo.ico image payload, not representable as text]
git-lfs-3.6.1/script/windows-installer/git-lfs-wizard-image.bmp
[binary data: git-lfs-wizard-image.bmp image payload, not representable as text]
ŒԷaͦŻze׽"µl5+3öo໨MMĐƼ~Md忯_TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT‹⬏U⬏U⬏'ϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬǼU⬏7ϬU⬏7ϬU⬏7ϬU⬏7ϬU⬏7ϬU⬏7Ⱦ 깥GϬU⬏7Ǽ~߬ϬU⬏7Ǽ~ƕϬU⬏7Ǽ~eʱ%ϬU⬏7Ǽ~>ѲϬU⬏7Ǽ~7ͥƻ{ϬU⬏7Ǽ~ ömư!ϬU⬏7Ǽ~Ǿ۴4ϬU⬏7Ǽ~+ͥϬU⬏7Ǽ~Ț歑ϬU⬏7Ǽ~깥FϬU⬏7Ǽ~꺧IϬU⬏7Ǽ~Ȝ孑ϬU⬏7Ǽ~7ϭϬU⬏7Ǽ~鵞6ϬU⬏7Ǽ~ Îõ4ϬU⬏7Ǽ~Tǽöp2ϬU⬏7Ǽ~ҲϬU⬏7Ǽ~ҲϬU⬏7Ǽ~ѰϬU⬏7Ǽ~ Я̥U⬏7Ǽ~Яִ2U⬏7ΪΪΪΪΪΪΪΪΪΪΪΪΪΪΪΪγ.⬏7洜0ǘ⬏7ʳ+Ⱦ⬏7䳚+đ⬏7ʹ0⬏0̳-⬏ƕ⬏Ⱦ佫UHHHHHHHHHHHHHHHHHHHHHHHHɜŮ긽긽긽ŭι긽긽긽긽긽긽긽ŭϹ¹긽긽긽긽踽԰к·Ӧܷ֦Ԧܮ겷稭趻鵺ϲ޳޼ŮĨت׬賸çۨ깾຿§اѩĺ߳ıҧڨ۪ͫɨ水ӧ絻Ǧͦ鵻ا⬱Ḿר˦ᶻȦ좷uOv0]LKKKKKKKKKKM4aV|⨬ꜲOv1_!RKKKKKKKKKKKKKKKKKKKL#S4aX}ˮȦ냟/]KKKKKKKKKKKKKKKKKKKKKKKKKKKK:e⩭簵/]KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKL;f鵺ܧ!RKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK-\Ҧ궽qKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK骮⦪aKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKʦܻ²aKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKݨaKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK宲aKKKKKKKO5aIrZrnX}Fp0^LKKKKKKKaKKKKKBl눣2`KKKKKaKK@j}2`KKlO톢hMu:f0^&VMO*X2`:fRykjLꮲ쩽럵hGp0^OKKKKKKKKKKKKKKKKP4aLtpꏩ穭w%UKKKKKKKKKKKKKKKKKKKKKKKKKK0^⨬h'WKKKKKKKKKKKKKKKKKKKKKKKKKKKKKL,ZԦާIqKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKhاۧꆡKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKۧاdKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKި֦aKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKਬզaKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKᨬaKKKKKKKKL"S,[5b\cmd` to the `PATH` so that we find it // when registering Git LFS later. function AddGitForWindowsCMDToPATHIfNeeded: boolean; var Domain: Integer; Key, PathOption, AppPath, Path: string; begin Result := False; Key := 'Microsoft\Windows\CurrentVersion\Uninstall\Git_is1'; if RegKeyExists(HKEY_LOCAL_MACHINE, 'Software\Wow6432Node\' + Key) then begin Domain := HKEY_LOCAL_MACHINE; Key := 'Software\Wow6432Node\' + Key; end else if RegKeyExists(HKEY_CURRENT_USER, 'Software\Wow6432Node\' + Key) then begin Domain := HKEY_CURRENT_USER; Key := 'Software\Wow6432Node\' + Key; end else if RegKeyExists(HKEY_LOCAL_MACHINE, 'Software\' + Key) then begin Domain := HKEY_LOCAL_MACHINE; Key := 'Software\' + Key; end else if RegKeyExists(HKEY_CURRENT_USER, 'Software\' + Key) then begin Domain := HKEY_CURRENT_USER; Key := 'Software\' + Key; end else Exit; if (not RegQueryStringValue(Domain, Key, 'Inno Setup CodeFile: Path Option', PathOption)) or (PathOption <> 'BashOnly') or (not RegQueryStringValue(Domain, Key, 'Inno Setup: App Path', AppPath)) or (not FileExists(AppPath + '\cmd\git.exe')) then Exit; // Extend PATH so that it finds `git.exe` Path := GetEnv('PATH'); if Path = '' then Path := AppPath + '\cmd' else Path := AppPath + '\cmd;' + Path; SetEnvironmentVariable('PATH', Path); Result := True; end; // Verify that a Git executable is found in the PATH, and if it does not // reside in either 'C:\Program Files' or 'C:\Program Files (x86)', warn // the user in case it is not the Git installation they expected. 
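// The lookup mimics the command shell's own search: each semicolon-separated
// PATH entry is combined with each PATHEXT extension until an existing 'git'
// executable is found.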
function GitFoundInPath(): boolean; var PFiles32,PFiles64: string; PathEnv,Path: string; PathExt,Ext: string; i,j: integer; RegisterOrDeregister: string; begin if IsUninstaller then RegisterOrDeregister := 'deregister' else RegisterOrDeregister := 'register'; PFiles32 := AnsiLowercase(ExpandConstant('{commonpf32}\')) if IsWin64 then PFiles64 := AnsiLowercase(ExpandConstant('{commonpf64}\')) else PFiles64 := PFiles32; // `commonpf64` is not available on 32-bit Windows PathEnv := GetEnv('PATH') + ';'; repeat i := Pos(';', PathEnv); Path := Copy(PathEnv, 1, i-1) + '\git'; PathEnv := Copy(PathEnv, i+1, Length(PathEnv)-i); PathExt := AnsiLowercase(GetEnv('PATHEXT')) + ';'; repeat j := Pos(';', PathExt); Ext := Copy(PathExt, 1, j-1); PathExt := Copy(PathExt, j+1, Length(PathExt)-j); if FileExists(Path + Ext) then begin if (Pos(PFiles32, AnsiLowercase(Path)) = 1) or (Pos(PFiles64, AnsiLowercase(Path)) = 1) then begin Result := True; Exit; end; Log('Warning: Found Git in unexpected location: "' + Path + Ext + '"'); Result := (SuppressibleMsgBox( 'An executable Git program was found in an unexpected location outside of Program Files:' + #13+#10 + ' "' + Path + Ext + '"' + #13+#10 + 'If this looks dubious, Git LFS should not be ' + RegisterOrDeregister + 'ed using it.' + #13+#10 + #13+#10 + 'Do you want to ' + RegisterOrDeregister + ' Git LFS using this Git program?', mbConfirmation, MB_YESNO, IDNO) = IDYES); if Result then Log('Using Git found at: "' + Path + Ext + '"') else Log('Refusing to use Git found at: "' + Path + Ext + '"'); Exit; end; until Result or (PathExt = ''); until Result or (PathEnv = ''); if AddGitForWindowsCMDToPATHIfNeeded then Result := True else SuppressibleMsgBox( 'Could not find Git; can not ' + RegisterOrDeregister + ' Git LFS.', mbError, MB_OK, IDOK); end; // Runs the lfs initialization. procedure InstallGitLFS(); var ResultCode: integer; begin Exec( ExpandConstant('{cmd}'), ExpandConstant('/C ""{app}\git-lfs.exe" install"'), '', SW_HIDE, ewWaitUntilTerminated, ResultCode ); if not ResultCode = 1 then MsgBox( 'Git LFS was not able to automatically initialize itself. 
' + 'Please run "git lfs install" from the commandline.', mbInformation, MB_OK); end; // Event function automatically called when installing: function InitializeSetup(): Boolean; begin Result := GitFoundInPath(); end; // Event function automatically called when uninstalling: function InitializeUninstall(): Boolean; var ResultCode: integer; begin Result := False; if GitFoundInPath() then begin Exec( ExpandConstant('{cmd}'), ExpandConstant('/C ""{app}\git-lfs.exe" uninstall"'), '', SW_HIDE, ewWaitUntilTerminated, ResultCode ); Result := True; end; end; git-lfs-3.6.1/ssh/000077500000000000000000000000001472372047300136745ustar00rootroot00000000000000git-lfs-3.6.1/ssh/connection.go000066400000000000000000000132031472372047300163610ustar00rootroot00000000000000package ssh import ( "bytes" "fmt" "sync" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/git-lfs/git-lfs/v3/tr" "github.com/git-lfs/pktline" "github.com/rubyist/tracerx" ) type SSHTransfer struct { lock *sync.RWMutex conn []*PktlineConnection osEnv config.Environment gitEnv config.Environment meta *SSHMetadata operation string multiplexing bool controlPath string } func NewSSHTransfer(osEnv config.Environment, gitEnv config.Environment, meta *SSHMetadata, operation string) (*SSHTransfer, error) { conn, multiplexing, controlPath, err := startConnection(0, osEnv, gitEnv, meta, operation, "") if err != nil { return nil, err } return &SSHTransfer{ lock: &sync.RWMutex{}, osEnv: osEnv, gitEnv: gitEnv, meta: meta, operation: operation, multiplexing: multiplexing, controlPath: controlPath, conn: []*PktlineConnection{conn}, }, nil } func startConnection(id int, osEnv config.Environment, gitEnv config.Environment, meta *SSHMetadata, operation string, multiplexControlPath string) (conn *PktlineConnection, multiplexing bool, controlPath string, err error) { tracerx.Printf("spawning pure SSH connection (#%d)", id) var errbuf bytes.Buffer exe, args, multiplexing, controlPath := GetLFSExeAndArgs(osEnv, gitEnv, meta, "git-lfs-transfer", operation, true, multiplexControlPath) cmd, err := subprocess.ExecCommand(exe, args...) if err != nil { return nil, false, "", err } r, err := cmd.StdoutPipe() if err != nil { return nil, false, "", err } w, err := cmd.StdinPipe() if err != nil { return nil, false, "", err } cmd.Stderr = &errbuf err = cmd.Start() if err != nil { return nil, false, "", err } var pl Pktline if osEnv.Bool("GIT_TRACE_PACKET", false) { pl = &TraceablePktline{id: id, pl: pktline.NewPktline(r, w)} } else { pl = pktline.NewPktline(r, w) } conn = &PktlineConnection{ cmd: cmd, pl: pl, r: r, w: w, } err = conn.Start() if err != nil { r.Close() w.Close() cmd.Wait() err = errors.Combine([]error{err, fmt.Errorf(tr.Tr.Get("Failed to connect to remote SSH server: %s", cmd.Stderr))}) tracerx.Printf("pure SSH connection unsuccessful (#%d)", id) } else { tracerx.Printf("pure SSH connection successful (#%d)", id) } return conn, multiplexing, controlPath, err } // Connection returns the nth connection (starting from 0) in this transfer // instance or nil if there is no such item. func (st *SSHTransfer) IsMultiplexingEnabled() bool { return st.multiplexing } // Connection returns the nth connection (starting from 0) in this transfer // instance if it is initialized and otherwise initializes a new connection and // saves it in the nth position. 
In all cases, nil is returned with an error // if n is greater than the maximum number of connections, including when // the connection array itself is nil. func (st *SSHTransfer) Connection(n int) (*PktlineConnection, error) { st.lock.RLock() if n >= len(st.conn) { st.lock.RUnlock() return nil, errors.New(tr.Tr.Get("pure SSH connection unavailable (#%d)", n)) } if st.conn[n] != nil { defer st.lock.RUnlock() return st.conn[n], nil } st.lock.RUnlock() st.lock.Lock() defer st.lock.Unlock() if st.conn[n] != nil { return st.conn[n], nil } conn, _, err := st.spawnConnection(n) if err != nil { return nil, err } st.conn[n] = conn return conn, nil } // ConnectionCount returns the number of connections this object has. func (st *SSHTransfer) ConnectionCount() int { st.lock.RLock() defer st.lock.RUnlock() return len(st.conn) } // SetConnectionCount sets the number of connections to the specified number. func (st *SSHTransfer) SetConnectionCount(n int) error { st.lock.Lock() defer st.lock.Unlock() return st.setConnectionCount(n) } // SetConnectionCountAtLeast sets the number of connections to be not less than // the specified number. func (st *SSHTransfer) SetConnectionCountAtLeast(n int) error { st.lock.Lock() defer st.lock.Unlock() count := len(st.conn) if n <= count { return nil } return st.setConnectionCount(n) } func (st *SSHTransfer) spawnConnection(n int) (*PktlineConnection, string, error) { conn, _, controlPath, err := startConnection(n, st.osEnv, st.gitEnv, st.meta, st.operation, st.controlPath) if err != nil { tracerx.Printf("failed to spawn pure SSH connection (#%d): %s", n, err) return nil, "", err } return conn, controlPath, err } func (st *SSHTransfer) setConnectionCount(n int) error { count := len(st.conn) if n < count { tn := n if tn == 0 { tn = 1 } for i, item := range st.conn[tn:count] { if item == nil { tracerx.Printf("skipping uninitialized lazy pure SSH connection (#%d) (resetting total from %d to %d)", i, count, n) continue } tracerx.Printf("terminating pure SSH connection (#%d) (resetting total from %d to %d)", tn+i, count, n) if err := item.End(); err != nil { return err } } st.conn = st.conn[0:tn] } else if n > count { for i := count; i < n; i++ { if i == 0 { conn, controlPath, err := st.spawnConnection(i) if err != nil { return err } st.conn = append(st.conn, conn) st.controlPath = controlPath } else { st.conn = append(st.conn, nil) } } } if n == 0 && count > 0 { tracerx.Printf("terminating pure SSH connection (#0) (resetting total from %d to %d)", count, n) if err := st.conn[0].End(); err != nil { return err } st.conn = nil st.controlPath = "" } return nil } func (st *SSHTransfer) Shutdown() error { tracerx.Printf("shutting down pure SSH connections") return st.SetConnectionCount(0) } git-lfs-3.6.1/ssh/pktline.go000066400000000000000000000035251472372047300156760ustar00rootroot00000000000000package ssh import ( "io" "github.com/git-lfs/pktline" "github.com/rubyist/tracerx" ) func pktlineReader(p Pktline) io.Reader { if pl, ok := p.(*pktline.Pktline); ok { return pktline.NewPktlineReaderFromPktline(pl, 65536) } tp := p.(*TraceablePktline) return pktline.NewPktlineReaderFromPktline(tp.pl, 65536) } type Pktline interface { ReadPacketList() ([]string, error) ReadPacketTextWithLength() (string, int, error) WritePacket([]byte) error WritePacketText(string) error WriteDelim() error WriteFlush() error } type TraceablePktline struct { id int pl *pktline.Pktline } func (tp *TraceablePktline) ReadPacketList() ([]string, error) { var list []string for { data, pktLen, err := 
tp.pl.ReadPacketTextWithLength() if err != nil { return nil, err } if pktLen <= 1 { tracerx.Printf("packet %02x < %04x", tp.id, pktLen) } else { tracerx.Printf("packet %02x < %s", tp.id, data) } if pktLen == 0 { break } list = append(list, data) } return list, nil } func (tp *TraceablePktline) ReadPacketTextWithLength() (string, int, error) { s, pktLen, err := tp.pl.ReadPacketTextWithLength() if err != nil { return "", 0, err } if pktLen <= 1 { tracerx.Printf("packet %02x < %04x", tp.id, pktLen) } else { tracerx.Printf("packet %02x < %s", tp.id, s) } return s, pktLen, nil } func (tp *TraceablePktline) WritePacket(b []byte) error { // Don't trace because this is probably binary data. return tp.pl.WritePacket(b) } func (tp *TraceablePktline) WritePacketText(s string) error { tracerx.Printf("packet %02x > %s", tp.id, s) return tp.pl.WritePacketText(s) } func (tp *TraceablePktline) WriteDelim() error { tracerx.Printf("packet %02x > 0001", tp.id) return tp.pl.WriteDelim() } func (tp *TraceablePktline) WriteFlush() error { tracerx.Printf("packet %02x > 0000", tp.id) return tp.pl.WriteFlush() } git-lfs-3.6.1/ssh/protocol.go000066400000000000000000000142341472372047300160700ustar00rootroot00000000000000package ssh import ( "io" "strconv" "strings" "sync" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/git-lfs/git-lfs/v3/tr" ) type PktlineConnection struct { r io.ReadCloser w io.WriteCloser mu sync.Mutex cmd *subprocess.Cmd pl Pktline } func (conn *PktlineConnection) Lock() { conn.mu.Lock() } func (conn *PktlineConnection) Unlock() { conn.mu.Unlock() } func (conn *PktlineConnection) Start() error { conn.Lock() defer conn.Unlock() return conn.negotiateVersion() } func (conn *PktlineConnection) End() error { conn.Lock() defer conn.Unlock() err := conn.SendMessage("quit", nil) if err != nil { return err } _, err = conn.ReadStatus() conn.r.Close() conn.w.Close() conn.cmd.Wait() return err } func (conn *PktlineConnection) negotiateVersion() error { pkts, err := conn.pl.ReadPacketList() if err != nil { return errors.NewProtocolError(tr.Tr.Get("Unable to negotiate version with remote side (unable to read capabilities)"), err) } ok := false for _, line := range pkts { if line == "version=1" { ok = true } } if !ok { return errors.NewProtocolError(tr.Tr.Get("Unable to negotiate version with remote side (missing version=1)"), nil) } err = conn.SendMessage("version 1", nil) if err != nil { return errors.NewProtocolError(tr.Tr.Get("Unable to negotiate version with remote side (unable to send version)"), err) } status, args, _, err := conn.ReadStatusWithLines() if err != nil { return errors.NewProtocolError(tr.Tr.Get("Unable to negotiate version with remote side (unable to read status)"), err) } if status != 200 { text := tr.Tr.Get("no error provided") if len(args) > 0 { text = tr.Tr.Get("server said: %q", args[0]) } return errors.NewProtocolError(tr.Tr.Get("Unable to negotiate version with remote side (unexpected status %d; %s)", status, text), nil) } return nil } func (conn *PktlineConnection) SendMessage(command string, args []string) error { err := conn.pl.WritePacketText(command) if err != nil { return err } for _, arg := range args { err = conn.pl.WritePacketText(arg) if err != nil { return err } } return conn.pl.WriteFlush() } func (conn *PktlineConnection) SendMessageWithLines(command string, args []string, lines []string) error { err := conn.pl.WritePacketText(command) if err != nil { return err } for _, arg := range args { err = conn.pl.WritePacketText(arg) 
if err != nil { return err } } err = conn.pl.WriteDelim() if err != nil { return err } for _, line := range lines { err = conn.pl.WritePacketText(line) if err != nil { return err } } return conn.pl.WriteFlush() } func (conn *PktlineConnection) SendMessageWithData(command string, args []string, data io.Reader) error { err := conn.pl.WritePacketText(command) if err != nil { return err } for _, arg := range args { err = conn.pl.WritePacketText(arg) if err != nil { return err } } err = conn.pl.WriteDelim() if err != nil { return err } buf := make([]byte, 32768) for { n, err := data.Read(buf) if n > 0 { err := conn.pl.WritePacket(buf[0:n]) if err != nil { return err } } if err != nil { break } } return conn.pl.WriteFlush() } func (conn *PktlineConnection) ReadStatus() (int, error) { status := 0 seenStatus := false for { s, pktLen, err := conn.pl.ReadPacketTextWithLength() if err != nil { return 0, errors.NewProtocolError(tr.Tr.Get("error reading packet"), err) } switch { case pktLen == 0: if !seenStatus { return 0, errors.NewProtocolError(tr.Tr.Get("no status seen"), nil) } return status, nil case !seenStatus: ok := false if strings.HasPrefix(s, "status ") { status, err = strconv.Atoi(s[7:]) ok = err == nil } if !ok { return 0, errors.NewProtocolError(tr.Tr.Get("expected status line, got %q", s), err) } seenStatus = true default: return 0, errors.NewProtocolError(tr.Tr.Get("unexpected data, got %q", s), err) } } } // ReadStatusWithData reads a status, arguments, and any binary data. Note that // the reader must be fully exhausted before invoking any other read methods. func (conn *PktlineConnection) ReadStatusWithData() (int, []string, io.Reader, error) { args := make([]string, 0, 100) status := 0 seenStatus := false for { s, pktLen, err := conn.pl.ReadPacketTextWithLength() if err != nil { return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("error reading packet"), err) } if pktLen == 0 { if !seenStatus { return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("no status seen"), nil) } return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("unexpected flush packet"), nil) } else if !seenStatus { ok := false if strings.HasPrefix(s, "status ") { status, err = strconv.Atoi(s[7:]) ok = err == nil } if !ok { return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("expected status line, got %q", s), err) } seenStatus = true } else if pktLen == 1 { break } else { args = append(args, s) } } return status, args, pktlineReader(conn.pl), nil } // ReadStatusWithLines reads a status, arguments, and a set of text lines. 
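// The expected packet sequence is a "status <n>" packet, zero or more
// argument packets, an optional delimiter packet followed by text-line
// packets, and a terminating flush packet.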
func (conn *PktlineConnection) ReadStatusWithLines() (int, []string, []string, error) { args := make([]string, 0, 100) lines := make([]string, 0, 100) status := 0 seenDelim := false seenStatus := false for { s, pktLen, err := conn.pl.ReadPacketTextWithLength() if err != nil { return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("error reading packet"), err) } switch { case pktLen == 0: if !seenStatus { return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("no status seen"), nil) } return status, args, lines, nil case seenDelim: lines = append(lines, s) case !seenStatus: ok := false if strings.HasPrefix(s, "status ") { status, err = strconv.Atoi(s[7:]) ok = err == nil } if !ok { return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("expected status line, got %q", s), err) } seenStatus = true case pktLen == 1: if seenDelim { return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("unexpected delimiter packet"), nil) } seenDelim = true default: args = append(args, s) } } } git-lfs-3.6.1/ssh/ssh.go000066400000000000000000000136331472372047300150260ustar00rootroot00000000000000package ssh import ( "fmt" "os" "path" "path/filepath" "regexp" "runtime" "strings" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/git-lfs/git-lfs/v3/tools" "github.com/rubyist/tracerx" ) type sshVariant string const ( variantSSH = sshVariant("ssh") variantSimple = sshVariant("simple") variantPutty = sshVariant("putty") variantTortoise = sshVariant("tortoiseplink") ) type SSHMetadata struct { UserAndHost string Port string Path string } func FormatArgs(cmd string, args []string, needShell bool) (string, []string) { if !needShell { return cmd, args } return subprocess.FormatForShellQuotedArgs(cmd, args) } func GetLFSExeAndArgs(osEnv config.Environment, gitEnv config.Environment, meta *SSHMetadata, command, operation string, multiplexDesired bool, multiplexControlPath string) (exe string, args []string, multiplexing bool, controlPath string) { exe, args, needShell, multiplexing, controlPath := GetExeAndArgs(osEnv, gitEnv, meta, multiplexDesired, multiplexControlPath) args = append(args, fmt.Sprintf("%s %s %s", command, meta.Path, operation)) exe, args = FormatArgs(exe, args, needShell) tracerx.Printf("run_command: %s %s", exe, strings.Join(args, " ")) return exe, args, multiplexing, controlPath } // Parse command, and if it looks like a valid command, return the ssh binary // name, the command to run, and whether we need a shell. If not, return // existing as the ssh binary name. 
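// A non-empty command string implies that a shell is needed so that any
// arguments embedded in the string are split and quoted correctly by
// FormatArgs.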
func parseShellCommand(command string, existing string) (ssh string, cmd string, needShell bool) { ssh = existing if cmdArgs := tools.QuotedFields(command); len(cmdArgs) > 0 { needShell = true ssh = cmdArgs[0] cmd = command } return } func findVariant(variant string) (bool, sshVariant) { switch variant { case "ssh", "simple", "putty", "tortoiseplink": return false, sshVariant(variant) case "plink": return false, variantPutty case "auto": return true, "" default: return false, variantSSH } } func autodetectVariant(osEnv config.Environment, gitEnv config.Environment, basessh string) sshVariant { if basessh != defaultSSHCmd { // Strip extension for easier comparison if ext := filepath.Ext(basessh); len(ext) > 0 { basessh = basessh[:len(basessh)-len(ext)] } if strings.EqualFold(basessh, "plink") { return variantPutty } if strings.EqualFold(basessh, "tortoiseplink") { return variantTortoise } } return "ssh" } func getVariant(osEnv config.Environment, gitEnv config.Environment, basessh string) sshVariant { variant, ok := osEnv.Get("GIT_SSH_VARIANT") if !ok { variant, ok = gitEnv.Get("ssh.variant") } autodetect, val := findVariant(variant) if ok && !autodetect { return val } return autodetectVariant(osEnv, gitEnv, basessh) } // findRuntimeDir returns a path to the runtime directory if one exists and is // guaranteed to be private. func findRuntimeDir(osEnv config.Environment) string { if dir, ok := osEnv.Get("XDG_RUNTIME_DIR"); ok { return dir } return "" } func getControlDir(osEnv config.Environment) (string, error) { tmpdir, pattern := "", "sock-*" if runtime.GOOS == "darwin" { // On Darwin, the default temporary directory results in a socket path that's too long. tmpdir = "/tmp" } dir := findRuntimeDir(osEnv) if dir == "" { return os.MkdirTemp(tmpdir, pattern) } return os.MkdirTemp(dir, pattern) } // Return the executable name for ssh on this machine and the base args // Base args includes port settings, user/host, everything pre the command to execute func GetExeAndArgs(osEnv config.Environment, gitEnv config.Environment, meta *SSHMetadata, multiplexDesired bool, multiplexControlPath string) (exe string, baseargs []string, needShell bool, multiplexing bool, controlPath string) { var cmd string ssh, _ := osEnv.Get("GIT_SSH") sshCmd, _ := osEnv.Get("GIT_SSH_COMMAND") ssh, cmd, needShell = parseShellCommand(sshCmd, ssh) if ssh == "" { sshCmd, _ := gitEnv.Get("core.sshcommand") ssh, cmd, needShell = parseShellCommand(sshCmd, defaultSSHCmd) } if cmd == "" { cmd = ssh } basessh := filepath.Base(ssh) variant := getVariant(osEnv, gitEnv, basessh) args := make([]string, 0, 7) if variant == variantTortoise { // TortoisePlink requires the -batch argument to behave like ssh/plink args = append(args, "-batch") } multiplexing = false multiplexEnabled := gitEnv.Bool("lfs.ssh.automultiplex", runtime.GOOS != "windows") if variant == variantSSH && multiplexDesired && multiplexEnabled { controlMasterArg := "-oControlMaster=no" controlPath = multiplexControlPath if multiplexControlPath == "" { controlMasterArg = "-oControlMaster=yes" controlDir, err := getControlDir(osEnv) if err == nil { controlPath = path.Join(controlDir, "lfs.sock") } } if controlPath != "" { multiplexing = true args = append(args, controlMasterArg, fmt.Sprintf("-oControlPath=%s", controlPath)) } } if len(meta.Port) > 0 { if variant == variantPutty || variant == variantTortoise { args = append(args, "-P") } else { args = append(args, "-p") } args = append(args, meta.Port) } if sshOptPrefixRE.MatchString(meta.UserAndHost) { if variant == 
variantSSH { // inserts a separator between cli -options and host/cmd commands // example: $ ssh -p 12345 -- user@host.com git-lfs-authenticate ... args = append(args, "--", meta.UserAndHost) } else { // no prefix supported, strip leading - off host to prevent cmd like: // $ git config lfs.url ssh://-proxycmd=whatever // $ plink -P 12345 -proxycmd=foo git-lfs-authenticate ... // // Instead, it'll attempt this, and eventually return an error // $ plink -P 12345 proxycmd=foo git-lfs-authenticate ... args = append(args, sshOptPrefixRE.ReplaceAllString(meta.UserAndHost, "")) } } else { args = append(args, meta.UserAndHost) } return cmd, args, needShell, multiplexing, controlPath } const defaultSSHCmd = "ssh" var ( sshOptPrefixRE = regexp.MustCompile(`\A\-+`) ) git-lfs-3.6.1/ssh/ssh_test.go000066400000000000000000000515751472372047300160740ustar00rootroot00000000000000package ssh_test import ( "net/url" "path/filepath" "strings" "testing" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/git-lfs/git-lfs/v3/ssh" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestSSHGetLFSExeAndArgs(t *testing.T) { cli, err := lfshttp.NewClient(nil) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" meta.Path = "user/repo" exe, args, _, _ := ssh.GetLFSExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, "git-lfs-authenticate", "download", false, "") assert.Equal(t, "ssh", exe) assert.Equal(t, []string{ "user@foo.com", "git-lfs-authenticate user/repo download", }, args) exe, args, _, _ = ssh.GetLFSExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, "git-lfs-authenticate", "upload", false, "") assert.Equal(t, "ssh", exe) assert.Equal(t, []string{ "user@foo.com", "git-lfs-authenticate user/repo upload", }, args) } func TestSSHGetExeAndArgsSsh(t *testing.T) { cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": "", }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, "ssh", exe) assert.Equal(t, []string{"user@foo.com"}, args) } func TestSSHGetExeAndArgsSshCustomPort(t *testing.T) { cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": "", }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" meta.Port = "8888" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, "ssh", exe) assert.Equal(t, []string{"-p", "8888", "user@foo.com"}, args) } func TestSSHGetExeAndArgsSshNoMultiplexing(t *testing.T) { cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": "", }, map[string]string{ "lfs.ssh.automultiplex": "false", })) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" exe, baseargs, needShell, multiplexing, controlPath := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, true, "") exe, args := ssh.FormatArgs(exe, baseargs, needShell) assert.Equal(t, "ssh", exe) assert.Equal(t, false, multiplexing) assert.Equal(t, []string{"user@foo.com"}, args) assert.Empty(t, controlPath) } func TestSSHGetExeAndArgsSshMultiplexingMaster(t *testing.T) { cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": "", }, 
map[string]string{ "lfs.ssh.automultiplex": "true", })) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" exe, baseargs, needShell, multiplexing, controlPath := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, true, "") exe, args := ssh.FormatArgs(exe, baseargs, needShell) assert.Equal(t, "ssh", exe) assert.Equal(t, true, multiplexing) assert.Equal(t, 3, len(args)) assert.Equal(t, "-oControlMaster=yes", args[0]) assert.True(t, strings.HasPrefix(args[1], "-oControlPath=")) assert.Equal(t, "user@foo.com", args[2]) assert.NotEmpty(t, controlPath) } func TestSSHGetExeAndArgsSshMultiplexingExtra(t *testing.T) { cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": "", }, map[string]string{ "lfs.ssh.automultiplex": "true", })) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" exe, baseargs, needShell, multiplexing, controlPath := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, true, "/tmp/lfs/lfs.sock") exe, args := ssh.FormatArgs(exe, baseargs, needShell) assert.Equal(t, "ssh", exe) assert.Equal(t, true, multiplexing) assert.Equal(t, []string{"-oControlMaster=no", "-oControlPath=/tmp/lfs/lfs.sock", "user@foo.com"}, args) assert.Equal(t, "/tmp/lfs/lfs.sock", controlPath) } func TestSSHGetExeAndArgsPlink(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "plink.exe") cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": plink, }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, plink, exe) assert.Equal(t, []string{"user@foo.com"}, args) } func TestSSHGetExeAndArgsPlinkCustomPort(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "plink") cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": plink, }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" meta.Port = "8888" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-P", "8888", "user@foo.com"}, args) } func TestSSHGetExeAndArgsPlinkCustomPortExplicitEnvironment(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "ssh") cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": plink, "GIT_SSH_VARIANT": "plink", }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" meta.Port = "8888" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-P", "8888", "user@foo.com"}, args) } func TestSSHGetExeAndArgsPlinkCustomPortExplicitEnvironmentPutty(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "ssh") cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": plink, "GIT_SSH_VARIANT": "putty", }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" meta.Port = "8888" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, 
needShell) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-P", "8888", "user@foo.com"}, args) } func TestSSHGetExeAndArgsPlinkCustomPortExplicitEnvironmentSsh(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "ssh") cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": plink, "GIT_SSH_VARIANT": "ssh", }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" meta.Port = "8888" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-p", "8888", "user@foo.com"}, args) } func TestSSHGetExeAndArgsTortoisePlink(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink.exe") cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": plink, }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-batch", "user@foo.com"}, args) } func TestSSHGetExeAndArgsTortoisePlinkCustomPort(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink") cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": plink, }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" meta.Port = "8888" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-batch", "-P", "8888", "user@foo.com"}, args) } func TestSSHGetExeAndArgsTortoisePlinkCustomPortExplicitEnvironment(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "ssh") cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": plink, "GIT_SSH_VARIANT": "tortoiseplink", }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" meta.Port = "8888" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-batch", "-P", "8888", "user@foo.com"}, args) } func TestSSHGetExeAndArgsTortoisePlinkCustomPortExplicitConfig(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "ssh") cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": plink, "GIT_SSH_VARIANT": "tortoiseplink", }, map[string]string{ "ssh.variant": "tortoiseplink", })) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" meta.Port = "8888" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-batch", "-P", "8888", "user@foo.com"}, args) } func TestSSHGetExeAndArgsTortoisePlinkCustomPortExplicitConfigOverride(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "ssh") cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": plink, }, map[string]string{ "ssh.variant": "putty", })) require.Nil(t, err) meta 
:= ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" meta.Port = "8888" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-P", "8888", "user@foo.com"}, args) } func TestSSHGetExeAndArgsSshCommandPrecedence(t *testing.T) { cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "sshcmd", "GIT_SSH": "bad", "GIT_SSH_VARIANT": "simple", }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, "sh", exe) assert.Equal(t, []string{"-c", "sshcmd user@foo.com"}, args) } func TestSSHGetExeAndArgsSshCommandArgs(t *testing.T) { cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "sshcmd --args 1", "GIT_SSH_VARIANT": "simple", }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, "sh", exe) assert.Equal(t, []string{"-c", "sshcmd --args 1 user@foo.com"}, args) } func TestSSHGetExeAndArgsSshCommandArgsWithMixedQuotes(t *testing.T) { cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "sshcmd foo 'bar \"baz\"'", "GIT_SSH_VARIANT": "simple", }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, "sh", exe) assert.Equal(t, []string{"-c", "sshcmd foo 'bar \"baz\"' user@foo.com"}, args) } func TestSSHGetExeAndArgsSshCommandCustomPort(t *testing.T) { cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "sshcmd", }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" meta.Port = "8888" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, "sh", exe) assert.Equal(t, []string{"-c", "sshcmd -p 8888 user@foo.com"}, args) } func TestSSHGetExeAndArgsCoreSshCommand(t *testing.T) { cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": "sshcmd --args 2", }, map[string]string{ "core.sshcommand": "sshcmd --args 1", })) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, "sh", exe) assert.Equal(t, []string{"-c", "sshcmd --args 2 user@foo.com"}, args) } func TestSSHGetExeAndArgsCoreSshCommandArgsWithMixedQuotes(t *testing.T) { cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "core.sshcommand": "sshcmd foo 'bar \"baz\"'", })) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, "sh", exe) assert.Equal(t, []string{"-c", "sshcmd foo 'bar \"baz\"' user@foo.com"}, args) } func 
TestSSHGetExeAndArgsConfigVersusEnv(t *testing.T) { cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "core.sshcommand": "sshcmd --args 1", })) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, "sh", exe) assert.Equal(t, []string{"-c", "sshcmd --args 1 user@foo.com"}, args) } func TestSSHGetExeAndArgsPlinkCommand(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "plink.exe") cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": plink, }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, "sh", exe) assert.Equal(t, []string{"-c", plink + " user@foo.com"}, args) } func TestSSHGetExeAndArgsPlinkCommandCustomPort(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "plink") cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": plink, }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" meta.Port = "8888" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, "sh", exe) assert.Equal(t, []string{"-c", plink + " -P 8888 user@foo.com"}, args) } func TestSSHGetExeAndArgsTortoisePlinkCommand(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink.exe") cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": plink, }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, "sh", exe) assert.Equal(t, []string{"-c", plink + " -batch user@foo.com"}, args) } func TestSSHGetExeAndArgsTortoisePlinkCommandCustomPort(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink") cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH_COMMAND": plink, }, nil)) require.Nil(t, err) meta := ssh.SSHMetadata{} meta.UserAndHost = "user@foo.com" meta.Port = "8888" exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &meta, false, "") exe, args = ssh.FormatArgs(exe, args, needShell) assert.Equal(t, "sh", exe) assert.Equal(t, []string{"-c", plink + " -batch -P 8888 user@foo.com"}, args) } func TestSSHGetLFSExeAndArgsWithCustomSSH(t *testing.T) { cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH": "not-ssh", "GIT_SSH_VARIANT": "simple", }, nil)) require.Nil(t, err) u, err := url.Parse("ssh://git@host.com:12345/repo") require.Nil(t, err) e := lfshttp.EndpointFromSshUrl(u) t.Logf("ENDPOINT: %+v", e) assert.Equal(t, "12345", e.SSHMetadata.Port) assert.Equal(t, "git@host.com", e.SSHMetadata.UserAndHost) assert.Equal(t, "/repo", e.SSHMetadata.Path) exe, args, _, _ := ssh.GetLFSExeAndArgs(cli.OSEnv(), cli.GitEnv(), &e.SSHMetadata, "git-lfs-authenticate", "download", false, "") assert.Equal(t, "not-ssh", exe) assert.Equal(t, []string{"-p", "12345", "git@host.com", "git-lfs-authenticate /repo download"}, args) } func 
TestSSHGetLFSExeAndArgsInvalidOptionsAsHost(t *testing.T) { cli, err := lfshttp.NewClient(nil) require.Nil(t, err) u, err := url.Parse("ssh://-oProxyCommand=gnome-calculator/repo") require.Nil(t, err) assert.Equal(t, "-oProxyCommand=gnome-calculator", u.Host) e := lfshttp.EndpointFromSshUrl(u) t.Logf("ENDPOINT: %+v", e) assert.Equal(t, "-oProxyCommand=gnome-calculator", e.SSHMetadata.UserAndHost) assert.Equal(t, "/repo", e.SSHMetadata.Path) exe, args, _, _ := ssh.GetLFSExeAndArgs(cli.OSEnv(), cli.GitEnv(), &e.SSHMetadata, "git-lfs-authenticate", "download", false, "") assert.Equal(t, "ssh", exe) assert.Equal(t, []string{"--", "-oProxyCommand=gnome-calculator", "git-lfs-authenticate /repo download"}, args) } func TestSSHGetLFSExeAndArgsInvalidOptionsAsHostWithCustomSSH(t *testing.T) { cli, err := lfshttp.NewClient(lfshttp.NewContext(nil, map[string]string{ "GIT_SSH": "not-ssh", "GIT_SSH_VARIANT": "simple", }, nil)) require.Nil(t, err) u, err := url.Parse("ssh://--oProxyCommand=gnome-calculator/repo") require.Nil(t, err) assert.Equal(t, "--oProxyCommand=gnome-calculator", u.Host) e := lfshttp.EndpointFromSshUrl(u) t.Logf("ENDPOINT: %+v", e) assert.Equal(t, "--oProxyCommand=gnome-calculator", e.SSHMetadata.UserAndHost) assert.Equal(t, "/repo", e.SSHMetadata.Path) exe, args, _, _ := ssh.GetLFSExeAndArgs(cli.OSEnv(), cli.GitEnv(), &e.SSHMetadata, "git-lfs-authenticate", "download", false, "") assert.Equal(t, "not-ssh", exe) assert.Equal(t, []string{"oProxyCommand=gnome-calculator", "git-lfs-authenticate /repo download"}, args) } func TestSSHGetExeAndArgsInvalidOptionsAsHost(t *testing.T) { cli, err := lfshttp.NewClient(nil) require.Nil(t, err) u, err := url.Parse("ssh://-oProxyCommand=gnome-calculator") require.Nil(t, err) assert.Equal(t, "-oProxyCommand=gnome-calculator", u.Host) e := lfshttp.EndpointFromSshUrl(u) t.Logf("ENDPOINT: %+v", e) assert.Equal(t, "-oProxyCommand=gnome-calculator", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &e.SSHMetadata, false, "") assert.Equal(t, "ssh", exe) assert.Equal(t, []string{"--", "-oProxyCommand=gnome-calculator"}, args) assert.Equal(t, false, needShell) } func TestSSHGetExeAndArgsInvalidOptionsAsPath(t *testing.T) { cli, err := lfshttp.NewClient(nil) require.Nil(t, err) u, err := url.Parse("ssh://git@git-host.com/-oProxyCommand=gnome-calculator") require.Nil(t, err) assert.Equal(t, "git-host.com", u.Host) e := lfshttp.EndpointFromSshUrl(u) t.Logf("ENDPOINT: %+v", e) assert.Equal(t, "git@git-host.com", e.SSHMetadata.UserAndHost) assert.Equal(t, "/-oProxyCommand=gnome-calculator", e.SSHMetadata.Path) exe, args, needShell, _, _ := ssh.GetExeAndArgs(cli.OSEnv(), cli.GitEnv(), &e.SSHMetadata, false, "") assert.Equal(t, "ssh", exe) assert.Equal(t, []string{"git@git-host.com"}, args) assert.Equal(t, false, needShell) } func TestParseBareSSHUrl(t *testing.T) { e := lfshttp.EndpointFromBareSshUrl("git@git-host.com:repo.git") t.Logf("endpoint: %+v", e) assert.Equal(t, "git@git-host.com", e.SSHMetadata.UserAndHost) assert.Equal(t, "repo.git", e.SSHMetadata.Path) e = lfshttp.EndpointFromBareSshUrl("git@git-host.com/should-be-a-colon.git") t.Logf("endpoint: %+v", e) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) e = lfshttp.EndpointFromBareSshUrl("-oProxyCommand=gnome-calculator") t.Logf("endpoint: %+v", e) assert.Equal(t, "", e.SSHMetadata.UserAndHost) assert.Equal(t, "", e.SSHMetadata.Path) e = 
lfshttp.EndpointFromBareSshUrl("git@git-host.com:-oProxyCommand=gnome-calculator") t.Logf("endpoint: %+v", e) assert.Equal(t, "git@git-host.com", e.SSHMetadata.UserAndHost) assert.Equal(t, "-oProxyCommand=gnome-calculator", e.SSHMetadata.Path) } git-lfs-3.6.1/subprocess/000077500000000000000000000000001472372047300152675ustar00rootroot00000000000000git-lfs-3.6.1/subprocess/buffered_cmd.go000066400000000000000000000004021472372047300202170ustar00rootroot00000000000000package subprocess import ( "bufio" "io" ) const ( // stdoutBufSize is the size of the buffers given to a sub-process stdout stdoutBufSize = 16384 ) type BufferedCmd struct { *Cmd Stdin io.WriteCloser Stdout *bufio.Reader Stderr *bufio.Reader } git-lfs-3.6.1/subprocess/cmd.go000066400000000000000000000024361472372047300163660ustar00rootroot00000000000000package subprocess import ( "io" "os/exec" ) // Thin wrapper around exec.Cmd. Takes care of pipe shutdown by // keeping an internal reference to any created pipes. Whenever // Cmd.Wait() is called, all created pipes are closed. type Cmd struct { *exec.Cmd pipes []io.Closer } func (c *Cmd) Run() error { c.trace() return c.Cmd.Run() } func (c *Cmd) Start() error { c.trace() return c.Cmd.Start() } func (c *Cmd) Output() ([]byte, error) { c.trace() return c.Cmd.Output() } func (c *Cmd) CombinedOutput() ([]byte, error) { c.trace() return c.Cmd.CombinedOutput() } func (c *Cmd) StdoutPipe() (io.ReadCloser, error) { stdout, err := c.Cmd.StdoutPipe() c.pipes = append(c.pipes, stdout) return stdout, err } func (c *Cmd) StderrPipe() (io.ReadCloser, error) { stderr, err := c.Cmd.StderrPipe() c.pipes = append(c.pipes, stderr) return stderr, err } func (c *Cmd) StdinPipe() (io.WriteCloser, error) { stdin, err := c.Cmd.StdinPipe() c.pipes = append(c.pipes, stdin) return stdin, err } func (c *Cmd) Wait() error { for _, pipe := range c.pipes { pipe.Close() } return c.Cmd.Wait() } func (c *Cmd) trace() { if len(c.Args) > 0 { Trace(c.Args[0], c.Args[1:]...) } else { Trace(c.Path) } } func newCmd(cmd *exec.Cmd) *Cmd { wrapped := &Cmd{Cmd: cmd} return wrapped } git-lfs-3.6.1/subprocess/path.go000066400000000000000000000051451472372047300165570ustar00rootroot00000000000000// Copyright 2010 The Go Authors. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package subprocess import ( "os" "os/exec" "path/filepath" "runtime" "strings" ) // LookPath searches for an executable named file in the // directories named by the PATH environment variable. // If file contains a slash, it is tried directly and the PATH is not consulted. // The result may be an absolute path or a path relative to the current directory. func LookPath(file string) (string, error) { sep := string([]rune{os.PathSeparator}) exts := findPathExtensions() if strings.Contains(file, sep) { path, err := findExecutable(file, exts) if err == nil { return path, nil } return "", exec.ErrNotFound } path := os.Getenv("PATH") for _, dir := range filepath.SplitList(path) { if dir == "" { // Windows often has empty components in the PATH and // treating them as "." is not expected. if runtime.GOOS == "windows" { continue } // Unix shell semantics: path element "" means "." dir = "." } path := filepath.Join(dir, file) if resolved, err := findExecutable(path, exts); err == nil { return resolved, nil } } return "", exec.ErrNotFound } git-lfs-3.6.1/subprocess/path_nix.go000066400000000000000000000035701472372047300174350ustar00rootroot00000000000000// Copyright 2010 The Go Authors. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
//go:build !windows // +build !windows package subprocess import ( "os" ) func findPathExtensions() []string { return nil } func findExecutable(file string, exts []string) (string, error) { d, err := os.Stat(file) if err != nil { return "", err } if m := d.Mode(); !m.IsDir() && m&0111 != 0 { return file, nil } return "", os.ErrPermission } git-lfs-3.6.1/subprocess/path_windows.go000066400000000000000000000050411472372047300203240ustar00rootroot00000000000000// Copyright 2010 The Go Authors. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //go:build windows // +build windows package subprocess import ( "os" "strings" ) func chkStat(file string) error { d, err := os.Stat(file) if err != nil { return err } if d.IsDir() { return os.ErrPermission } return nil } func hasExt(file string) bool { i := strings.LastIndex(file, ".") if i < 0 { return false } return strings.LastIndexAny(file, `:\/`) < i } func findExecutable(file string, exts []string) (string, error) { if len(exts) == 0 { return file, chkStat(file) } if hasExt(file) { if chkStat(file) == nil { return file, nil } } for _, e := range exts { if f := file + e; chkStat(f) == nil { return f, nil } } return "", os.ErrNotExist } func findPathExtensions() []string { var exts []string x := os.Getenv(`PATHEXT`) if x != "" { for _, e := range strings.Split(strings.ToLower(x), `;`) { if e == "" { continue } if e[0] != '.' { e = "." + e } exts = append(exts, e) } } else { exts = []string{".com", ".exe", ".bat", ".cmd"} } return exts } git-lfs-3.6.1/subprocess/subprocess.go000066400000000000000000000143461472372047300200160ustar00rootroot00000000000000// Package subprocess provides helper functions for forking new processes // NOTE: Subject to change, do not rely on this package from outside git-lfs source package subprocess import ( "bufio" "errors" "fmt" "os" "os/exec" "regexp" "strings" "sync" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) // BufferedExec starts up a command and creates a stdin pipe and a buffered // stdout & stderr pipes, wrapped in a BufferedCmd. 
The stdout buffer will be // of stdoutBufSize bytes. func BufferedExec(name string, args ...string) (*BufferedCmd, error) { cmd, err := ExecCommand(name, args...) if err != nil { return nil, err } stdout, err := cmd.StdoutPipe() if err != nil { return nil, err } stderr, err := cmd.StderrPipe() if err != nil { return nil, err } stdin, err := cmd.StdinPipe() if err != nil { return nil, err } if err := cmd.Start(); err != nil { return nil, err } return &BufferedCmd{ cmd, stdin, bufio.NewReaderSize(stdout, stdoutBufSize), bufio.NewReaderSize(stderr, stdoutBufSize), }, nil } // StdoutBufferedExec starts up a command and creates a stdin pipe and a // buffered stdout pipe, with stderr directed to /dev/null, wrapped in a // BufferedCmd. The stdout buffer will be of stdoutBufSize bytes. func StdoutBufferedExec(name string, args ...string) (*BufferedCmd, error) { cmd, err := ExecCommand(name, args...) if err != nil { return nil, err } stdout, err := cmd.StdoutPipe() if err != nil { return nil, err } stdin, err := cmd.StdinPipe() if err != nil { return nil, err } if err := cmd.Start(); err != nil { return nil, err } return &BufferedCmd{ cmd, stdin, bufio.NewReaderSize(stdout, stdoutBufSize), nil, }, nil } // SimpleExec is a small wrapper around os/exec.Command. func SimpleExec(name string, args ...string) (string, error) { cmd, err := ExecCommand(name, args...) if err != nil { return "", err } return Output(cmd) } func Output(cmd *Cmd) (string, error) { out, err := cmd.Output() if exitError, ok := err.(*exec.ExitError); ok { errorOutput := strings.TrimSpace(string(exitError.Stderr)) if errorOutput == "" { // some commands might write nothing to stderr but something to stdout in error-conditions, in which case, we'll use that // in the error string errorOutput = strings.TrimSpace(string(out)) } ran := cmd.Path if len(cmd.Args) > 1 { ran = fmt.Sprintf("%s %s", cmd.Path, quotedArgs(cmd.Args[1:])) } formattedErr := errors.New(tr.Tr.Get("error running %s: '%s' '%s'", ran, errorOutput, strings.TrimSpace(exitError.Error()))) // return "" as output in error case, for callers that don't care about errors but rely on "" returned, in-case stdout != "" return "", formattedErr } return strings.Trim(string(out), " \n"), err } var shellWordRe = regexp.MustCompile(`\A[A-Za-z0-9_@/.-]+\z`) // ShellQuoteSingle returns a string which is quoted suitably for sh. func ShellQuoteSingle(str string) string { // Quote anything that looks slightly complicated. if shellWordRe.FindStringIndex(str) == nil { return "'" + strings.Replace(str, "'", "'\\''", -1) + "'" } return str } // ShellQuote returns a copied string slice where each element is quoted // suitably for sh. func ShellQuote(strs []string) []string { dup := make([]string, 0, len(strs)) for _, str := range strs { dup = append(dup, ShellQuoteSingle(str)) } return dup } // FormatForShell takes a command name and an argument string and returns a // command and arguments that pass this command to the shell. Note that neither // the command nor the arguments are quoted. Consider FormatForShellQuoted // instead. func FormatForShell(name string, args string) (string, []string) { return "sh", []string{"-c", name + " " + args} } // FormatForShellQuotedArgs takes a command name and an argument string and // returns a command and arguments that pass this command to the shell. The // arguments are escaped, but the name of the command is not. 
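// As an illustrative sketch (not an additional API guarantee): given ("git", []string{"log", "a file"}), the result is ("sh", []string{"-c", "git log 'a file'"}), because "log" matches shellWordRe and passes through unquoted while "a file" is single-quoted by ShellQuote.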
func FormatForShellQuotedArgs(name string, args []string) (string, []string) { return FormatForShell(name, strings.Join(ShellQuote(args), " ")) } func FormatPercentSequences(pattern string, replacements map[string]string) string { s := new(strings.Builder) state := 0 for _, r := range pattern { if state == 0 && r == '%' { state = 1 continue } else if state == 1 { state = 0 if r == '%' { s.WriteRune('%') } else if val, ok := replacements[string([]rune{r})]; ok { s.WriteString(ShellQuoteSingle(val)) } } else { s.WriteRune(r) } } return s.String() } func Trace(name string, args ...string) { tracerx.Printf("exec: %s %s", name, quotedArgs(args)) } func quotedArgs(args []string) string { if len(args) == 0 { return "" } quoted := make([]string, len(args)) for i, arg := range args { quoted[i] = fmt.Sprintf("'%s'", arg) } return strings.Join(quoted, " ") } // An env for an exec.Command without GIT_TRACE and GIT_INTERNAL_SUPER_PREFIX var env []string var envMu sync.Mutex var traceEnv = "GIT_TRACE=" // Don't pass GIT_INTERNAL_SUPER_PREFIX back to Git. Git passes this environment // variable to child processes when submodule.recurse is set to true. However, // passing that environment variable back to Git will cause it to append the // --super-prefix command-line option to every Git call. This is problematic // because many Git commands (including git config and git rev-parse) don't // support --super-prefix and would immediately exit with an error as a result. var superPrefixEnv = "GIT_INTERNAL_SUPER_PREFIX=" func fetchEnvironment() []string { envMu.Lock() defer envMu.Unlock() return fetchEnvironmentInternal() } // fetchEnvironmentInternal should only be called from fetchEnvironment or // ResetEnvironment, who will hold the required lock. func fetchEnvironmentInternal() []string { if env != nil { return env } realEnv := os.Environ() env = make([]string, 0, len(realEnv)) for _, kv := range realEnv { if strings.HasPrefix(kv, traceEnv) || strings.HasPrefix(kv, superPrefixEnv) { continue } env = append(env, kv) } return env } // ResetEnvironment resets the cached environment that's used in subprocess // calls. func ResetEnvironment() { envMu.Lock() defer envMu.Unlock() env = nil // Reinitialize the environment settings. fetchEnvironmentInternal() } git-lfs-3.6.1/subprocess/subprocess_nix.go000066400000000000000000000006141472372047300206650ustar00rootroot00000000000000//go:build !windows // +build !windows package subprocess import ( "os/exec" ) // ExecCommand is a small platform specific wrapper around os/exec.Command func ExecCommand(name string, arg ...string) (*Cmd, error) { cmd := exec.Command(name, arg...) 
var err error cmd.Path, err = LookPath(name) if err != nil { return nil, err } cmd.Env = fetchEnvironment() return newCmd(cmd), nil } git-lfs-3.6.1/subprocess/subprocess_test.go000066400000000000000000000103001472372047300210370ustar00rootroot00000000000000package subprocess import ( "testing" "github.com/stretchr/testify/assert" ) type ShellQuoteTestCase struct { Given []string Expected []string } func (c *ShellQuoteTestCase) Assert(t *testing.T) { actual := ShellQuote(c.Given) assert.Equal(t, c.Expected, actual, "subprocess: expected ShellQuote(%q) to equal %#v (was %#v)", c.Given, c.Expected, actual, ) } func TestShellQuote(t *testing.T) { for desc, c := range map[string]ShellQuoteTestCase{ "simple": {[]string{"foo", "bar", "an_id"}, []string{"foo", "bar", "an_id"}}, "leading space": {[]string{" foo", "bar"}, []string{"' foo'", "bar"}}, "trailing space": {[]string{"foo", "bar "}, []string{"foo", "'bar '"}}, "internal space": {[]string{"foo bar", "baz quux"}, []string{"'foo bar'", "'baz quux'"}}, "backslash": {[]string{`foo\bar`, `b\az`}, []string{`'foo\bar'`, `'b\az'`}}, "quotes": {[]string{`foo"bar`, "b'az"}, []string{`'foo"bar'`, "'b'\\''az'"}}, "mixed quotes": {[]string{`"foo'ba\"r\"'"`}, []string{`'"foo'\''ba\"r\"'\''"'`}}, } { t.Run(desc, c.Assert) } } type FormatForShellQuotedArgsTestCase struct { GivenCmd string GivenArgs []string ExpectedArgs []string } func (c *FormatForShellQuotedArgsTestCase) Assert(t *testing.T) { actualCmd, actualArgs := FormatForShellQuotedArgs(c.GivenCmd, c.GivenArgs) assert.Equal(t, "sh", actualCmd, "subprocess: expected FormatForShell command to equal 'sh' (was #%v)", actualCmd) assert.Equal(t, c.ExpectedArgs, actualArgs, "subprocess: expected FormatForShell(%q, %v) to equal %#v (was %#v)", c.GivenCmd, c.GivenArgs, c.ExpectedArgs, actualArgs, ) } func TestFormatForShellQuotedArgs(t *testing.T) { for desc, c := range map[string]FormatForShellQuotedArgsTestCase{ "simple": {"foo", []string{"bar", "baz"}, []string{"-c", "foo bar baz"}}, "spaces": {"foo quux", []string{" bar", "baz "}, []string{"-c", "foo quux ' bar' 'baz '"}}, "backslashes": {"bin/foo", []string{"\\bar", "b\\az"}, []string{"-c", "bin/foo '\\bar' 'b\\az'"}}, } { t.Run(desc, c.Assert) } } type FormatForShellTestCase struct { GivenCmd string GivenArgs string ExpectedArgs []string } func (c *FormatForShellTestCase) Assert(t *testing.T) { actualCmd, actualArgs := FormatForShell(c.GivenCmd, c.GivenArgs) assert.Equal(t, "sh", actualCmd, "subprocess: expected FormatForShell command to equal 'sh' (was #%v)", actualCmd) assert.Equal(t, c.ExpectedArgs, actualArgs, "subprocess: expected FormatForShell(%q, %v) to equal %#v (was %#v)", c.GivenCmd, c.GivenArgs, c.ExpectedArgs, actualArgs, ) } func TestFormatForShell(t *testing.T) { for desc, c := range map[string]FormatForShellTestCase{ "simple": {"foo", "bar", []string{"-c", "foo bar"}}, "spaces": {"foo quux", "bar baz", []string{"-c", "foo quux bar baz"}}, "quotes": {"bin/foo", "bar \"baz quux\" 'fred wilma'", []string{"-c", "bin/foo bar \"baz quux\" 'fred wilma'"}}, } { t.Run(desc, c.Assert) } } type FormatPercentSequencesTestCase struct { GivenPattern string GivenReplacements map[string]string ExpectedString string } func (c *FormatPercentSequencesTestCase) Assert(t *testing.T) { actualString := FormatPercentSequences(c.GivenPattern, c.GivenReplacements) assert.Equal(t, c.ExpectedString, actualString, "subprocess: expected FormatForShell(%q, %v) to equal %q (was %q)", c.GivenPattern, c.GivenReplacements, c.ExpectedString, actualString, ) } func 
TestFormatPercentSequences(t *testing.T) { replacements := map[string]string{ "A": "current", "B": "other file", "P": "some ' output \" file", } for desc, c := range map[string]FormatPercentSequencesTestCase{ "simple": {"merge-foo %A", replacements, "merge-foo current"}, "double-percent": {"merge-foo %A %%A", replacements, "merge-foo current %A"}, "spaces": {"merge-foo %B", replacements, "merge-foo 'other file'"}, "weird filename": {"merge-foo %P", replacements, "merge-foo 'some '\\'' output \" file'"}, "no patterns": {"merge-foo /dev/null", replacements, "merge-foo /dev/null"}, "pattern adjacent to non-space": {"merge-foo >%B", replacements, "merge-foo >'other file'"}, } { t.Run(desc, c.Assert) } } git-lfs-3.6.1/subprocess/subprocess_windows.go000066400000000000000000000007171472372047300215650ustar00rootroot00000000000000//go:build windows // +build windows package subprocess import ( "os/exec" "syscall" ) // ExecCommand is a small platform specific wrapper around os/exec.Command func ExecCommand(name string, arg ...string) (*Cmd, error) { cmd := exec.Command(name, arg...) var err error cmd.Path, err = LookPath(name) if err != nil { return nil, err } cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true} cmd.Env = fetchEnvironment() return newCmd(cmd), nil } git-lfs-3.6.1/t/000077500000000000000000000000001472372047300133425ustar00rootroot00000000000000git-lfs-3.6.1/t/Makefile000066400000000000000000000032311472372047300150010ustar00rootroot00000000000000SHELL = bash RM ?= rm -f PROVE ?= prove PROVE_EXTRA_ARGS = DEFAULT_TEST_TARGET ?= test GO ?= go # GOTOOLCHAIN is an environment variable which, when set to 'local', # prevents Go from downloading and running non-local versions of itself. export GOTOOLCHAIN = local ifeq ($(OS),Windows_NT) X ?= .exe else X ?= endif TEST_CMDS = TEST_CMDS += ../bin/git-credential-lfsnoop$X TEST_CMDS += ../bin/git-credential-lfstest$X TEST_CMDS += ../bin/lfs-askpass$X TEST_CMDS += ../bin/lfs-ssh-echo$X TEST_CMDS += ../bin/lfs-ssh-proxy-test$X TEST_CMDS += ../bin/lfstest-badpathcheck$X TEST_CMDS += ../bin/lfstest-count-tests$X TEST_CMDS += ../bin/lfstest-customadapter$X TEST_CMDS += ../bin/lfstest-genrandom$X TEST_CMDS += ../bin/lfstest-gitserver$X TEST_CMDS += ../bin/lfstest-nanomtime$X TEST_CMDS += ../bin/lfstest-realpath$X TEST_CMDS += ../bin/lfstest-standalonecustomadapter$X TEST_CMDS += ../bin/lfstest-testutils$X # Not used for the integration tests, but build it here anyway to ensure it # continues to work. TEST_CMDS += ../bin/git-lfs-test-server-api$X TEST_SRCS = $(wildcard t-*.sh) TEST_API_SRCS = $(wildcard git-lfs-test-server-api/*.go) all : $(DEFAULT_TEST_TARGET) test-commands : $(TEST_CMDS) test : test-commands $(RM) -r remote test_count{,.lock} @. ./testenv.sh && setup && cd t && \ RM_GIT_LFS_TEST_DIR=no $(PROVE) $(PROVE_EXTRA_ARGS) t-*.sh && \ shutdown .PHONY : $(TEST_SRCS) $(TEST_SRCS) : $(TEST_CMDS) $(RM) -r remote test_count{,.lock} $(PROVE) -v $(PROVE_EXTRA_ARGS) $@ .PHONY : clean clean : $(RM) -r remote $(RM) $(TEST_CMDS) ../bin/%$X : cmd/%.go $(GO) build -o $@ $^ ../bin/git-lfs-test-server-api$X : $(TEST_API_SRCS) $(GO) build -o $@ $^ git-lfs-3.6.1/t/README.md000066400000000000000000000076071472372047300146310ustar00rootroot00000000000000# `t` This directory contains one of the two types of tests that the Git LFS project uses to protect against regression. The first, scattered in `*_test.go` files throughout the repository, are _unit tests_, written in Go and designed to uncover failures at the unit level.
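For contrast, a unit test exercises a single package directly and needs no server or fixtures; a minimal, hypothetical example in the style of `subprocess/subprocess_test.go` might look like this:

```go
package subprocess

import "testing"

// TestShellQuoteSpaces checks that an argument containing a space is
// wrapped in single quotes, per ShellQuote's behavior.
func TestShellQuoteSpaces(t *testing.T) {
	got := ShellQuote([]string{"a b"})
	if got[0] != "'a b'" {
		t.Fatalf("expected %q, got %q", "'a b'", got[0])
	}
}
```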
The second kind--and the one contained in this directory--are _integration tests_, which are designed to exercise Git LFS in an end-to-end fashion, running the `git` and `git-lfs` binaries, along with a mock Git server. You can run all tests in this directory with any of the following: ```ShellSession $ make $ make test $ make PROVE_EXTRA_ARGS=-j9 test ``` Or run a single test (for example, `t-checkout.sh`) by any of the following: ```ShellSession $ make ./t-checkout.sh $ make PROVE_EXTRA_ARGS=-v ./t-checkout.sh $ ./t-checkout.sh ``` Alternatively, one can run a selection of tests (via explicitly listing them or making use of the built-in shell globbing) by any of the following: ```ShellSession $ make ./t-*.sh $ make PROVE_EXTRA_ARGS=-j9 ./t-*.sh $ ./t-*.sh ``` ## Test File(s) There are a few important kinds of files to know about in the `t` directory: - `cmd/`: contains the source code of binaries that are useful during test time, like the mocked Git server or the test-counting binary. For more about the contents of this directory, see [test lifecycle](#test-lifecycle) below. The file `t/cmd/testutils.go` is automatically linked and included during the build process of each file in `cmd`. - `fixtures/`: contains shell scripts that load fixture repositories useful for testing against. - `t-*.sh`: file(s) containing zero or more tests, typically related to a similar topic (cf. `t/t-push.sh`, `t/t-pull.sh`, etc.) - `testenv.sh`: loads environment variables useful during tests. This file is sourced by `testlib.sh`. - `testhelpers.sh`: loads shell functions useful during tests, like `setup_remote_repo` and `clone_repo`. - `testlib.sh`: loads the `begin_test`, `end_test`, and similar functions useful for instantiating a particular test. ## Test Lifecycle When a test is run, the following occurs, in order: 1. Missing test binaries are compiled into the `bin` directory in the repository root. Note: this does _not_ include the `git-lfs` binary, which is re-compiled via `script/bootstrap`. 2. An integration server is started by either (1) the `Makefile` or (2) the `cmd/lfstest-count-tests.go` program, which keeps track of the number of running tests and starts an integration server any time the number of active tests goes from `0` to `1`, and stops the server when it goes from `n` to `0`. 3. After sourcing `t/testlib.sh` (& loading `t/testenv.sh`), each test is run in sequence per file. (In other words, multiple test files can be run in parallel, but the tests in a single file are run in sequence.) 4. An individual test will finish, and (if running under `prove`) another will be started in its place. Once all tests are done, `t/test_count` will go to `0`, and the test server will be torn down. ## Test Environment There are a few environment variables that you can set to change the test suite behavior: * `GIT_LFS_TEST_DIR=path` - This sets the directory that is used as the current working directory of the tests. By default, this will be in your temp dir. It's recommended that this is set to a directory outside of any Git repository. * `KEEPTRASH=1` - This will leave the local repository data in a `tmp` directory and the remote repository data in `test/remote`. Also ensure that your `noproxy` environment variable contains the `127.0.0.1` host, to allow git commands to reach the local Git server `lfstest-gitserver`. ## Writing new tests A new test file should be named `t/t-*.sh`, where `*` is the topic of Git LFS being tested. It should look as follows: ```bash #!/usr/bin/env bash .
"$(dirname "$0")/testlib.sh" begin_test "my test" ( set -e # ... ) end_test ``` git-lfs-3.6.1/t/cmd/000077500000000000000000000000001472372047300141055ustar00rootroot00000000000000git-lfs-3.6.1/t/cmd/git-credential-lfsnoop.go000066400000000000000000000001101472372047300207750ustar00rootroot00000000000000//go:build testtools // +build testtools package main func main() { } git-lfs-3.6.1/t/cmd/git-credential-lfstest.go000066400000000000000000000161641472372047300210210ustar00rootroot00000000000000//go:build testtools // +build testtools package main import ( "bufio" "errors" "fmt" "os" "path/filepath" "slices" "strings" ) var ( commands = map[string]func(){ "get": fill, "store": log, "erase": log, } delim = '\n' credsDir = "" ) type credential struct { authtype string username string password string credential string matchState string state string multistage bool skip bool } func (c *credential) Serialize(capabilities map[string]struct{}, state []string, username []string) map[string][]string { formattedState := fmt.Sprintf("lfstest:%s", c.state) formattedMatchState := fmt.Sprintf("lfstest:%s", c.matchState) creds := make(map[string][]string) if c.skip { // Do nothing. } else if _, ok := capabilities["authtype"]; ok && len(c.authtype) != 0 && len(c.credential) != 0 { if _, ok := capabilities["state"]; len(c.matchState) == 0 || (ok && slices.Contains(state, formattedMatchState)) { creds["authtype"] = []string{c.authtype} creds["credential"] = []string{c.credential} if ok { creds["state[]"] = []string{formattedState} if c.multistage { creds["continue"] = []string{"1"} } } } } else if len(c.authtype) == 0 && (len(username) == 0 || username[0] == c.username) { if len(username) == 0 { creds["username"] = []string{c.username} } creds["password"] = []string{c.password} } return creds } func init() { if len(credsDir) == 0 { credsDir = os.Getenv("CREDSDIR") } } func main() { if argsize := len(os.Args); argsize != 2 { fmt.Fprintf(os.Stderr, "wrong number of args: %d\n", argsize) os.Exit(1) } arg := os.Args[1] cmd := commands[arg] if cmd == nil { fmt.Fprintf(os.Stderr, "bad cmd: %s\n", arg) os.Exit(1) } cmd() } func fill() { scanner := bufio.NewScanner(os.Stdin) creds := map[string][]string{} for scanner.Scan() { line := scanner.Text() parts := strings.SplitN(line, "=", 2) if len(parts) != 2 { fmt.Fprintf(os.Stderr, "bad line: %s\n", line) os.Exit(1) } fmt.Fprintf(os.Stderr, "CREDS RECV: %s\n", line) if _, ok := creds[parts[0]]; ok { creds[parts[0]] = append(creds[parts[0]], strings.TrimSpace(parts[1])) } else { creds[parts[0]] = []string{strings.TrimSpace(parts[1])} } } if err := scanner.Err(); err != nil { fmt.Fprintf(os.Stderr, "reading standard input: %v", err) os.Exit(1) } hostPieces := strings.SplitN(firstEntryForKey(creds, "host"), ":", 2) credentials, err := credsForHostAndPath(hostPieces[0], firstEntryForKey(creds, "path")) if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } result := map[string][]string{} capas := discoverCapabilities(creds) for _, cred := range credentials { result = cred.Serialize(capas, creds["state[]"], creds["username"]) if len(result) != 0 { break } } mode := os.Getenv("LFS_TEST_CREDS_WWWAUTH") wwwauth := firstEntryForKey(creds, "wwwauth[]") if mode == "required" && !strings.HasPrefix(wwwauth, "Basic ") { fmt.Fprintf(os.Stderr, "Missing required 'wwwauth[]' key in credentials\n") os.Exit(1) } else if mode == "forbidden" && wwwauth != "" { fmt.Fprintf(os.Stderr, "Unexpected 'wwwauth[]' key in credentials\n") os.Exit(1) } if len(result) == 0 { os.Exit(0) } 
// Send capabilities first of all, for one-pass parsing, but send only the // client-advertised capabilities which we also support (see // discoverCapabilities). key := "capability[]" for entry := range capas { fmt.Fprintf(os.Stderr, "CREDS SEND: %s=%s\n", key, entry) fmt.Fprintf(os.Stdout, "%s=%s\n", key, entry) } for key, value := range result { for _, entry := range value { fmt.Fprintf(os.Stderr, "CREDS SEND: %s=%s\n", key, entry) fmt.Fprintf(os.Stdout, "%s=%s\n", key, entry) } } } func discoverCapabilities(creds map[string][]string) map[string]struct{} { capas := make(map[string]struct{}) supportedCapas := map[string]struct{}{ "authtype": struct{}{}, "state": struct{}{}, } for _, capa := range creds["capability[]"] { // Only pass on capabilities we support. if _, ok := supportedCapas[capa]; ok { capas[capa] = struct{}{} } } return capas } func credsForHostAndPath(host, path string) ([]credential, error) { if len(path) > 0 { pathFilename := fmt.Sprintf("%s--%s", host, strings.Replace(path, "/", "-", -1)) cred, err := credsFromFilename(filepath.Join(credsDir, pathFilename)) if err == nil { return cred, err } // Ideally we might run cygpath to convert paths like D:/... // to /d/... paths, but we only need to do this to support // one test of the deprecated git-lfs-clone command in our // CI suite, so for simplicity we just do basic rewriting. if len(path) > 2 && path[0] >= 'A' && path[0] <= 'Z' && path[1] == ':' { path = "/" + strings.ToLower(string(path[0])) + path[2:] pathFilename := fmt.Sprintf("%s--%s", host, strings.Replace(path, "/", "-", -1)) cred, err := credsFromFilename(filepath.Join(credsDir, pathFilename)) if err == nil { return cred, err } } } if len(host) == 0 { return nil, errors.New("No file available; empty 'host' key in credentials") } return credsFromFilename(filepath.Join(credsDir, host)) } func parseOneCredential(s, file string) (credential, error) { // Each line in a file is of the following form: // // skip:: // The literal word "skip" means to skip emitting credentials. // AUTHTYPE::CREDENTIAL // If the authtype is not empty, then this is an authtype and // credential. // AUTHTYPE::CREDENTIAL:MATCH:STATE:MULTISTAGE // Like above, but this matches only if MATCH is empty or if the // state[] entry is present and matches "lfstest:MATCH". If so, // the value "lfstest:STATE" is emitted as the new state[] entry. // If MULTISTAGE is set to "true", then the multistage flag is set. // :USERNAME:PASSWORD // This is a normal username and password.
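// For example (all values hypothetical): the line ":alice:s3cret" parses as a plain username/password pair, while "Bearer::token123:old:new:true" parses as a multistage "Bearer" credential "token123" that matches only when the incoming state[] contains "lfstest:old" and then advertises "lfstest:new" as the new state.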
credsPieces := strings.Split(strings.TrimSpace(s), ":") if len(credsPieces) != 3 && len(credsPieces) != 6 { return credential{}, fmt.Errorf("Invalid data %q while reading %q", string(s), file) } if credsPieces[0] == "skip" { return credential{skip: true}, nil } else if len(credsPieces[0]) == 0 { return credential{username: credsPieces[1], password: credsPieces[2]}, nil } else if len(credsPieces) == 3 { return credential{authtype: credsPieces[0], credential: credsPieces[2]}, nil } else { return credential{ authtype: credsPieces[0], credential: credsPieces[2], matchState: credsPieces[3], state: credsPieces[4], multistage: credsPieces[5] == "true", }, nil } } func credsFromFilename(file string) ([]credential, error) { fileContents, err := os.ReadFile(file) if err != nil { return nil, fmt.Errorf("Error opening %q: %s", file, err) } lines := strings.Split(strings.TrimSpace(string(fileContents)), "\n") creds := make([]credential, 0, len(lines)) for _, line := range lines { cred, err := parseOneCredential(line, file) if err != nil { return nil, err } creds = append(creds, cred) } return creds, nil } func log() { fmt.Fprintf(os.Stderr, "CREDS received command: %s (ignored)\n", os.Args[1]) } func firstEntryForKey(input map[string][]string, key string) string { if val, ok := input[key]; ok && len(val) > 0 { return val[0] } return "" } git-lfs-3.6.1/t/cmd/lfs-askpass.go000066400000000000000000000007431472372047300166670ustar00rootroot00000000000000//go:build testtools // +build testtools package main import ( "fmt" "os" "strings" ) func main() { prompt := strings.Join(os.Args[1:], " ") var answer string if strings.Contains(prompt, "Username") { answer = "user" if env, ok := os.LookupEnv("LFS_ASKPASS_USERNAME"); ok { answer = env } } else if strings.Contains(prompt, "Password") { answer = "pass" if env, ok := os.LookupEnv("LFS_ASKPASS_PASSWORD"); ok { answer = env } } fmt.Println(answer) } git-lfs-3.6.1/t/cmd/lfs-ssh-echo.go000066400000000000000000000073771472372047300167450ustar00rootroot00000000000000//go:build testtools // +build testtools package main import ( "encoding/json" "fmt" "os" "os/exec" "runtime" "strings" "syscall" "time" ) type sshResponse struct { Href string `json:"href"` Header map[string]string `json:"header"` ExpiresAt time.Time `json:"expires_at,omitempty"` ExpiresIn int `json:"expires_in,omitempty"` } func shell() string { if runtime.GOOS == "windows" { return "bash" } return "sh" } func spawnCommand(command string) error { cmd := exec.Command(shell(), "-c", command) cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err := cmd.Run() if e, ok := err.(*exec.ExitError); ok { var ws syscall.WaitStatus ws, ok = e.ProcessState.Sys().(syscall.WaitStatus) if ok { os.Exit(ws.ExitStatus()) } } return err } func checkSufficientArgs(offset int) { if len(os.Args) < offset+2 { fmt.Fprintf(os.Stderr, "got %d args: %v", len(os.Args), os.Args) os.Exit(1) } } func main() { // expect args: // lfs-ssh-echo [-p PORT [--]] git@127.0.0.1 "git-lfs-authenticate REPO OPERATION" // lfs-ssh-echo [-p PORT [--]] git@127.0.0.1 "git-lfs-transfer REPO OPERATION" // lfs-ssh-echo git@127.0.0.1 "git-upload-pack REPO" // lfs-ssh-echo git@127.0.0.1 "git-receive-pack REPO" offset := 1 checkSufficientArgs(offset) if masterArg, found := strings.CutPrefix(os.Args[offset], "-oControlMaster="); found { var master bool switch masterArg { case "yes": master = true case "no": master = false default: fmt.Fprintf(os.Stderr, "expected \"-oControlMaster=yes\" or \"-oControlMaster=no\", got %q", os.Args[offset]) 
os.Exit(1) } if pathArg, found := strings.CutPrefix(os.Args[offset+1], "-oControlPath="); found { if master { if file, err := os.OpenFile(pathArg, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666); err != nil { if os.IsExist(err) { fmt.Fprintf(os.Stderr, "expected %q to not exist", pathArg) } else { fmt.Fprintf(os.Stderr, "unable to create %q: %s", pathArg, err) } os.Exit(1) } else { file.Close() defer os.Remove(pathArg) } } else { if file, err := os.OpenFile(pathArg, os.O_RDONLY, 0); err != nil { if os.IsNotExist(err) { fmt.Fprintf(os.Stderr, "expected %q to exist", pathArg) } else { fmt.Fprintf(os.Stderr, "unable to open %q: %s", pathArg, err) } os.Exit(1) } else { file.Close() } } } else { fmt.Fprintf(os.Stderr, "expected \"-oControlPath\"") os.Exit(1) } offset += 2 } checkSufficientArgs(offset) if os.Args[offset] == "-p" { offset += 2 } checkSufficientArgs(offset) if os.Args[offset] == "--" { offset += 1 } checkSufficientArgs(offset) if os.Args[offset] != "git@127.0.0.1" { fmt.Fprintf(os.Stderr, "expected \"git@127.0.0.1\", got %q", os.Args[offset]) os.Exit(1) } // just "git-lfs-(authenticate|transfer) REPO OPERATION" or "git-(upload|receive)-pack REPO" remoteCmd := strings.Split(os.Args[offset+1], " ") if len(remoteCmd) < 2 { fmt.Fprintf(os.Stderr, "bad command line: %s\nargs: %v", remoteCmd, os.Args) os.Exit(1) } if remoteCmd[0] == "git-lfs-transfer" || remoteCmd[0] == "git-upload-pack" || remoteCmd[0] == "git-receive-pack" { err := spawnCommand(os.Args[offset+1]) if err != nil { fmt.Fprintf(os.Stderr, "error running command %q: %v", remoteCmd[0], err) os.Exit(1) } return } repo := remoteCmd[1] r := &sshResponse{ Href: fmt.Sprintf("http://127.0.0.1:%s/%s.git/info/lfs", os.Args[2], repo), } switch repo { case "/ssh-expired-absolute": r.ExpiresAt = time.Now().Add(-5 * time.Minute) case "/ssh-expired-relative": r.ExpiresIn = -5 case "/ssh-expired-both": r.ExpiresAt = time.Now().Add(-5 * time.Minute) r.ExpiresIn = -5 } json.NewEncoder(os.Stdout).Encode(r) } git-lfs-3.6.1/t/cmd/lfs-ssh-proxy-test.go000066400000000000000000000001741472372047300201510ustar00rootroot00000000000000//go:build testtools // +build testtools package main import "fmt" func main() { fmt.Println("SSH PROXY TEST called") } git-lfs-3.6.1/t/cmd/lfstest-badpathcheck.go000066400000000000000000000003361472372047300205210ustar00rootroot00000000000000//go:build testtools // +build testtools package main import ( "fmt" "os" ) func main() { fmt.Println("exploit") fmt.Fprintln(os.Stderr, "exploit") // Close the file handle only if the file was actually created. f, err := os.Create("exploit") if err == nil { f.Close() } } git-lfs-3.6.1/t/cmd/lfstest-count-tests.go000066400000000000000000000166151472372047300204160ustar00rootroot00000000000000package main import ( "context" "fmt" "io" "net/http" "os" "os/exec" "path/filepath" "runtime" "strconv" "strings" "time" ) var ( // countFile is the path to a file (relative to the $LFSTEST_DIR) whose // contents are the number of actively-running integration tests. countFile = "test_count" // lockFile is the path to a file (relative to the $LFSTEST_DIR) whose // presence indicates that another invocation of the lfstest-count-tests // program is modifying the test_count. lockFile = "test_count.lock" // lockAcquireTimeout is the maximum amount of time that we will wait // for lockFile to become available (and thus the amount of time that we // will wait in order to acquire the lock). lockAcquireTimeout = 5 * time.Second // errCouldNotAcquire indicates that the program could not acquire the // lock needed to modify the test_count. It is a fatal error.
errCouldNotAcquire = fmt.Errorf("could not acquire lock, dying") // errNegativeCount indicates that the count in test_count was negative, // which is unexpected and makes this script behave in an undefined // fashion. errNegativeCount = fmt.Errorf("unexpected negative count") ) // countFn is a type signature that all functions that wish to modify the // test_count must inhabit. // // The first and only formal parameter is the current number of running tests // found in test_count after acquiring the lock. // // The returned tuple indicates (1) the new number that should be written to // test_count, and (2) if there was an error in computing that value. If err is // non-nil, the program will exit and test_count will not be updated. type countFn func(int) (int, error) func main() { if len(os.Args) > 2 { fmt.Fprintf(os.Stderr, "usage: %s [increment|decrement]\n", os.Args[0]) os.Exit(1) } ctx, cancel := context.WithTimeout( context.Background(), lockAcquireTimeout) defer cancel() if err := acquire(ctx); err != nil { fatal(err) } defer func() { if err := release(); err != nil { fmt.Fprintf(os.Stderr, "unable to release lock file: %s\n", err) } }() if len(os.Args) == 1 { // Calling with no arguments indicates that we simply want to // read the contents of test_count. callWithCount(func(n int) (int, error) { fmt.Fprintf(os.Stdout, "%d\n", n) return n, nil }) return } var err error switch strings.ToLower(os.Args[1]) { case "increment": err = callWithCount(func(n int) (int, error) { if n > 0 { // If n > 0, it is therefore true that an // lfstest-gitserver invocation is already // running. // // Hence, let's do nothing here other than // increase the count. return n + 1, nil } // The lfstest-gitserver invocation (see: below) does // not itself create a gitserver.log in the appropriate // directory. Thus, let's create it ourselves instead. log, err := os.Create(fmt.Sprintf( "%s/gitserver.log", os.Getenv("LFSTEST_DIR"))) if err != nil { return n, err } // The executable name depends on the X environment // variable, which is set in script/cibuild. var cmd *exec.Cmd if runtime.GOOS == "windows" { cmd = exec.Command("lfstest-gitserver.exe") } else { cmd = exec.Command("lfstest-gitserver") } // The following are ported from the old // test/testhelpers.sh, and comprise the requisite // environment variables needed to run // lfstest-gitserver. cmd.Env = append(os.Environ(), fmt.Sprintf("LFSTEST_URL=%s", os.Getenv("LFSTEST_URL")), fmt.Sprintf("LFSTEST_SSL_URL=%s", os.Getenv("LFSTEST_SSL_URL")), fmt.Sprintf("LFSTEST_CLIENT_CERT_URL=%s", os.Getenv("LFSTEST_CLIENT_CERT_URL")), fmt.Sprintf("LFSTEST_DIR=%s", os.Getenv("LFSTEST_DIR")), fmt.Sprintf("LFSTEST_CERT=%s", os.Getenv("LFSTEST_CERT")), fmt.Sprintf("LFSTEST_CLIENT_CERT=%s", os.Getenv("LFSTEST_CLIENT_CERT")), fmt.Sprintf("LFSTEST_CLIENT_KEY=%s", os.Getenv("LFSTEST_CLIENT_KEY")), ) cmd.Stdout = log cmd.Stderr = log // Start performs a fork/execve, hence we can abandon // this process once it has started. if err := cmd.Start(); err != nil { return n, err } return 1, nil }) case "decrement": err = callWithCount(func(n int) (int, error) { if n > 1 { // If there are at least two tests running, we // need not shut down an lfstest-gitserver // instance. return n - 1, nil } // Otherwise, we need to POST to /shutdown, which will // cause the lfstest-gitserver to abort itself.
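// (The server's base address is read from the file named by LFS_URL_FILE below; assuming a hypothetical value of "http://127.0.0.1:12345", this results in a POST to "http://127.0.0.1:12345/shutdown".)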
url, err := os.ReadFile(os.Getenv("LFS_URL_FILE")) if err == nil { _, err = http.Post(string(url)+"/shutdown", "application/text", strings.NewReader(time.Now().String())) } return 0, nil }) } if err != nil { fatal(err) } } var ( // acquireTick is the constant time that one tick (i.e., one attempt at // acquiring the lock) should last. acquireTick = 10 * time.Millisecond ) // acquire acquires the lock file necessary to perform updates to test_count, // and returns an error if that lock cannot be acquired. func acquire(ctx context.Context) error { path, err := path(lockFile) if err != nil { return err } tick := time.NewTicker(acquireTick) defer tick.Stop() for { select { case <-tick.C: // Try every tick of the above ticker before giving up // and trying again. f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) if err == nil || !os.IsExist(err) { if err == nil { f.Close() } return err } case <-ctx.Done(): // If the context.Context above has reached its // deadline, we must give up. return errCouldNotAcquire } } } // release releases the lock file so that another process can take over, or // returns an error. func release() error { path, err := path(lockFile) if err != nil { return err } return os.Remove(path) } // callWithCount calls the given countFn with the current count in test_count, // and updates it with what the function returns. // // If the function produced an error, that will be returned instead. func callWithCount(fn countFn) error { path, err := path(countFile) if err != nil { return err } f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0666) if err != nil { return err } contents, err := io.ReadAll(f) if err != nil { return err } var n int = 0 if len(contents) != 0 { n, err = strconv.Atoi(string(contents)) if err != nil { return err } if n < 0 { return errNegativeCount } } after, err := fn(n) if err != nil { return err } // We want to write over the contents in the file, so "truncate" the // file to a length of 0, and then seek to the beginning of the file to // update the write head. if err := f.Truncate(0); err != nil { return err } if _, err := f.Seek(0, io.SeekStart); err != nil { return err } if _, err := fmt.Fprintf(f, "%d", after); err != nil { return err } return nil } // path returns an absolute path corresponding to any given path relative to the // 't' directory of the current checkout of Git LFS. func path(s string) (string, error) { p := filepath.Join(filepath.Dir(os.Getenv("LFSTEST_DIR")), s) if err := os.MkdirAll(filepath.Dir(p), 0777); err != nil { return "", err } return p, nil } // fatal reports the given error (if non-nil), and then dies. If the error was // nil, nothing happens. 
func fatal(err error) { if err == nil { return } if err := release(); err != nil { fmt.Fprintf(os.Stderr, "fatal: while dying, got: %s\n", err) } fmt.Fprintf(os.Stderr, "fatal: %s\n", err) os.Exit(1) } git-lfs-3.6.1/t/cmd/lfstest-customadapter.go000066400000000000000000000175651472372047300210070ustar00rootroot00000000000000//go:build testtools // +build testtools package main import ( "bufio" "encoding/json" "fmt" "io" "net/http" "os" "strconv" "strings" "time" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/tools" ) var cfg = config.New() // This test custom adapter just acts as a bridge for uploads/downloads // in order to demonstrate & test the custom transfer adapter protocols // All we actually do is relay the requests back to the normal storage URLs // of our test server for simplicity, but this proves the principle func main() { scanner := bufio.NewScanner(os.Stdin) writer := bufio.NewWriter(os.Stdout) errWriter := bufio.NewWriter(os.Stderr) apiClient, err := lfsapi.NewClient(cfg) if err != nil { writeToStderr("Error creating api client: "+err.Error(), errWriter) os.Exit(1) } for scanner.Scan() { line := scanner.Text() var req request if err := json.Unmarshal([]byte(line), &req); err != nil { writeToStderr(fmt.Sprintf("Unable to parse request: %v\n", line), errWriter) continue } switch req.Event { case "init": writeToStderr(fmt.Sprintf("Initialised test custom adapter for %s\n", req.Operation), errWriter) resp := &initResponse{} sendResponse(resp, writer, errWriter) case "download": writeToStderr(fmt.Sprintf("Received download request for %s\n", req.Oid), errWriter) performDownload(apiClient, req.Oid, req.Size, req.Action, writer, errWriter) case "upload": writeToStderr(fmt.Sprintf("Received upload request for %s\n", req.Oid), errWriter) performUpload(apiClient, req.Oid, req.Size, req.Action, req.Path, writer, errWriter) case "terminate": writeToStderr("Terminating test custom adapter gracefully.\n", errWriter) break } } } func writeToStderr(msg string, errWriter *bufio.Writer) { if !strings.HasSuffix(msg, "\n") { msg = msg + "\n" } errWriter.WriteString(msg) errWriter.Flush() } func sendResponse(r interface{}, writer, errWriter *bufio.Writer) error { b, err := json.Marshal(r) if err != nil { return err } // Line oriented JSON b = append(b, '\n') _, err = writer.Write(b) if err != nil { return err } writer.Flush() writeToStderr(fmt.Sprintf("Sent message %v", string(b)), errWriter) return nil } func sendTransferError(oid string, code int, message string, writer, errWriter *bufio.Writer) { resp := &transferResponse{"complete", oid, "", &transferError{code, message}} err := sendResponse(resp, writer, errWriter) if err != nil { writeToStderr(fmt.Sprintf("Unable to send transfer error: %v\n", err), errWriter) } } func sendProgress(oid string, bytesSoFar int64, bytesSinceLast int, writer, errWriter *bufio.Writer) { resp := &progressResponse{"progress", oid, bytesSoFar, bytesSinceLast} err := sendResponse(resp, writer, errWriter) if err != nil { writeToStderr(fmt.Sprintf("Unable to send progress update: %v\n", err), errWriter) } } func performDownload(apiClient *lfsapi.Client, oid string, size int64, a *action, writer, errWriter *bufio.Writer) { // We just use the URLs we're given, so we're just a proxy for the direct method // but this is enough to test intermediate custom adapters req, err := http.NewRequest("GET", a.Href, nil) if err != nil { sendTransferError(oid, 2, err.Error(), writer, errWriter) return } for k := range 
a.Header {
		req.Header.Set(k, a.Header[k])
	}

	res, err := apiClient.DoAPIRequestWithAuth("origin", req)
	if err != nil {
		statusCode := 6
		if res != nil {
			statusCode = res.StatusCode
		}
		sendTransferError(oid, statusCode, err.Error(), writer, errWriter)
		return
	}
	defer res.Body.Close()

	dlFile, err := os.CreateTemp("", "lfscustomdl")
	if err != nil {
		sendTransferError(oid, 3, err.Error(), writer, errWriter)
		return
	}
	defer dlFile.Close()
	dlfilename := dlFile.Name()

	// Turn callback into progress messages
	cb := func(totalSize int64, readSoFar int64, readSinceLast int) error {
		sendProgress(oid, readSoFar, readSinceLast, writer, errWriter)
		return nil
	}
	_, err = tools.CopyWithCallback(dlFile, res.Body, res.ContentLength, cb)
	if err != nil {
		sendTransferError(oid, 4, fmt.Sprintf("cannot write data to tempfile %q: %v", dlfilename, err), writer, errWriter)
		os.Remove(dlfilename)
		return
	}
	if err := dlFile.Close(); err != nil {
		sendTransferError(oid, 5, fmt.Sprintf("can't close tempfile %q: %v", dlfilename, err), writer, errWriter)
		os.Remove(dlfilename)
		return
	}

	// completed
	complete := &transferResponse{"complete", oid, dlfilename, nil}
	err = sendResponse(complete, writer, errWriter)
	if err != nil {
		writeToStderr(fmt.Sprintf("Unable to send completion message: %v\n", err), errWriter)
	}
}

func performUpload(apiClient *lfsapi.Client, oid string, size int64, a *action, fromPath string, writer, errWriter *bufio.Writer) {
	// We just use the URLs we're given, so we're just a proxy for the
	// direct method, but this is enough to test intermediate custom
	// adapters.
	req, err := http.NewRequest("PUT", a.Href, nil)
	if err != nil {
		sendTransferError(oid, 2, err.Error(), writer, errWriter)
		return
	}

	for k := range a.Header {
		req.Header.Set(k, a.Header[k])
	}

	if len(req.Header.Get("Content-Type")) == 0 {
		req.Header.Set("Content-Type", "application/octet-stream")
	}

	if req.Header.Get("Transfer-Encoding") == "chunked" {
		req.TransferEncoding = []string{"chunked"}
	} else {
		req.Header.Set("Content-Length", strconv.FormatInt(size, 10))
	}

	req.ContentLength = size

	f, err := os.OpenFile(fromPath, os.O_RDONLY, 0644)
	if err != nil {
		sendTransferError(oid, 3, fmt.Sprintf("Cannot read data from %q: %v", fromPath, err), writer, errWriter)
		return
	}
	defer f.Close()

	// Turn callback into progress messages
	cb := func(totalSize int64, readSoFar int64, readSinceLast int) error {
		sendProgress(oid, readSoFar, readSinceLast, writer, errWriter)
		return nil
	}
	req.Body = tools.NewBodyWithCallback(f, size, cb)

	res, err := apiClient.DoAPIRequestWithAuth("origin", req)
	if err != nil {
		// Guard against a nil response (e.g., on connection errors)
		// before reading its status code, as performDownload does
		// above.
		statusCode := 6
		if res != nil {
			statusCode = res.StatusCode
		}
		sendTransferError(oid, statusCode, fmt.Sprintf("Error uploading data for %s: %v", oid, err), writer, errWriter)
		return
	}

	if res.StatusCode > 299 {
		msg := fmt.Sprintf("Invalid status for %s %s: %d", req.Method, strings.SplitN(req.URL.String(), "?", 2)[0], res.StatusCode)
		sendTransferError(oid, res.StatusCode, msg, writer, errWriter)
		return
	}

	io.Copy(io.Discard, res.Body)
	res.Body.Close()

	// completed
	complete := &transferResponse{"complete", oid, "", nil}
	err = sendResponse(complete, writer, errWriter)
	if err != nil {
		writeToStderr(fmt.Sprintf("Unable to send completion message: %v\n", err), errWriter)
	}
}

// Structs reimplemented so closer to a real external implementation
type header struct {
	Key   string `json:"key"`
	Value string `json:"value"`
}
type action struct {
	Href      string            `json:"href"`
	Header    map[string]string `json:"header,omitempty"`
	ExpiresAt time.Time         `json:"expires_at,omitempty"`
}
type transferError struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}
// Combined request struct which can accept anything
type request struct {
	Event               string  `json:"event"`
	Operation           string  `json:"operation"`
	Concurrent          bool    `json:"concurrent"`
	ConcurrentTransfers int     `json:"concurrenttransfers"`
	Oid                 string  `json:"oid"`
	Size                int64   `json:"size"`
	Path                string  `json:"path"`
	Action              *action `json:"action"`
}

type initResponse struct {
	Error *transferError `json:"error,omitempty"`
}
type transferResponse struct {
	Event string         `json:"event"`
	Oid   string         `json:"oid"`
	Path  string         `json:"path,omitempty"` // always blank for upload
	Error *transferError `json:"error,omitempty"`
}
type progressResponse struct {
	Event          string `json:"event"`
	Oid            string `json:"oid"`
	BytesSoFar     int64  `json:"bytesSoFar"`
	BytesSinceLast int    `json:"bytesSinceLast"`
}
git-lfs-3.6.1/t/cmd/lfstest-genrandom.go000066400000000000000000000030771472372047300200770ustar00rootroot00000000000000//go:build testtools
// +build testtools

package main

import (
	"crypto/rand"
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"os"
	"strconv"
)

const usageFmt = "Usage: %s [--base64|--base64url] [<count>]\n"

func main() {
	offset := 1
	b64 := false
	b64url := false
	if len(os.Args) > offset && (os.Args[offset] == "--base64" || os.Args[offset] == "--base64url") {
		b64 = true
		b64url = os.Args[offset] == "--base64url"
		offset += 1
	}
	if len(os.Args) > offset+1 {
		fmt.Fprintf(os.Stderr, usageFmt, os.Args[0])
		os.Exit(2)
	}

	var count uint64 = ^uint64(0)
	if len(os.Args) == offset+1 {
		var err error
		if count, err = strconv.ParseUint(os.Args[offset], 10, 64); err != nil {
			fmt.Fprintf(os.Stderr, "Error reading size: %s, %v\n", os.Args[offset], err)
			os.Exit(3)
		}
	}

	b := make([]byte, 32)
	bb := make([]byte, max(base64.RawStdEncoding.EncodedLen(len(b)), base64.RawURLEncoding.EncodedLen(len(b))))
	for count > 0 {
		n, err := rand.Read(b)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error reading random bytes: %v\n", err)
			os.Exit(4)
		}
		if b64 {
			if b64url {
				base64.RawURLEncoding.Encode(bb, b[:n])
				n = base64.RawURLEncoding.EncodedLen(n)
			} else {
				base64.RawStdEncoding.Encode(bb, b[:n])
				n = base64.RawStdEncoding.EncodedLen(n)
			}
		}
		num := min(uint64(n), count)
		if b64 {
			err = binary.Write(os.Stdout, binary.LittleEndian, bb[:num])
		} else {
			err = binary.Write(os.Stdout, binary.LittleEndian, b[:num])
		}
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error writing random bytes: %v\n", err)
			os.Exit(5)
		}
		count -= num
	}
}
git-lfs-3.6.1/t/cmd/lfstest-gitserver.go000066400000000000000000001364451472372047300201430ustar00rootroot00000000000000//go:build testtools
// +build testtools

package main

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	"log"
	"math"
	"math/big"
	"net/http"
	"net/http/httptest"
	"net/textproto"
	"net/url"
	"os"
	"os/exec"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"
	"unicode"
)

var (
	repoDir          string
	largeObjects     = newLfsStorage()
	server           *httptest.Server
	serverTLS        *httptest.Server
	serverClientCert *httptest.Server

	// maps OIDs to content strings. Both the LFS and Storage test servers
	// below see OIDs.
	oidHandlers map[string]string

	// These magic strings tell the test LFS server to change its behavior
	// so the integration tests can check those use cases. Tests will
	// create objects with the magic strings as the contents.
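	// (Illustrative note: the selection is by OID — init() at the bottom
	// of this file seeds oidHandlers with sha256(<magic string>) mapped
	// to the string itself, so only a test file's contents matter, not
	// its name. A minimal sketch of creating such an object, using one
	// of the contentHandlers entries below:
	//
	//	printf "status-batch-403" > 403.dat
	//
	// Any other entry works the same way.)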
// // printf "status:lfs:404" > 404.dat // contentHandlers = []string{ "status-batch-403", "status-batch-404", "status-batch-410", "status-batch-422", "status-batch-500", "status-storage-403", "status-storage-404", "status-storage-410", "status-storage-422", "status-storage-500", "status-storage-503", "status-batch-resume-206", "batch-resume-fail-fallback", "return-expired-action", "return-expired-action-forever", "return-invalid-size", "object-authenticated", "storage-download-retry", "storage-upload-retry", "storage-upload-retry-later", "storage-upload-retry-later-no-header", "unknown-oid", "send-verify-action", "send-deprecated-links", "redirect-storage-upload", "storage-compress", "batch-hash-algo-empty", "batch-hash-algo-invalid", "auth-bearer", "auth-multistage", } reqCookieReposRE = regexp.MustCompile(`\A/require-cookie-`) dekInfoRE = regexp.MustCompile(`DEK-Info: AES-128-CBC,([a-fA-F0-9]*)`) ) func main() { repoDir = os.Getenv("LFSTEST_DIR") mux := http.NewServeMux() server = httptest.NewServer(mux) serverTLS = httptest.NewTLSServer(mux) serverClientCert = httptest.NewUnstartedServer(mux) //setup Client Cert server rootKey, rootCert := generateCARootCertificates() _, clientCertPEM, clientKeyPEM, clientKeyEncPEM := generateClientCertificates(rootCert, rootKey) certPool := x509.NewCertPool() certPool.AddCert(rootCert) serverClientCert.TLS = &tls.Config{ Certificates: []tls.Certificate{serverTLS.TLS.Certificates[0]}, ClientAuth: tls.RequireAndVerifyClientCert, ClientCAs: certPool, } serverClientCert.StartTLS() stopch := make(chan bool) mux.HandleFunc("/shutdown", func(w http.ResponseWriter, r *http.Request) { stopch <- true }) mux.HandleFunc("/storage/", storageHandler) mux.HandleFunc("/verify", verifyHandler) mux.HandleFunc("/redirect307/", redirect307Handler) mux.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "%s\n", time.Now().String()) }) mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { id, ok := reqId(w) if !ok { return } if reqCookieReposRE.MatchString(r.URL.Path) { if skipIfNoCookie(w, r, id) { return } } if strings.Contains(r.URL.Path, "/info/lfs") { if !skipIfBadAuth(w, r, id) { lfsHandler(w, r, id) } return } debug(id, "git http-backend %s %s", r.Method, r.URL) gitHandler(w, r) }) urlname := writeTestStateFile([]byte(server.URL), "LFSTEST_URL", "lfstest-gitserver") defer os.RemoveAll(urlname) sslurlname := writeTestStateFile([]byte(serverTLS.URL), "LFSTEST_SSL_URL", "lfstest-gitserver-ssl") defer os.RemoveAll(sslurlname) clientCertUrlname := writeTestStateFile([]byte(serverClientCert.URL), "LFSTEST_CLIENT_CERT_URL", "lfstest-gitserver-client-cert-url") defer os.RemoveAll(clientCertUrlname) block := &pem.Block{} block.Type = "CERTIFICATE" block.Bytes = serverTLS.TLS.Certificates[0].Certificate[0] pembytes := pem.EncodeToMemory(block) certname := writeTestStateFile(pembytes, "LFSTEST_CERT", "lfstest-gitserver-cert") defer os.RemoveAll(certname) cccertname := writeTestStateFile(clientCertPEM, "LFSTEST_CLIENT_CERT", "lfstest-gitserver-client-cert") defer os.RemoveAll(cccertname) ckcertname := writeTestStateFile(clientKeyPEM, "LFSTEST_CLIENT_KEY", "lfstest-gitserver-client-key") defer os.RemoveAll(ckcertname) ckecertname := writeTestStateFile(clientKeyEncPEM, "LFSTEST_CLIENT_KEY_ENCRYPTED", "lfstest-gitserver-client-key-enc") defer os.RemoveAll(ckecertname) debug("init", "server url: %s", server.URL) debug("init", "server tls url: %s", serverTLS.URL) debug("init", "server client cert url: %s", serverClientCert.URL) <-stopch 
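	// Illustrative note: we block here until a test harness POSTs to the
	// /shutdown endpoint registered above; lfstest-count-tests does this
	// when the last test finishes, roughly:
	//
	//	http.Post(url+"/shutdown", "application/text", strings.NewReader(time.Now().String()))
	//
	// after which the servers are closed below.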
server.Close()
	serverTLS.Close()
	serverClientCert.Close()

	debug("close", "git server done")
}

// writeTestStateFile writes contents to either the file referenced by the
// environment variable envVar, or defaultFilename if that's not set. Returns
// the filename that was used.
func writeTestStateFile(contents []byte, envVar, defaultFilename string) string {
	f := os.Getenv(envVar)
	if len(f) == 0 {
		f = defaultFilename
	}
	file, err := os.Create(f)
	if err != nil {
		log.Fatalln(err)
	}
	file.Write(contents)
	file.Close()
	return f
}

type lfsObject struct {
	Oid           string              `json:"oid,omitempty"`
	Size          int64               `json:"size,omitempty"`
	Authenticated bool                `json:"authenticated,omitempty"`
	Actions       map[string]*lfsLink `json:"actions,omitempty"`
	Links         map[string]*lfsLink `json:"_links,omitempty"`
	Err           *lfsError           `json:"error,omitempty"`
}

type lfsLink struct {
	Href      string            `json:"href"`
	Header    map[string]string `json:"header,omitempty"`
	ExpiresAt time.Time         `json:"expires_at,omitempty"`
	ExpiresIn int               `json:"expires_in,omitempty"`
}

type lfsError struct {
	Code    int    `json:"code,omitempty"`
	Message string `json:"message"`
}

func writeLFSError(w http.ResponseWriter, code int, msg string) {
	by, err := json.Marshal(&lfsError{Message: msg})
	if err != nil {
		http.Error(w, "json encoding error: "+err.Error(), 500)
		return
	}

	w.Header().Set("Content-Type", "application/vnd.git-lfs+json")
	w.WriteHeader(code)
	w.Write(by)
}

// handles any requests with "{name}.server.git/info/lfs" in the path
func lfsHandler(w http.ResponseWriter, r *http.Request, id string) {
	repo, err := repoFromLfsUrl(r.URL.Path)
	if err != nil {
		w.WriteHeader(500)
		w.Write([]byte(err.Error()))
		return
	}

	// Check that the client accepts the data we plan to send.
	if !strings.Contains(r.Header.Get("Accept"), "application/vnd.git-lfs+json") {
		w.WriteHeader(406)
		return
	}

	debug(id, "git lfs %s %s repo: %s", r.Method, r.URL, repo)
	w.Header().Set("Content-Type", "application/vnd.git-lfs+json")
	switch r.Method {
	case "POST":
		// Reject invalid data.
		if !strings.Contains(r.Header.Get("Content-Type"), "application/vnd.git-lfs+json") {
			w.WriteHeader(400)
			return
		}
		if strings.HasSuffix(r.URL.String(), "batch") {
			lfsBatchHandler(w, r, id, repo)
		} else {
			locksHandler(w, r, repo)
		}
	case "DELETE":
		lfsDeleteHandler(w, r, id, repo)
	case "GET":
		if strings.Contains(r.URL.String(), "/locks") {
			locksHandler(w, r, repo)
		} else {
			w.WriteHeader(404)
			w.Write([]byte("lock request"))
		}
	default:
		w.WriteHeader(405)
	}
}

func lfsUrl(repo, oid string, redirect bool) string {
	repo = url.QueryEscape(repo)
	if redirect {
		return server.URL + "/redirect307/objects/" + oid + "?r=" + repo
	}
	return server.URL + "/storage/" + oid + "?r=" + repo
}

const (
	secondsToRefillTokens = 10
	refillTokenCount      = 5
)

var (
	requestTokens   = make(map[string]int)
	retryStartTimes = make(map[string]time.Time)
	laterRetriesMu  sync.Mutex
)

// checkRateLimit tracks the various requests to the git-server. If it is the
// first request of its kind, a timer is started; once it finishes, a certain
// number of requests become available.
func checkRateLimit(api, direction, repo, oid string) (seconds int, isWait bool) {
	laterRetriesMu.Lock()
	defer laterRetriesMu.Unlock()
	key := strings.Join([]string{direction, repo, oid}, ":")
	if requestsRemaining, ok := requestTokens[key]; !ok || requestsRemaining == 0 {
		if retryStartTimes[key] == (time.Time{}) {
			// If time is not initialized, set it to now
			retryStartTimes[key] = time.Now()
		}

		// The user is not allowed to make a request now and must
		// wait for the required time to pass.
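		// Worked example with the constants above: if 4 seconds have
		// passed since retryStartTimes[key], the caller is told to
		// retry after 10 - 4 + 1 = 7 seconds; once the full 10 seconds
		// have elapsed, the next call grants refillTokenCount (5)
		// fresh requests.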
		secsPassed := time.Since(retryStartTimes[key]).Seconds()
		if secsPassed >= float64(secondsToRefillTokens) {
			// The required time has passed.
			requestTokens[key] = refillTokenCount
			return 0, false
		}
		return secondsToRefillTokens - int(secsPassed) + 1, true
	}

	requestTokens[key]--

	// If that was the last token, record the time so the refill timer
	// starts.
	if requestTokens[key] == 0 {
		retryStartTimes[key] = time.Now()
	}
	return 0, false
}

var (
	retries   = make(map[string]uint32)
	retriesMu sync.Mutex
)

func incrementRetriesFor(api, direction, repo, oid string, check bool) (after uint32, ok bool) {
	// fmtStr formats a string like "<api>-<direction>-[check]-retry",
	// i.e., "legacy-upload-check-retry", or "storage-download-retry".
	var fmtStr string
	if check {
		fmtStr = "%s-%s-check-retry"
	} else {
		fmtStr = "%s-%s-retry"
	}

	if oidHandlers[oid] != fmt.Sprintf(fmtStr, api, direction) {
		return 0, false
	}

	retriesMu.Lock()
	defer retriesMu.Unlock()

	retryKey := strings.Join([]string{direction, repo, oid}, ":")

	retries[retryKey]++
	retries := retries[retryKey]

	return retries, true
}

func lfsDeleteHandler(w http.ResponseWriter, r *http.Request, id, repo string) {
	parts := strings.Split(r.URL.Path, "/")
	oid := parts[len(parts)-1]

	largeObjects.Delete(repo, oid)
	debug(id, "DELETE: %s", oid)
	w.WriteHeader(200)
}

type batchReq struct {
	Transfers []string    `json:"transfers"`
	Operation string      `json:"operation"`
	Objects   []lfsObject `json:"objects"`
	Ref       *Ref        `json:"ref,omitempty"`
}

func (r *batchReq) RefName() string {
	if r.Ref == nil {
		return ""
	}
	return r.Ref.Name
}

type batchResp struct {
	Transfer      string      `json:"transfer,omitempty"`
	Objects       []lfsObject `json:"objects"`
	HashAlgorithm string      `json:"hash_algo,omitempty"`
}

func lfsBatchHandler(w http.ResponseWriter, r *http.Request, id, repo string) {
	checkingObject := r.Header.Get("X-Check-Object") == "1"
	if !checkingObject && repo == "batchunsupported" {
		w.WriteHeader(404)
		return
	}

	if !checkingObject && repo == "badbatch" {
		w.WriteHeader(203)
		return
	}

	if repo == "netrctest" {
		_, user, pass, err := extractAuth(r.Header.Get("Authorization"))
		if err != nil || (user != "netrcuser" || pass != "netrcpass") {
			w.WriteHeader(403)
			return
		}
	}

	if missingRequiredCreds(w, r, repo) {
		return
	}

	buf := &bytes.Buffer{}
	tee := io.TeeReader(r.Body, buf)
	objs := &batchReq{}
	err := json.NewDecoder(tee).Decode(objs)
	io.Copy(io.Discard, r.Body)
	r.Body.Close()

	debug(id, "REQUEST")
	debug(id, buf.String())

	if err != nil {
		log.Fatal(err)
	}

	if strings.HasSuffix(repo, "branch-required") {
		parts := strings.Split(repo, "-")
		lenParts := len(parts)
		if lenParts > 3 && "refs/heads/"+parts[lenParts-3] != objs.RefName() {
			w.WriteHeader(403)
			json.NewEncoder(w).Encode(struct {
				Message string `json:"message"`
			}{fmt.Sprintf("Expected ref %q, got %q", "refs/heads/"+parts[lenParts-3], objs.RefName())})
			return
		}
	}

	if strings.HasSuffix(repo, "batch-retry-later") {
		if timeLeft, isWaiting := checkRateLimit("batch", "", repo, ""); isWaiting {
			w.Header().Set("Retry-After", strconv.Itoa(timeLeft))
			w.WriteHeader(http.StatusTooManyRequests)
			w.Write([]byte("rate limit reached"))
			fmt.Println("Setting header to: ", strconv.Itoa(timeLeft))
			return
		}
	}

	if strings.HasSuffix(repo, "batch-retry-later-no-header") {
		if _, isWaiting := checkRateLimit("batch", "", repo, ""); isWaiting {
			w.WriteHeader(http.StatusTooManyRequests)
			w.Write([]byte("rate limit reached"))
			fmt.Println("Not setting Retry-After header")
			return
		}
	}

	res := []lfsObject{}
	testingChunked := testingChunkedTransferEncoding(r)
	testingTus := testingTusUploadInBatchReq(r)
	testingTusInterrupt := testingTusUploadInterruptedInBatchReq(r)
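	// Transfer negotiation note: the client advertises the adapters it
	// supports in the batch request's "transfers" array; below we echo
	// "tus" or "testcustom" back in the response's "transfer" field when
	// the matching test URL prefix was used and the client listed that
	// adapter.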
testingCustomTransfer := testingCustomTransfer(r) var transferChoice string var searchForTransfer string hashAlgo := "sha256" if testingTus { searchForTransfer = "tus" } else if testingCustomTransfer { searchForTransfer = "testcustom" } if len(searchForTransfer) > 0 { for _, t := range objs.Transfers { if t == searchForTransfer { transferChoice = searchForTransfer break } } } for _, obj := range objs.Objects { handler := oidHandlers[obj.Oid] action := objs.Operation o := lfsObject{ Size: obj.Size, Actions: make(map[string]*lfsLink), } // Clobber the OID if told to do so. if handler == "unknown-oid" { o.Oid = "unknown-oid" } else { o.Oid = obj.Oid } exists := largeObjects.Has(repo, obj.Oid) addAction := true if action == "download" { if !exists { o.Err = &lfsError{Code: 404, Message: fmt.Sprintf("Object %v does not exist", obj.Oid)} addAction = false } } else { if exists { // not an error but don't add an action addAction = false } } if handler == "object-authenticated" { o.Authenticated = true } switch handler { case "status-batch-403": o.Err = &lfsError{Code: 403, Message: "welp"} case "status-batch-404": o.Err = &lfsError{Code: 404, Message: "welp"} case "status-batch-410": o.Err = &lfsError{Code: 410, Message: "welp"} case "status-batch-422": o.Err = &lfsError{Code: 422, Message: "welp"} case "status-batch-500": o.Err = &lfsError{Code: 500, Message: "welp"} default: // regular 200 response if handler == "return-invalid-size" { o.Size = -1 } if handler == "batch-hash-algo-empty" { hashAlgo = "" } else if handler == "batch-hash-algo-invalid" { hashAlgo = "invalid" } if handler == "send-deprecated-links" { o.Links = make(map[string]*lfsLink) } if addAction { a := &lfsLink{ Href: lfsUrl(repo, obj.Oid, handler == "redirect-storage-upload"), Header: map[string]string{}, } a = serveExpired(a, repo, handler) if handler == "send-deprecated-links" { o.Links[action] = a } else { o.Actions[action] = a } } if handler == "send-verify-action" { o.Actions["verify"] = &lfsLink{ Href: server.URL + "/verify", Header: map[string]string{ "repo": repo, }, } } } if testingChunked && addAction { if handler == "send-deprecated-links" { o.Links[action].Header["Transfer-Encoding"] = "chunked" } else { o.Actions[action].Header["Transfer-Encoding"] = "chunked" } } if testingTusInterrupt && addAction { if handler == "send-deprecated-links" { o.Links[action].Header["Lfs-Tus-Interrupt"] = "true" } else { o.Actions[action].Header["Lfs-Tus-Interrupt"] = "true" } } res = append(res, o) } ores := batchResp{HashAlgorithm: hashAlgo, Transfer: transferChoice, Objects: res} by, err := json.Marshal(ores) if err != nil { log.Fatal(err) } debug(id, "RESPONSE: 200") debug(id, string(by)) w.WriteHeader(200) w.Write(by) } // emu guards expiredRepos var emu sync.Mutex // expiredRepos is a map keyed by repository name, valuing to whether or not it // has yet served an expired object. 
var expiredRepos = map[string]bool{} // serveExpired marks the given repo as having served an expired object, making // it unable for that same repository to return an expired object in the future, func serveExpired(a *lfsLink, repo, handler string) *lfsLink { var ( dur = -5 * time.Minute at = time.Now().Add(dur) ) if handler == "return-expired-action-forever" || (handler == "return-expired-action" && canServeExpired(repo)) { emu.Lock() expiredRepos[repo] = true emu.Unlock() a.ExpiresAt = at return a } switch repo { case "expired-absolute": a.ExpiresAt = at case "expired-relative": a.ExpiresIn = -5 case "expired-both": a.ExpiresAt = at a.ExpiresIn = -5 } return a } // canServeExpired returns whether or not a repository is capable of serving an // expired object. In other words, canServeExpired returns whether or not the // given repo has yet served an expired object. func canServeExpired(repo string) bool { emu.Lock() defer emu.Unlock() return !expiredRepos[repo] } // Persistent state across requests var batchResumeFailFallbackStorageAttempts = 0 var tusStorageAttempts = 0 var ( vmu sync.Mutex verifyCounts = make(map[string]int) verifyRetryRe = regexp.MustCompile(`verify-fail-(\d+)-times?$`) ) func verifyHandler(w http.ResponseWriter, r *http.Request) { repo := r.Header.Get("repo") var payload struct { Oid string `json:"oid"` Size int64 `json:"size"` } if err := json.NewDecoder(r.Body).Decode(&payload); err != nil { writeLFSError(w, http.StatusUnprocessableEntity, err.Error()) return } var max int if matches := verifyRetryRe.FindStringSubmatch(repo); len(matches) < 2 { return } else { max, _ = strconv.Atoi(matches[1]) } key := strings.Join([]string{repo, payload.Oid}, ":") vmu.Lock() verifyCounts[key] = verifyCounts[key] + 1 count := verifyCounts[key] vmu.Unlock() if count < max { writeLFSError(w, http.StatusServiceUnavailable, fmt.Sprintf( "intentionally failing verify request %d (out of %d)", count, max, )) return } } // handles any /storage/{oid} requests func storageHandler(w http.ResponseWriter, r *http.Request) { id, ok := reqId(w) if !ok { return } repo := r.URL.Query().Get("r") parts := strings.Split(r.URL.Path, "/") oid := parts[len(parts)-1] if missingRequiredCreds(w, r, repo) { return } debug(id, "storage %s %s repo: %s", r.Method, oid, repo) switch r.Method { case "PUT": switch oidHandlers[oid] { case "status-storage-403": w.WriteHeader(403) return case "status-storage-404": w.WriteHeader(404) return case "status-storage-410": w.WriteHeader(410) return case "status-storage-422": w.WriteHeader(422) return case "status-storage-500": w.WriteHeader(500) return case "status-storage-503": writeLFSError(w, 503, "LFS is temporarily unavailable") return case "object-authenticated": if len(r.Header.Get("Authorization")) > 0 { w.WriteHeader(400) w.Write([]byte("Should not send authentication")) } return case "storage-upload-retry": if retries, ok := incrementRetriesFor("storage", "upload", repo, oid, false); ok && retries < 3 { w.WriteHeader(500) w.Write([]byte("malformed content")) return } case "storage-upload-retry-later": if timeLeft, isWaiting := checkRateLimit("storage", "upload", repo, oid); isWaiting { w.Header().Set("Retry-After", strconv.Itoa(timeLeft)) w.WriteHeader(http.StatusTooManyRequests) w.Write([]byte("rate limit reached")) fmt.Println("Setting header to: ", strconv.Itoa(timeLeft)) return } case "storage-upload-retry-later-no-header": if _, isWaiting := checkRateLimit("storage", "upload", repo, oid); isWaiting { w.WriteHeader(http.StatusTooManyRequests) 
w.Write([]byte("rate limit reached")) fmt.Println("Not setting Retry-After header") return } case "storage-compress": if r.Header.Get("Accept-Encoding") != "gzip" { w.WriteHeader(500) w.Write([]byte("not encoded")) return } } if testingChunkedTransferEncoding(r) { valid := false for _, value := range r.TransferEncoding { if value == "chunked" { valid = true break } } if !valid { debug(id, "Chunked transfer encoding expected") } } hash := sha256.New() buf := &bytes.Buffer{} io.Copy(io.MultiWriter(hash, buf), r.Body) oid := hex.EncodeToString(hash.Sum(nil)) if !strings.HasSuffix(r.URL.Path, "/"+oid) { w.WriteHeader(403) return } largeObjects.Set(repo, oid, buf.Bytes()) case "GET": parts := strings.Split(r.URL.Path, "/") oid := parts[len(parts)-1] statusCode := 200 byteLimit := 0 resumeAt := int64(0) compress := false if by, ok := largeObjects.Get(repo, oid); ok { if len(by) == len("storage-download-retry-later") && string(by) == "storage-download-retry-later" { if secsToWait, wait := checkRateLimit("storage", "download", repo, oid); wait { statusCode = http.StatusTooManyRequests w.Header().Set("Retry-After", strconv.Itoa(secsToWait)) by = []byte("rate limit reached") fmt.Println("Setting header to: ", strconv.Itoa(secsToWait)) } } else if len(by) == len("storage-download-retry-later-no-header") && string(by) == "storage-download-retry-later-no-header" { if _, wait := checkRateLimit("storage", "download", repo, oid); wait { statusCode = http.StatusTooManyRequests by = []byte("rate limit reached") fmt.Println("Not setting Retry-After header") } } else if len(by) == len("storage-download-retry") && string(by) == "storage-download-retry" { if retries, ok := incrementRetriesFor("storage", "download", repo, oid, false); ok && retries < 3 { statusCode = 500 by = []byte("malformed content") } } else if len(by) == len("storage-compress") && string(by) == "storage-compress" { if r.Header.Get("Accept-Encoding") != "gzip" { statusCode = 500 by = []byte("not encoded") } else { compress = true } } else if len(by) == len("status-batch-resume-206") && string(by) == "status-batch-resume-206" { // Resume if header includes range, otherwise deliberately interrupt if rangeHdr := r.Header.Get("Range"); rangeHdr != "" { regex := regexp.MustCompile(`bytes=(\d+)\-.*`) match := regex.FindStringSubmatch(rangeHdr) if match != nil && len(match) > 1 { statusCode = 206 resumeAt, _ = strconv.ParseInt(match[1], 10, 32) w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", resumeAt, len(by), resumeAt-int64(len(by)))) } } else { byteLimit = 10 } } else if len(by) == len("batch-resume-fail-fallback") && string(by) == "batch-resume-fail-fallback" { // Fail any Range: request even though we said we supported it // To make sure client can fall back if rangeHdr := r.Header.Get("Range"); rangeHdr != "" { w.WriteHeader(416) return } if batchResumeFailFallbackStorageAttempts == 0 { // Truncate output on FIRST attempt to cause resume // Second attempt (without range header) is fallback, complete successfully byteLimit = 8 batchResumeFailFallbackStorageAttempts++ } } else if string(by) == "status-batch-retry" { if rangeHdr := r.Header.Get("Range"); rangeHdr != "" { regex := regexp.MustCompile(`bytes=(\d+)\-(.*)`) match := regex.FindStringSubmatch(rangeHdr) // We have a Range header with two // non-empty values. 
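				// For example, "bytes=5-9" yields
				// match[1] == "5" and match[2] == "9", while
				// "bytes=5-" leaves match[2] empty and falls
				// through to the single-bound 206 branch
				// below.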
if match != nil && len(match) > 2 && len(match[2]) != 0 { first, _ := strconv.ParseInt(match[1], 10, 32) second, _ := strconv.ParseInt(match[2], 10, 32) // The second part of the range // is smaller than the first // part (or the latter part of // the range is non-integral). // This is invalid; reject it. if second < first { w.WriteHeader(400) return } // The range is valid; we'll // take the branch below. } // We got a valid range header, so // provide a 206 Partial Content. We // ignore the upper bound if one was // provided. if match != nil && len(match) > 1 { statusCode = 206 resumeAt, _ = strconv.ParseInt(match[1], 10, 32) w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", resumeAt, len(by), resumeAt-int64(len(by)))) } } } var wrtr io.Writer = w if compress { w.Header().Set("Content-Encoding", "gzip") gz := gzip.NewWriter(w) defer gz.Close() wrtr = gz } w.WriteHeader(statusCode) if byteLimit > 0 { wrtr.Write(by[0:byteLimit]) } else if resumeAt > 0 { wrtr.Write(by[resumeAt:]) } else { wrtr.Write(by) } return } w.WriteHeader(404) case "HEAD": // tus.io if !validateTusHeaders(r, id) { w.WriteHeader(400) return } parts := strings.Split(r.URL.Path, "/") oid := parts[len(parts)-1] var offset int64 if by, ok := largeObjects.GetIncomplete(repo, oid); ok { offset = int64(len(by)) } w.Header().Set("Upload-Offset", strconv.FormatInt(offset, 10)) w.WriteHeader(200) case "PATCH": // tus.io if !validateTusHeaders(r, id) { w.WriteHeader(400) return } parts := strings.Split(r.URL.Path, "/") oid := parts[len(parts)-1] offsetHdr := r.Header.Get("Upload-Offset") offset, err := strconv.ParseInt(offsetHdr, 10, 64) if err != nil { log.Fatal("Unable to parse Upload-Offset header in request: ", err) w.WriteHeader(400) return } hash := sha256.New() buf := &bytes.Buffer{} out := io.MultiWriter(hash, buf) if by, ok := largeObjects.GetIncomplete(repo, oid); ok { if offset != int64(len(by)) { log.Fatal(fmt.Sprintf("Incorrect offset in request, got %d expected %d", offset, len(by))) w.WriteHeader(400) return } _, err := out.Write(by) if err != nil { log.Fatal("Error reading incomplete bytes from store: ", err) w.WriteHeader(500) return } largeObjects.DeleteIncomplete(repo, oid) debug(id, "Resuming upload of %v at byte %d", oid, offset) } // As a test, we intentionally break the upload from byte 0 by only // reading some bytes the quitting & erroring, this forces a resume // any offset > 0 will work ok var copyErr error if r.Header.Get("Lfs-Tus-Interrupt") == "true" && offset == 0 { chdr := r.Header.Get("Content-Length") contentLen, err := strconv.ParseInt(chdr, 10, 64) if err != nil { log.Fatal(fmt.Sprintf("Invalid Content-Length %q", chdr)) w.WriteHeader(400) return } truncated := contentLen / 3 _, _ = io.CopyN(out, r.Body, truncated) r.Body.Close() copyErr = fmt.Errorf("Simulated copy error") } else { _, copyErr = io.Copy(out, r.Body) } if copyErr != nil { b := buf.Bytes() if len(b) > 0 { debug(id, "Incomplete upload of %v, %d bytes", oid, len(b)) largeObjects.SetIncomplete(repo, oid, b) } w.WriteHeader(500) } else { checkoid := hex.EncodeToString(hash.Sum(nil)) if checkoid != oid { log.Fatal(fmt.Sprintf("Incorrect oid after calculation, got %q expected %q", checkoid, oid)) w.WriteHeader(403) return } b := buf.Bytes() largeObjects.Set(repo, oid, b) w.Header().Set("Upload-Offset", strconv.FormatInt(int64(len(b)), 10)) w.WriteHeader(204) } default: w.WriteHeader(405) } } func validateTusHeaders(r *http.Request, id string) bool { if len(r.Header.Get("Tus-Resumable")) == 0 { debug(id, "Missing 
Tus-Resumable header in request") return false } return true } func gitHandler(w http.ResponseWriter, r *http.Request) { defer func() { io.Copy(io.Discard, r.Body) r.Body.Close() }() cmd := exec.Command("git", "http-backend") cmd.Env = []string{ fmt.Sprintf("GIT_PROJECT_ROOT=%s", repoDir), fmt.Sprintf("GIT_HTTP_EXPORT_ALL="), fmt.Sprintf("PATH_INFO=%s", r.URL.Path), fmt.Sprintf("QUERY_STRING=%s", r.URL.RawQuery), fmt.Sprintf("REQUEST_METHOD=%s", r.Method), fmt.Sprintf("CONTENT_TYPE=%s", r.Header.Get("Content-Type")), } if vals := r.Header.Values("Git-Protocol"); len(vals) == 1 { cmd.Env = append(cmd.Env, fmt.Sprintf("GIT_PROTOCOL=%s", vals[0])) } buffer := &bytes.Buffer{} cmd.Stdin = r.Body cmd.Stdout = buffer cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { log.Fatal(err) } text := textproto.NewReader(bufio.NewReader(buffer)) code, _, _ := text.ReadCodeLine(-1) if code != 0 { w.WriteHeader(code) } headers, _ := text.ReadMIMEHeader() head := w.Header() for key, values := range headers { for _, value := range values { head.Add(key, value) } } io.Copy(w, text.R) } func redirect307Handler(w http.ResponseWriter, r *http.Request) { id, ok := reqId(w) if !ok { return } // Send a redirect to info/lfs // Make it either absolute or relative depending on subpath parts := strings.Split(r.URL.Path, "/") // first element is always blank since rooted var redirectTo string if parts[2] == "rel" { redirectTo = "/" + strings.Join(parts[3:], "/") } else if parts[2] == "abs" { redirectTo = server.URL + "/" + strings.Join(parts[3:], "/") } else if parts[2] == "objects" { repo := r.URL.Query().Get("r") redirectTo = server.URL + "/storage/" + strings.Join(parts[3:], "/") + "?r=" + repo } else { debug(id, "Invalid URL for redirect: %v", r.URL) w.WriteHeader(404) return } w.Header().Set("Location", redirectTo) w.WriteHeader(307) } type User struct { Name string `json:"name"` } type Lock struct { Id string `json:"id"` Path string `json:"path"` Owner User `json:"owner"` LockedAt time.Time `json:"locked_at"` } type LockRequest struct { Path string `json:"path"` Ref *Ref `json:"ref,omitempty"` } func (r *LockRequest) RefName() string { if r.Ref == nil { return "" } return r.Ref.Name } type LockResponse struct { Lock *Lock `json:"lock"` Message string `json:"message,omitempty"` } type UnlockRequest struct { Force bool `json:"force"` Ref *Ref `json:"ref,omitempty"` } func (r *UnlockRequest) RefName() string { if r.Ref == nil { return "" } return r.Ref.Name } type UnlockResponse struct { Lock *Lock `json:"lock"` Message string `json:"message,omitempty"` } type LockList struct { Locks []Lock `json:"locks"` NextCursor string `json:"next_cursor,omitempty"` Message string `json:"message,omitempty"` } type Ref struct { Name string `json:"name,omitempty"` } type VerifiableLockRequest struct { Ref *Ref `json:"ref,omitempty"` Cursor string `json:"cursor,omitempty"` Limit int `json:"limit,omitempty"` } func (r *VerifiableLockRequest) RefName() string { if r.Ref == nil { return "" } return r.Ref.Name } type VerifiableLockList struct { Ours []Lock `json:"ours"` Theirs []Lock `json:"theirs"` NextCursor string `json:"next_cursor,omitempty"` Message string `json:"message,omitempty"` } var ( lmu sync.RWMutex repoLocks = map[string][]Lock{} ) func addLocks(repo string, l ...Lock) { lmu.Lock() defer lmu.Unlock() repoLocks[repo] = append(repoLocks[repo], l...) 
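	// Keep each repository's lock list ordered by LockedAt so that
	// cursor-based pagination in getFilteredLocks walks a stable,
	// creation-ordered sequence.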
sort.Sort(LocksByCreatedAt(repoLocks[repo])) } func getLocks(repo string) []Lock { lmu.RLock() defer lmu.RUnlock() locks := repoLocks[repo] cp := make([]Lock, len(locks)) for i, l := range locks { cp[i] = l } return cp } func getFilteredLocks(repo, path, cursor, limit string) ([]Lock, string, error) { locks := getLocks(repo) if cursor != "" { lastSeen := -1 for i, l := range locks { if l.Id == cursor { lastSeen = i break } } if lastSeen > -1 { locks = locks[lastSeen:] } else { return nil, "", fmt.Errorf("cursor (%s) not found", cursor) } } if path != "" { var filtered []Lock for _, l := range locks { if l.Path == path { filtered = append(filtered, l) } } locks = filtered } if limit != "" { size, err := strconv.Atoi(limit) if err != nil { return nil, "", errors.New("unable to parse limit amount") } size = int(math.Min(float64(len(locks)), 3)) if size < 0 { return nil, "", nil } if size+1 < len(locks) { return locks[:size], locks[size+1].Id, nil } } return locks, "", nil } func delLock(repo string, id string) *Lock { lmu.RLock() defer lmu.RUnlock() var deleted *Lock locks := make([]Lock, 0, len(repoLocks[repo])) for _, l := range repoLocks[repo] { if l.Id == id { deleted = &l continue } locks = append(locks, l) } repoLocks[repo] = locks return deleted } type LocksByCreatedAt []Lock func (c LocksByCreatedAt) Len() int { return len(c) } func (c LocksByCreatedAt) Less(i, j int) bool { return c[i].LockedAt.Before(c[j].LockedAt) } func (c LocksByCreatedAt) Swap(i, j int) { c[i], c[j] = c[j], c[i] } var ( lockRe = regexp.MustCompile(`/locks/?$`) unlockRe = regexp.MustCompile(`locks/([^/]+)/unlock\z`) ) func locksHandler(w http.ResponseWriter, r *http.Request, repo string) { dec := json.NewDecoder(r.Body) enc := json.NewEncoder(w) if repo == "netrctest" { _, user, pass, err := extractAuth(r.Header.Get("Authorization")) if err != nil || (user == "netrcuser" && pass == "badpassretry") { writeLFSError(w, 401, "Error: Bad Auth") return } } switch r.Method { case "GET": if !lockRe.MatchString(r.URL.Path) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusNotFound) w.Write([]byte(`{"message":"unknown path: ` + r.URL.Path + `"}`)) return } if err := r.ParseForm(); err != nil { http.Error(w, "could not parse form values", http.StatusInternalServerError) return } if strings.HasSuffix(repo, "branch-required") { parts := strings.Split(repo, "-") lenParts := len(parts) if lenParts > 3 && "refs/heads/"+parts[lenParts-3] != r.FormValue("refspec") { w.WriteHeader(403) enc.Encode(struct { Message string `json:"message"` }{fmt.Sprintf("Expected ref %q, got %q", "refs/heads/"+parts[lenParts-3], r.FormValue("refspec"))}) return } } ll := &LockList{} w.Header().Set("Content-Type", "application/json") locks, nextCursor, err := getFilteredLocks(repo, r.FormValue("path"), r.FormValue("cursor"), r.FormValue("limit")) if err != nil { ll.Message = err.Error() } else { ll.Locks = locks ll.NextCursor = nextCursor } enc.Encode(ll) return case "POST": w.Header().Set("Content-Type", "application/json") if strings.HasSuffix(r.URL.Path, "unlock") { var lockId string if matches := unlockRe.FindStringSubmatch(r.URL.Path); len(matches) > 1 { lockId = matches[1] } if len(lockId) == 0 { enc.Encode(&UnlockResponse{Message: "Invalid lock"}) } unlockRequest := &UnlockRequest{} if err := dec.Decode(unlockRequest); err != nil { enc.Encode(&UnlockResponse{Message: err.Error()}) return } if strings.HasSuffix(repo, "branch-required") { parts := strings.Split(repo, "-") lenParts := len(parts) if lenParts > 3 && 
"refs/heads/"+parts[lenParts-3] != unlockRequest.RefName() { w.WriteHeader(403) enc.Encode(struct { Message string `json:"message"` }{fmt.Sprintf("Expected ref %q, got %q", "refs/heads/"+parts[lenParts-3], unlockRequest.RefName())}) return } } if l := delLock(repo, lockId); l != nil { enc.Encode(&UnlockResponse{Lock: l}) } else { enc.Encode(&UnlockResponse{Message: "unable to find lock"}) } return } if strings.HasSuffix(r.URL.Path, "/locks/verify") { if strings.HasSuffix(repo, "verify-5xx") { w.WriteHeader(500) return } if strings.HasSuffix(repo, "verify-501") { w.WriteHeader(501) return } if strings.HasSuffix(repo, "verify-403") { w.WriteHeader(403) return } switch repo { case "pre_push_locks_verify_404": w.WriteHeader(http.StatusNotFound) w.Write([]byte(`{"message":"pre_push_locks_verify_404"}`)) return case "pre_push_locks_verify_410": w.WriteHeader(http.StatusGone) w.Write([]byte(`{"message":"pre_push_locks_verify_410"}`)) return } reqBody := &VerifiableLockRequest{} if err := dec.Decode(reqBody); err != nil { w.WriteHeader(http.StatusBadRequest) enc.Encode(struct { Message string `json:"message"` }{"json decode error: " + err.Error()}) return } if strings.HasSuffix(repo, "branch-required") { parts := strings.Split(repo, "-") lenParts := len(parts) if lenParts > 3 && "refs/heads/"+parts[lenParts-3] != reqBody.RefName() { w.WriteHeader(403) enc.Encode(struct { Message string `json:"message"` }{fmt.Sprintf("Expected ref %q, got %q", "refs/heads/"+parts[lenParts-3], reqBody.RefName())}) return } } ll := &VerifiableLockList{} locks, nextCursor, err := getFilteredLocks(repo, "", reqBody.Cursor, strconv.Itoa(reqBody.Limit)) if err != nil { ll.Message = err.Error() } else { ll.NextCursor = nextCursor for _, l := range locks { if strings.Contains(l.Path, "theirs") { ll.Theirs = append(ll.Theirs, l) } else { ll.Ours = append(ll.Ours, l) } } } enc.Encode(ll) return } if strings.HasSuffix(r.URL.Path, "/locks") { lockRequest := &LockRequest{} if err := dec.Decode(lockRequest); err != nil { enc.Encode(&LockResponse{Message: err.Error()}) } if strings.HasSuffix(repo, "branch-required") { parts := strings.Split(repo, "-") lenParts := len(parts) if lenParts > 3 && "refs/heads/"+parts[lenParts-3] != lockRequest.RefName() { w.WriteHeader(403) enc.Encode(struct { Message string `json:"message"` }{fmt.Sprintf("Expected ref %q, got %q", "refs/heads/"+parts[lenParts-3], lockRequest.RefName())}) return } } for _, l := range getLocks(repo) { if l.Path == lockRequest.Path { enc.Encode(&LockResponse{Message: "lock already created"}) return } } var id [20]byte rand.Read(id[:]) lock := &Lock{ Id: fmt.Sprintf("%x", id[:]), Path: lockRequest.Path, Owner: User{Name: "Git LFS Tests"}, LockedAt: time.Now(), } addLocks(repo, *lock) // TODO(taylor): commit_needed case // TODO(taylor): err case enc.Encode(&LockResponse{ Lock: lock, }) return } } http.NotFound(w, r) } func missingRequiredCreds(w http.ResponseWriter, r *http.Request, repo string) bool { if !strings.HasPrefix(repo, "requirecreds") { return false } auth := r.Header.Get("Authorization") if len(auth) == 0 { writeLFSError(w, 401, "Error: Authorization Required") return true } _, user, pass, err := extractAuth(auth) if err != nil { writeLFSError(w, 403, err.Error()) return true } if user != "requirecreds" || pass != "pass" { writeLFSError(w, 403, fmt.Sprintf("Got: '%s' => '%s' : '%s'", auth, user, pass)) return true } return false } func testingChunkedTransferEncoding(r *http.Request) bool { return strings.HasPrefix(r.URL.String(), 
"/test-chunked-transfer-encoding") } func testingTusUploadInBatchReq(r *http.Request) bool { return strings.HasPrefix(r.URL.String(), "/test-tus-upload") } func testingTusUploadInterruptedInBatchReq(r *http.Request) bool { return strings.HasPrefix(r.URL.String(), "/test-tus-upload-interrupt") } func testingCustomTransfer(r *http.Request) bool { return strings.HasPrefix(r.URL.String(), "/test-custom-transfer") } var lfsUrlRE = regexp.MustCompile(`\A/?([^/]+)/info/lfs`) func repoFromLfsUrl(urlpath string) (string, error) { matches := lfsUrlRE.FindStringSubmatch(urlpath) if len(matches) != 2 { return "", fmt.Errorf("LFS url '%s' does not match %v", urlpath, lfsUrlRE) } repo := matches[1] if strings.HasSuffix(repo, ".git") { return repo[0 : len(repo)-4], nil } return repo, nil } type lfsStorage struct { objects map[string]map[string][]byte incomplete map[string]map[string][]byte mutex *sync.Mutex } func (s *lfsStorage) Get(repo, oid string) ([]byte, bool) { s.mutex.Lock() defer s.mutex.Unlock() repoObjects, ok := s.objects[repo] if !ok { return nil, ok } by, ok := repoObjects[oid] return by, ok } func (s *lfsStorage) Has(repo, oid string) bool { s.mutex.Lock() defer s.mutex.Unlock() repoObjects, ok := s.objects[repo] if !ok { return false } _, ok = repoObjects[oid] return ok } func (s *lfsStorage) Set(repo, oid string, by []byte) { s.mutex.Lock() defer s.mutex.Unlock() repoObjects, ok := s.objects[repo] if !ok { repoObjects = make(map[string][]byte) s.objects[repo] = repoObjects } repoObjects[oid] = by } func (s *lfsStorage) Delete(repo, oid string) { s.mutex.Lock() defer s.mutex.Unlock() repoObjects, ok := s.objects[repo] if ok { delete(repoObjects, oid) } } func (s *lfsStorage) GetIncomplete(repo, oid string) ([]byte, bool) { s.mutex.Lock() defer s.mutex.Unlock() repoObjects, ok := s.incomplete[repo] if !ok { return nil, ok } by, ok := repoObjects[oid] return by, ok } func (s *lfsStorage) SetIncomplete(repo, oid string, by []byte) { s.mutex.Lock() defer s.mutex.Unlock() repoObjects, ok := s.incomplete[repo] if !ok { repoObjects = make(map[string][]byte) s.incomplete[repo] = repoObjects } repoObjects[oid] = by } func (s *lfsStorage) DeleteIncomplete(repo, oid string) { s.mutex.Lock() defer s.mutex.Unlock() repoObjects, ok := s.incomplete[repo] if ok { delete(repoObjects, oid) } } func newLfsStorage() *lfsStorage { return &lfsStorage{ objects: make(map[string]map[string][]byte), incomplete: make(map[string]map[string][]byte), mutex: &sync.Mutex{}, } } func extractAuth(auth string) (string, string, string, error) { if strings.HasPrefix(auth, "Basic ") { decodeBy, err := base64.StdEncoding.DecodeString(auth[6:len(auth)]) decoded := string(decodeBy) if err != nil { return "", "", "", err } parts := strings.SplitN(decoded, ":", 2) if len(parts) == 2 { return "Basic", parts[0], parts[1], nil } return "", "", "", nil } else if strings.HasPrefix(auth, "Bearer ") || strings.HasPrefix(auth, "Multistage ") { authtype, cred, _ := strings.Cut(auth, " ") return authtype, "", cred, nil } return "", "", "", nil } func skipIfNoCookie(w http.ResponseWriter, r *http.Request, id string) bool { cookie := r.Header.Get("Cookie") if strings.Contains(cookie, "secret") { return false } w.WriteHeader(403) debug(id, "No cookie received: %q", r.URL.Path) return true } func skipIfBadAuth(w http.ResponseWriter, r *http.Request, id string) bool { wantedAuth := "Basic realm=\"testsuite\"" authHeader := "Lfs-Authenticate" if strings.HasPrefix(r.URL.Path, "/auth-bearer") { wantedAuth = "Bearer" authHeader = "Www-Authenticate" 
} if strings.HasPrefix(r.URL.Path, "/auth-multistage") { wantedAuth = "Multistage type=foo" authHeader = "Www-Authenticate" } auth := r.Header.Get("Authorization") if auth == "" { w.Header().Add(authHeader, wantedAuth) w.WriteHeader(401) return true } authtype, user, cred, err := extractAuth(auth) if err != nil { w.WriteHeader(403) debug(id, "Error decoding auth: %s", err) return true } if !strings.HasPrefix(wantedAuth, authtype) { w.WriteHeader(403) debug(id, "Unwanted auth: %s (wanted %q)", authtype, wantedAuth) return true } switch authtype { case "Basic": switch user { case "user": if cred == "pass" { return false } case "netrcuser", "requirecreds": return false case "path": if strings.HasPrefix(r.URL.Path, "/"+cred) { return false } debug(id, "auth attempt against: %q", r.URL.Path) } case "Bearer": if cred == "token" { return false } case "Multistage": if cred == "cred1" { wantedAuth = "Multistage type=bar" w.Header().Add(authHeader, wantedAuth) w.WriteHeader(401) debug(id, "auth stage 1 succeeded: %q", auth) return true } else if cred == "cred2" { return false } } w.WriteHeader(403) debug(id, "Bad auth: %q", auth) return true } func init() { oidHandlers = make(map[string]string) for _, content := range contentHandlers { h := sha256.New() h.Write([]byte(content)) oidHandlers[hex.EncodeToString(h.Sum(nil))] = content } } func debug(reqid, msg string, args ...interface{}) { fullargs := make([]interface{}, len(args)+1) fullargs[0] = reqid for i, a := range args { fullargs[i+1] = a } log.Printf("[%s] "+msg+"\n", fullargs...) } func reqId(w http.ResponseWriter) (string, bool) { b := make([]byte, 16) _, err := rand.Read(b) if err != nil { http.Error(w, "error generating id: "+err.Error(), 500) return "", false } return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]), true } // https://ericchiang.github.io/post/go-tls/ func generateCARootCertificates() (rootKey *rsa.PrivateKey, rootCert *x509.Certificate) { // generate a new key-pair rootKey, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { log.Fatalf("generating random key: %v", err) } rootCertTmpl, err := CertTemplate() if err != nil { log.Fatalf("creating cert template: %v", err) } // describe what the certificate will be used for rootCertTmpl.IsCA = true rootCertTmpl.KeyUsage = x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature rootCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth} // rootCertTmpl.IPAddresses = []net.IP{net.ParseIP("127.0.0.1")} rootCert, _, err = CreateCert(rootCertTmpl, rootCertTmpl, &rootKey.PublicKey, rootKey) return } func generateClientCertificates(rootCert *x509.Certificate, rootKey interface{}) (clientKey *rsa.PrivateKey, clientCertPEM []byte, clientKeyPEM []byte, clientKeyEncPEM []byte) { // create a key-pair for the client clientKey, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { log.Fatalf("generating random key: %v", err) } // create a template for the client clientCertTmpl, err1 := CertTemplate() if err1 != nil { log.Fatalf("creating cert template: %v", err1) } clientCertTmpl.KeyUsage = x509.KeyUsageDigitalSignature clientCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} // the root cert signs the cert by again providing its private key _, clientCertPEM, err2 := CreateCert(clientCertTmpl, rootCert, &clientKey.PublicKey, rootKey) if err2 != nil { log.Fatalf("error creating cert: %v", err2) } privKey := x509.MarshalPKCS1PrivateKey(clientKey) // encode and load the cert and private key for the client 
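	// (Illustrative: pem.EncodeToMemory below produces the standard
	// textual form,
	//
	//	-----BEGIN RSA PRIVATE KEY-----
	//	<base64-encoded DER>
	//	-----END RSA PRIVATE KEY-----
	//
	// which the test suite writes out via the LFSTEST_CLIENT_KEY state
	// file above.)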
clientKeyPEM = pem.EncodeToMemory(&pem.Block{ Type: "RSA PRIVATE KEY", Bytes: privKey, }) clientKeyEnc, err := x509.EncryptPEMBlock(bytes.NewBuffer(privKey), "RSA PRIVATE KEY", privKey, ([]byte)("pass"), x509.PEMCipherAES128) if err != nil { log.Fatalf("creating encrypted private key: %v", err) } clientKeyEncPEM = pem.EncodeToMemory(clientKeyEnc) // ensure salt is in uppercase hexadecimal for gnutls library v3.7.x: // https://github.com/gnutls/gnutls/commit/4604bbde14d2c6adb2af5315f9063ad65ab50aa6 // https://github.com/gnutls/gnutls/blob/a0aa4780892dcc3c14cc10d823f8766ac75bcd85/lib/x509/privkey_openssl.c#L205-L206 dekInfoIndexes := dekInfoRE.FindSubmatchIndex(clientKeyEncPEM) if dekInfoIndexes == nil || len(dekInfoIndexes) != 4 { log.Fatalf("DEK-Info header not found in encrypted private key: %s", string(clientKeyEncPEM)) } for i := dekInfoIndexes[2]; i < dekInfoIndexes[3]; i++ { c := clientKeyEncPEM[i] if c >= 'a' && c <= 'f' { clientKeyEncPEM[i] = byte(unicode.ToUpper(rune(c))) } } return } // helper function to create a cert template with a serial number and other required fields func CertTemplate() (*x509.Certificate, error) { // generate a random serial number (a real cert authority would have some logic behind this) serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { return nil, errors.New("failed to generate serial number: " + err.Error()) } tmpl := x509.Certificate{ SerialNumber: serialNumber, Subject: pkix.Name{Organization: []string{"Yhat, Inc."}}, SignatureAlgorithm: x509.SHA256WithRSA, NotBefore: time.Now(), NotAfter: time.Now().Add(time.Hour), // valid for an hour BasicConstraintsValid: true, } return &tmpl, nil } func CreateCert(template, parent *x509.Certificate, pub interface{}, parentPriv interface{}) ( cert *x509.Certificate, certPEM []byte, err error) { certDER, err := x509.CreateCertificate(rand.Reader, template, parent, pub, parentPriv) if err != nil { return } // parse the resulting certificate so we can use it again cert, err = x509.ParseCertificate(certDER) if err != nil { return } // PEM encode the certificate (this is a standard TLS encoding) b := pem.Block{Type: "CERTIFICATE", Bytes: certDER} certPEM = pem.EncodeToMemory(&b) return } git-lfs-3.6.1/t/cmd/lfstest-nanomtime.go000066400000000000000000000006111472372047300201030ustar00rootroot00000000000000//go:build testtools // +build testtools package main import ( "fmt" "os" ) func main() { if len(os.Args) < 2 { fmt.Fprintf(os.Stderr, "Need an argument") os.Exit(2) } st, err := os.Stat(os.Args[1]) if err != nil { fmt.Fprintf(os.Stderr, "Failed to stat %q: %s", os.Args[1], err) os.Exit(3) } mtime := st.ModTime() fmt.Printf("%d.%09d", mtime.Unix(), mtime.Nanosecond()) } git-lfs-3.6.1/t/cmd/lfstest-realpath.go000066400000000000000000000022771472372047300177260ustar00rootroot00000000000000//go:build testtools // +build testtools package main import ( "fmt" "os" "path/filepath" ) func canonicalize(path string) (string, error) { left := path right := "" for { canon, err := filepath.EvalSymlinks(left) if err != nil && !os.IsNotExist(err) { return "", err } if err == nil { if right == "" { return canon, nil } return filepath.Join(canon, right), nil } // One component of our path is missing. Let's walk up a level // and canonicalize that and then append the remaining piece. 
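		// For example, with left = "/tmp/a/b" and right = "c", if "b"
		// does not exist we retry with left = "/tmp/a" and
		// right = "b/c", canonicalizing ever-shorter prefixes until
		// one resolves.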
full := filepath.Join(left, right) if right == "" { full = left } newleft := filepath.Clean(fmt.Sprintf("%s%c..", left, os.PathSeparator)) newright, err := filepath.Rel(newleft, full) if err != nil { return "", err } left = newleft right = newright } } func main() { if len(os.Args) != 2 { fmt.Fprintf(os.Stderr, "Usage: %s PATH\n", os.Args[0]) os.Exit(2) } path, err := filepath.Abs(os.Args[1]) if err != nil { fmt.Fprintf(os.Stderr, "Error creating absolute path: %v", err) os.Exit(3) } fullpath, err := canonicalize(path) if err != nil { fmt.Fprintf(os.Stderr, "Error canonicalizing: %v", err) os.Exit(4) } fmt.Println(fullpath) } git-lfs-3.6.1/t/cmd/lfstest-standalonecustomadapter.go000066400000000000000000000143511472372047300230460ustar00rootroot00000000000000//go:build testtools // +build testtools package main import ( "bufio" "encoding/json" "fmt" "os" "path/filepath" "strings" "time" "github.com/git-lfs/git-lfs/v3/tools" ) var backupDir string // This test custom adapter just copies the files to a folder. func main() { scanner := bufio.NewScanner(os.Stdin) writer := bufio.NewWriter(os.Stdout) errWriter := bufio.NewWriter(os.Stderr) backupDir = os.Getenv("TEST_STANDALONE_BACKUP_PATH") if backupDir == "" { writeToStderr("TEST_STANDALONE_BACKUP_PATH backup dir not set", errWriter) os.Exit(1) } for _, arg := range os.Args { writeToStderr(fmt.Sprintf("Saw argument %q\n", arg), errWriter) } for scanner.Scan() { line := scanner.Text() var req request if err := json.Unmarshal([]byte(line), &req); err != nil { writeToStderr(fmt.Sprintf("Unable to parse request: %v\n", line), errWriter) continue } switch req.Event { case "init": writeToStderr(fmt.Sprintf("Initialised test custom adapter for %s\n", req.Operation), errWriter) resp := &initResponse{} sendResponse(resp, writer, errWriter) case "download": writeToStderr(fmt.Sprintf("Received download request for %s\n", req.Oid), errWriter) performDownload(req.Oid, req.Size, writer, errWriter) case "upload": writeToStderr(fmt.Sprintf("Received upload request for %s\n", req.Oid), errWriter) performUpload(req.Oid, req.Size, req.Path, writer, errWriter) case "terminate": writeToStderr("Terminating test custom adapter gracefully.\n", errWriter) break } } } func writeToStderr(msg string, errWriter *bufio.Writer) { if !strings.HasSuffix(msg, "\n") { msg = msg + "\n" } errWriter.WriteString(msg) errWriter.Flush() } func sendResponse(r interface{}, writer, errWriter *bufio.Writer) error { b, err := json.Marshal(r) if err != nil { return err } // Line oriented JSON b = append(b, '\n') _, err = writer.Write(b) if err != nil { return err } writer.Flush() writeToStderr(fmt.Sprintf("Sent message %v", string(b)), errWriter) return nil } func sendTransferError(oid string, code int, message string, writer, errWriter *bufio.Writer) { resp := &transferResponse{"complete", oid, "", &transferError{code, message}} err := sendResponse(resp, writer, errWriter) if err != nil { writeToStderr(fmt.Sprintf("Unable to send transfer error: %v\n", err), errWriter) } } func sendProgress(oid string, bytesSoFar int64, bytesSinceLast int, writer, errWriter *bufio.Writer) { resp := &progressResponse{"progress", oid, bytesSoFar, bytesSinceLast} err := sendResponse(resp, writer, errWriter) if err != nil { writeToStderr(fmt.Sprintf("Unable to send progress update: %v\n", err), errWriter) } } func performCopy(oid, src, dst string, size int64, writer, errWriter *bufio.Writer) error { writeToStderr(fmt.Sprintf("Copying %s to %s\n", src, dst), errWriter) srcFile, err := os.OpenFile(src, 
os.O_RDONLY, 0644) if err != nil { sendTransferError(oid, 10, err.Error(), writer, errWriter) return err } defer srcFile.Close() dstFile, err := os.Create(dst) if err != nil { sendTransferError(oid, 11, err.Error(), writer, errWriter) return err } defer dstFile.Close() // Turn callback into progress messages cb := func(totalSize int64, readSoFar int64, readSinceLast int) error { sendProgress(oid, readSoFar, readSinceLast, writer, errWriter) return nil } _, err = tools.CopyWithCallback(dstFile, srcFile, size, cb) if err != nil { sendTransferError(oid, 4, fmt.Sprintf("cannot write data to dst %q: %v", dst, err), writer, errWriter) os.Remove(dst) return err } if err := dstFile.Close(); err != nil { sendTransferError(oid, 5, fmt.Sprintf("can't close dst %q: %v", dst, err), writer, errWriter) os.Remove(dst) return err } return nil } func performDownload(oid string, size int64, writer, errWriter *bufio.Writer) { dlFile, err := os.CreateTemp("", "lfscustomdl") if err != nil { sendTransferError(oid, 1, err.Error(), writer, errWriter) return } if err = dlFile.Close(); err != nil { sendTransferError(oid, 2, err.Error(), writer, errWriter) return } dlfilename := dlFile.Name() backupPath := filepath.Join(backupDir, oid) if err = performCopy(oid, backupPath, dlfilename, size, writer, errWriter); err != nil { return } // completed complete := &transferResponse{"complete", oid, dlfilename, nil} if err := sendResponse(complete, writer, errWriter); err != nil { writeToStderr(fmt.Sprintf("Unable to send completion message: %v\n", err), errWriter) } } func performUpload(oid string, size int64, fromPath string, writer, errWriter *bufio.Writer) { backupPath := filepath.Join(backupDir, oid) if err := performCopy(oid, fromPath, backupPath, size, writer, errWriter); err != nil { return } // completed complete := &transferResponse{"complete", oid, "", nil} if err := sendResponse(complete, writer, errWriter); err != nil { writeToStderr(fmt.Sprintf("Unable to send completion message: %v\n", err), errWriter) } } // Structs reimplemented so closer to a real external implementation type header struct { Key string `json:"key"` Value string `json:"value"` } type action struct { Href string `json:"href"` Header map[string]string `json:"header,omitempty"` ExpiresAt time.Time `json:"expires_at,omitempty"` } type transferError struct { Code int `json:"code"` Message string `json:"message"` } // Combined request struct which can accept anything type request struct { Event string `json:"event"` Operation string `json:"operation"` Concurrent bool `json:"concurrent"` ConcurrentTransfers int `json:"concurrenttransfers"` Oid string `json:"oid"` Size int64 `json:"size"` Path string `json:"path"` Action *action `json:"action"` } type initResponse struct { Error *transferError `json:"error,omitempty"` } type transferResponse struct { Event string `json:"event"` Oid string `json:"oid"` Path string `json:"path,omitempty"` // always blank for upload Error *transferError `json:"error,omitempty"` } type progressResponse struct { Event string `json:"event"` Oid string `json:"oid"` BytesSoFar int64 `json:"bytesSoFar"` BytesSinceLast int `json:"bytesSinceLast"` } git-lfs-3.6.1/t/cmd/lfstest-testutils.go000066400000000000000000000042051472372047300201570ustar00rootroot00000000000000//go:build testtools // +build testtools package main import ( "encoding/json" "fmt" "io" "os" "path/filepath" . 
"github.com/git-lfs/git-lfs/v3/t/cmd/util" ) type TestUtilRepoCallback struct{} func (*TestUtilRepoCallback) Fatalf(format string, args ...interface{}) { fmt.Fprintf(os.Stderr, format, args...) os.Exit(4) } func (*TestUtilRepoCallback) Errorf(format string, args ...interface{}) { fmt.Fprintf(os.Stderr, format, args...) } func main() { commandMap := map[string]func(*Repo){ "addcommits": AddCommits, } if len(os.Args) < 2 { fmt.Fprintf(os.Stderr, "Command required (e.g. addcommits)\n") os.Exit(2) } f, ok := commandMap[os.Args[1]] if !ok { fmt.Fprintf(os.Stderr, "Unknown command: %v\n", os.Args[1]) os.Exit(2) } // Construct test repo context (note: no Cleanup() call since managed outside) // also assume we're in the same folder wd, err := os.Getwd() if err != nil { fmt.Fprintf(os.Stderr, "Problem getting working dir: %v\n", err) os.Exit(2) } // Make sure we're directly inside directory which contains .git // don't want to accidentally end up committing to some other parent git _, err = os.Stat(filepath.Join(wd, ".git")) if err != nil { fmt.Fprintf(os.Stderr, "You're in the wrong directory, should be in root of a test repo: %v\n", err) os.Exit(2) } repo := WrapRepo(&TestUtilRepoCallback{}, wd) f(repo) } func AddCommits(repo *Repo) { // Read stdin as JSON []*CommitInput in, err := io.ReadAll(os.Stdin) if err != nil { fmt.Fprintf(os.Stderr, "addcommits: Unable to read input data: %v\n", err) os.Exit(3) } inputs := make([]*CommitInput, 0) err = json.Unmarshal(in, &inputs) if err != nil { fmt.Fprintf(os.Stderr, "addcommits: Unable to unmarshal JSON: %v\n%v\n", string(in), err) os.Exit(3) } outputs := repo.AddCommits(inputs) by, err := json.Marshal(outputs) if err != nil { fmt.Fprintf(os.Stderr, "addcommits: Unable to marshal output JSON: %v\n", err) os.Exit(3) } // Write response to stdout _, err = os.Stdout.Write(by) if err != nil { fmt.Fprintf(os.Stderr, "addcommits: Error writing JSON to stdout: %v\n", err) os.Exit(3) } os.Stdout.WriteString("\n") } git-lfs-3.6.1/t/cmd/util/000077500000000000000000000000001472372047300150625ustar00rootroot00000000000000git-lfs-3.6.1/t/cmd/util/testutils.go000066400000000000000000000353271472372047300174630ustar00rootroot00000000000000package util // Utility functions for more complex go tests // Need to be in a separate test package so they can be imported anywhere // Also can't add _test.go suffix to exclude from main build (import doesn't work) // To avoid import cycles, append "_test" to the package statement of any test using // this package and use "import . original/package/name" to get the same visibility // as if the test was in the same package (as usual) import ( "fmt" "io" "math/rand" "os" "os/exec" "path/filepath" "runtime" "strings" "sync" "time" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/fs" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfs" ) func init() { path := os.Getenv("PATH") sep := "" if path != "" { if runtime.GOOS == "windows" { sep = ";" } else { sep = ":" } } // Strip the trailing "t/cmd/util/testutils.go" from the path to this // source file to create a path to the working tree's "bin" directory, // then prepend that to the PATH environment variable to ensure our // "git-lfs" binary is used in preference to any installed versions when // executing the Go tests. 
_, srcdir, _, _ := runtime.Caller(0) for i := 0; i < 4; i++ { srcdir = filepath.Dir(srcdir) } var err error srcdir, err = filepath.Abs(srcdir) if err != nil { panic(err) } os.Setenv("PATH", filepath.Join(srcdir, "bin")+sep+path) } type RepoType int const ( // Normal repo with working copy RepoTypeNormal = RepoType(iota) // Bare repo (no working copy) RepoTypeBare = RepoType(iota) // Repo with working copy but git dir is separate RepoTypeSeparateDir = RepoType(iota) ) var ( // Deterministic sequence of seeds for file data fileInputSeed = rand.NewSource(0) storageOnce sync.Once ) type RepoCreateSettings struct { RepoType RepoType } // Callback interface (testing.T compatible) type RepoCallback interface { // Fatalf reports error and fails Fatalf(format string, args ...interface{}) // Errorf reports error and continues Errorf(format string, args ...interface{}) } type Repo struct { // Path to the repo, working copy if non-bare Path string // Path to the git dir GitDir string // Paths to remotes Remotes map[string]*Repo // Settings used to create this repo Settings *RepoCreateSettings // Previous dir for pushd popDir string // Test callback callback RepoCallback cfg *config.Configuration gitfilter *lfs.GitFilter fs *fs.Filesystem } // Change to repo dir but save current dir func (r *Repo) Pushd() { if r.popDir != "" { r.callback.Fatalf("Cannot Pushd twice") } oldwd, err := os.Getwd() if err != nil { r.callback.Fatalf("Can't get cwd %v", err) } err = os.Chdir(r.Path) if err != nil { r.callback.Fatalf("Can't chdir %v", err) } r.popDir = oldwd } func (r *Repo) Popd() { if r.popDir != "" { err := os.Chdir(r.popDir) if err != nil { r.callback.Fatalf("Can't chdir %v", err) } r.popDir = "" } } func (r *Repo) Filesystem() *fs.Filesystem { return r.fs } func (r *Repo) Configuration() *config.Configuration { return r.cfg } func (r *Repo) GitConfig() *git.Configuration { return r.cfg.GitConfig() } func (r *Repo) GitEnv() config.Environment { return r.cfg.Git } func (r *Repo) OSEnv() config.Environment { return r.cfg.Os } func (r *Repo) Cleanup() { // pop out if necessary r.Popd() // Make sure cwd isn't inside a path we're going to delete oldwd, err := os.Getwd() if err == nil { if strings.HasPrefix(oldwd, r.Path) || strings.HasPrefix(oldwd, r.GitDir) { os.Chdir(os.TempDir()) } } if r.GitDir != "" { os.RemoveAll(r.GitDir) r.GitDir = "" } if r.Path != "" { os.RemoveAll(r.Path) r.Path = "" } for _, remote := range r.Remotes { remote.Cleanup() } r.Remotes = nil } // NewRepo creates a new git repo in a new temp dir func NewRepo(callback RepoCallback) *Repo { return newRepo(callback, &RepoCreateSettings{ RepoType: RepoTypeNormal, }) } // NewBareRepo creates a new bare git repo in a new temp dir // Note that the repository's path does not end in ".git". 
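// Bare repositories created this way are also what AddRemote uses as its
// push/pull targets.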
func NewBareRepo(callback RepoCallback) *Repo { return newRepo(callback, &RepoCreateSettings{ RepoType: RepoTypeBare, }) } // newRepo creates a new git repo in a new temp dir with more control over settings func newRepo(callback RepoCallback, settings *RepoCreateSettings) *Repo { ret := &Repo{ Settings: settings, Remotes: make(map[string]*Repo), callback: callback, } path, err := os.MkdirTemp("", "lfsRepo") if err != nil { callback.Fatalf("Can't create temp dir for git repo: %v", err) } ret.Path = path args := []string{"init"} switch settings.RepoType { case RepoTypeBare: args = append(args, "--bare") ret.GitDir = ret.Path case RepoTypeSeparateDir: gitdir, err := os.MkdirTemp("", "lfstestgitdir") if err != nil { ret.Cleanup() callback.Fatalf("Can't create temp dir for git repo: %v", err) } args = append(args, "--separate-dir", gitdir) ret.GitDir = gitdir default: ret.GitDir = filepath.Join(ret.Path, ".git") } args = append(args, path) cmd := exec.Command("git", args...) err = cmd.Run() if err != nil { ret.Cleanup() callback.Fatalf("Unable to create git repo at %v: %v", path, err) } ret.cfg = config.NewIn(ret.Path, ret.GitDir) ret.fs = ret.cfg.Filesystem() ret.gitfilter = lfs.NewGitFilter(ret.cfg) // Configure default user/email so not reliant on env ret.Pushd() RunGitCommand(callback, true, "config", "user.name", "Git LFS Tests") RunGitCommand(callback, true, "config", "user.email", "git-lfs@example.com") ret.Popd() return ret } // WrapRepo creates a new Repo instance for an existing git repo func WrapRepo(c RepoCallback, path string) *Repo { cfg := config.NewIn(path, "") return &Repo{ Path: path, GitDir: cfg.LocalGitDir(), Settings: &RepoCreateSettings{ RepoType: RepoTypeNormal, }, callback: c, cfg: cfg, gitfilter: lfs.NewGitFilter(cfg), fs: cfg.Filesystem(), } } // Simplistic fire & forget running of git command - returns combined output func RunGitCommand(callback RepoCallback, failureCheck bool, args ...string) string { outp, err := exec.Command("git", args...).CombinedOutput() if failureCheck && err != nil { callback.Fatalf("Error running git command 'git %v': %v %v", strings.Join(args, " "), err, string(outp)) } return string(outp) } // Input data for a single file in a commit type FileInput struct { // Name of file (required) Filename string // Size of file (required) Size int64 // Input data (optional, if provided will be source of data) DataReader io.Reader // Input data (optional, if provided will be source of data) Data string } func (infile *FileInput) AddToIndex(output *CommitOutput, repo *Repo) { inputData := infile.getFileInputReader() pointer, err := infile.writeLFSPointer(repo, inputData) if err != nil { repo.callback.Errorf("%+v", err) return } output.Files = append(output.Files, pointer) RunGitCommand(repo.callback, true, "add", infile.Filename) } func (infile *FileInput) writeLFSPointer(repo *Repo, inputData io.Reader) (*lfs.Pointer, error) { cleaned, err := repo.gitfilter.Clean(inputData, infile.Filename, infile.Size, nil) if err != nil { return nil, errors.Wrap(err, "creating pointer file") } // this only created the temp file, move to final location tmpfile := cleaned.Filename mediafile, err := repo.fs.ObjectPath(cleaned.Oid) if err != nil { return nil, errors.Wrap(err, "local media path") } if _, err := os.Stat(mediafile); err != nil { if err := os.Rename(tmpfile, mediafile); err != nil { return nil, err } } // Write pointer to local filename for adding (not using clean filter) os.MkdirAll(filepath.Dir(infile.Filename), 0755) f, err := os.Create(infile.Filename) if 
err != nil { return nil, errors.Wrap(err, "creating pointer file") } _, err = cleaned.Pointer.Encode(f) f.Close() if err != nil { return nil, errors.Wrap(err, "encoding pointer file") } return cleaned.Pointer, nil } func (infile *FileInput) getFileInputReader() io.Reader { if infile.DataReader != nil { return infile.DataReader } if len(infile.Data) > 0 { return strings.NewReader(infile.Data) } // Different data for each file but deterministic return NewPlaceholderDataReader(fileInputSeed.Int63(), infile.Size) } // Input for defining commits for test repo type CommitInput struct { // Date that we should commit on (optional, leave blank for 'now') CommitDate time.Time // List of files to include in this commit Files []*FileInput // List of parent branches (all branches must have been created in a previous NewBranch or be main) // Can be omitted to just use the parent of the previous commit ParentBranches []string // Name of a new branch we should create at this commit (optional - main not required) NewBranch string // Names of any tags we should create at this commit (optional) Tags []string // Name of committer CommitterName string // Email of committer CommitterEmail string } // Output struct with details of commits created for test type CommitOutput struct { Sha string Parents []string Files []*lfs.Pointer } func commitAtDate(atDate time.Time, committerName, committerEmail, msg string) error { var args []string if committerName != "" && committerEmail != "" { args = append(args, "-c", fmt.Sprintf("user.name=%v", committerName)) args = append(args, "-c", fmt.Sprintf("user.email=%v", committerEmail)) } args = append(args, "commit", "--allow-empty", "-m", msg) cmd := exec.Command("git", args...) env := os.Environ() // set GIT_COMMITTER_DATE environment var e.g. "Fri Jun 21 20:26:41 2013 +0900" if atDate.IsZero() { env = append(env, "GIT_COMMITTER_DATE=") env = append(env, "GIT_AUTHOR_DATE=") } else { env = append(env, fmt.Sprintf("GIT_COMMITTER_DATE=%v", git.FormatGitDate(atDate))) env = append(env, fmt.Sprintf("GIT_AUTHOR_DATE=%v", git.FormatGitDate(atDate))) } cmd.Env = env out, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("%v %v", err, string(out)) } return nil } func (repo *Repo) AddCommits(inputs []*CommitInput) []*CommitOutput { if repo.Settings.RepoType == RepoTypeBare { repo.callback.Fatalf("Cannot use AddCommits on a bare repo; clone it & push changes instead") } // Change to repo working dir oldwd, err := os.Getwd() if err != nil { repo.callback.Fatalf("Can't get cwd %v", err) } err = os.Chdir(repo.Path) if err != nil { repo.callback.Fatalf("Can't chdir to repo %v", err) } // Used to check whether we need to checkout another commit before lastBranch := "main" outputs := make([]*CommitOutput, 0, len(inputs)) for i, input := range inputs { output := &CommitOutput{} // first, are we on the correct branch if len(input.ParentBranches) > 0 { if input.ParentBranches[0] != lastBranch { RunGitCommand(repo.callback, true, "checkout", input.ParentBranches[0]) lastBranch = input.ParentBranches[0] } } // Is this a merge? if len(input.ParentBranches) > 1 { // Always take the *other* side in a merge so we adopt changes // also don't automatically commit, we'll do that below args := []string{"merge", "--no-ff", "--no-commit", "--strategy-option=theirs"} args = append(args, input.ParentBranches[1:]...) RunGitCommand(repo.callback, false, args...) 
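			// Note that failureCheck is false for the merge above: with
			// --no-commit the merge command may exit non-zero (e.g. when it
			// stops on conflicts), even though we still intend to commit the
			// merged result below.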
} else if input.NewBranch != "" { RunGitCommand(repo.callback, true, "checkout", "-b", input.NewBranch) lastBranch = input.NewBranch } // Any files to write? for _, infile := range input.Files { infile.AddToIndex(output, repo) } // Now commit err = commitAtDate(input.CommitDate, input.CommitterName, input.CommitterEmail, fmt.Sprintf("Test commit %d", i)) if err != nil { repo.callback.Fatalf("Error committing: %v", err) } commit, err := git.GetCommitSummary("HEAD") if err != nil { repo.callback.Fatalf("Error determining commit SHA: %v", err) } // tags for _, tag := range input.Tags { // Use annotated tags, assume full release tags (also tag objects have edge cases) RunGitCommand(repo.callback, true, "tag", "-a", "-m", "Added tag", tag) } output.Sha = commit.Sha output.Parents = commit.Parents outputs = append(outputs, output) } // Restore cwd err = os.Chdir(oldwd) if err != nil { repo.callback.Fatalf("Can't restore old cwd %v", err) } return outputs } // Add a new remote (generate a path for it to live in, will be cleaned up) func (r *Repo) AddRemote(name string) *Repo { if _, exists := r.Remotes[name]; exists { r.callback.Fatalf("Remote %v already exists", name) } remote := newRepo(r.callback, &RepoCreateSettings{ RepoType: RepoTypeBare, }) r.Remotes[name] = remote RunGitCommand(r.callback, true, "remote", "add", name, remote.Path) return remote } // Just a pseudo-random stream of bytes (not cryptographic) // Calls RNG a bit less often than using rand.Source directly type PlaceholderDataReader struct { source rand.Source bytesLeft int64 } func NewPlaceholderDataReader(seed, size int64) *PlaceholderDataReader { return &PlaceholderDataReader{rand.NewSource(seed), size} } func (r *PlaceholderDataReader) Read(p []byte) (int, error) { c := len(p) i := 0 for i < c && r.bytesLeft > 0 { // Use all 8 bytes of the 64-bit random number val64 := r.source.Int63() for j := 0; j < 8 && i < c && r.bytesLeft > 0; j++ { // Duplicate this byte 16 times (faster) for k := 0; k < 16 && r.bytesLeft > 0; k++ { p[i] = byte(val64) i++ r.bytesLeft-- } // Next byte from the 8-byte number val64 = val64 >> 8 } } var err error if r.bytesLeft == 0 { err = io.EOF } return i, err } // RefsByName implements sort.Interface for []*git.Ref based on name type RefsByName []*git.Ref func (a RefsByName) Len() int { return len(a) } func (a RefsByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a RefsByName) Less(i, j int) bool { return a[i].Name < a[j].Name } // WorktreesByName implements sort.Interface for []*git.Worktree based on dir type WorktreesByName []*git.Worktree func (a WorktreesByName) Len() int { return len(a) } func (a WorktreesByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a WorktreesByName) Less(i, j int) bool { return a[i].Dir < a[j].Dir } // WrappedPointersByOid implements sort.Interface for []*lfs.WrappedPointer based on oid type WrappedPointersByOid []*lfs.WrappedPointer func (a WrappedPointersByOid) Len() int { return len(a) } func (a WrappedPointersByOid) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a WrappedPointersByOid) Less(i, j int) bool { return a[i].Pointer.Oid < a[j].Pointer.Oid } // PointersByOid implements sort.Interface for []*lfs.Pointer based on oid type PointersByOid []*lfs.Pointer func (a PointersByOid) Len() int { return len(a) } func (a PointersByOid) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a PointersByOid) Less(i, j int) bool { return a[i].Oid < a[j].Oid } 
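// Putting the helpers together: an illustrative sketch (not part of the test
// suite) of how a Go test might seed a repository. Any *testing.T satisfies
// RepoCallback, since it provides Fatalf and Errorf; the filenames and sizes
// here are arbitrary.
//
//	repo := NewRepo(t)
//	defer repo.Cleanup()
//	outputs := repo.AddCommits([]*CommitInput{
//		{Files: []*FileInput{{Filename: "a.bin", Size: 128}}},
//		{Files: []*FileInput{{Filename: "b.bin", Size: 256}},
//			NewBranch: "feature", Tags: []string{"v1.0.0"}},
//	})
//	// outputs[0].Sha and outputs[1].Sha now identify the two new commits,
//	// and outputs[n].Files holds the LFS pointers written for each commit.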
git-lfs-3.6.1/t/fixtures/000077500000000000000000000000001472372047300152135ustar00rootroot00000000000000git-lfs-3.6.1/t/fixtures/completions/000077500000000000000000000000001472372047300175475ustar00rootroot00000000000000git-lfs-3.6.1/t/fixtures/completions/git-lfs-completion.bash000066400000000000000000000274131472372047300241310ustar00rootroot00000000000000# bash completion V2 for git-lfs -*- shell-script -*- __git-lfs_debug() { if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then echo "$*" >> "${BASH_COMP_DEBUG_FILE}" fi } # Macs have bash3 for which the bash-completion package doesn't include # _init_completion. This is a minimal version of that function. __git-lfs_init_completion() { COMPREPLY=() _get_comp_words_by_ref "$@" cur prev words cword } # This function calls the git-lfs program to obtain the completion # results and the directive. It fills the 'out' and 'directive' vars. __git-lfs_get_completion_results() { local requestComp lastParam lastChar args # Prepare the command to request completions for the program. # Calling ${words[0]} instead of directly git-lfs allows to handle aliases args=("${words[@]:1}") requestComp="${words[0]} __completeNoDesc ${args[*]}" lastParam=${words[$((${#words[@]}-1))]} lastChar=${lastParam:$((${#lastParam}-1)):1} __git-lfs_debug "lastParam ${lastParam}, lastChar ${lastChar}" if [[ -z ${cur} && ${lastChar} != = ]]; then # If the last parameter is complete (there is a space following it) # We add an extra empty parameter so we can indicate this to the go method. __git-lfs_debug "Adding extra empty parameter" requestComp="${requestComp} ''" fi # When completing a flag with an = (e.g., git-lfs -n=) # bash focuses on the part after the =, so we need to remove # the flag part from $cur if [[ ${cur} == -*=* ]]; then cur="${cur#*=}" fi __git-lfs_debug "Calling ${requestComp}" # Use eval to handle any environment variables and such out=$(eval "${requestComp}" 2>/dev/null) # Extract the directive integer at the very end of the output following a colon (:) directive=${out##*:} # Remove the directive out=${out%:*} if [[ ${directive} == "${out}" ]]; then # There is not directive specified directive=0 fi __git-lfs_debug "The completion directive is: ${directive}" __git-lfs_debug "The completions are: ${out}" } __git-lfs_process_completion_results() { local shellCompDirectiveError=1 local shellCompDirectiveNoSpace=2 local shellCompDirectiveNoFileComp=4 local shellCompDirectiveFilterFileExt=8 local shellCompDirectiveFilterDirs=16 local shellCompDirectiveKeepOrder=32 if (((directive & shellCompDirectiveError) != 0)); then # Error code. No completion. 
__git-lfs_debug "Received error from custom completion go code" return else if (((directive & shellCompDirectiveNoSpace) != 0)); then if [[ $(type -t compopt) == builtin ]]; then __git-lfs_debug "Activating no space" compopt -o nospace else __git-lfs_debug "No space directive not supported in this version of bash" fi fi if (((directive & shellCompDirectiveKeepOrder) != 0)); then if [[ $(type -t compopt) == builtin ]]; then # no sort isn't supported for bash less than < 4.4 if [[ ${BASH_VERSINFO[0]} -lt 4 || ( ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -lt 4 ) ]]; then __git-lfs_debug "No sort directive not supported in this version of bash" else __git-lfs_debug "Activating keep order" compopt -o nosort fi else __git-lfs_debug "No sort directive not supported in this version of bash" fi fi if (((directive & shellCompDirectiveNoFileComp) != 0)); then if [[ $(type -t compopt) == builtin ]]; then __git-lfs_debug "Activating no file completion" compopt +o default else __git-lfs_debug "No file completion directive not supported in this version of bash" fi fi fi # Separate activeHelp from normal completions local completions=() local activeHelp=() __git-lfs_extract_activeHelp if (((directive & shellCompDirectiveFilterFileExt) != 0)); then # File extension filtering local fullFilter filter filteringCmd # Do not use quotes around the $completions variable or else newline # characters will be kept. for filter in ${completions[*]}; do fullFilter+="$filter|" done filteringCmd="_filedir $fullFilter" __git-lfs_debug "File filtering command: $filteringCmd" $filteringCmd elif (((directive & shellCompDirectiveFilterDirs) != 0)); then # File completion for directories only local subdir subdir=${completions[0]} if [[ -n $subdir ]]; then __git-lfs_debug "Listing directories in $subdir" pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return else __git-lfs_debug "Listing directories in ." _filedir -d fi else __git-lfs_handle_completion_types fi __git-lfs_handle_special_char "$cur" : __git-lfs_handle_special_char "$cur" = # Print the activeHelp statements before we finish if ((${#activeHelp[*]} != 0)); then printf "\n"; printf "%s\n" "${activeHelp[@]}" printf "\n" # The prompt format is only available from bash 4.4. # We test if it is available before using it. if (x=${PS1@P}) 2> /dev/null; then printf "%s" "${PS1@P}${COMP_LINE[@]}" else # Can't print the prompt. Just print the # text the user had typed, it is workable enough. printf "%s" "${COMP_LINE[@]}" fi fi } # Separate activeHelp lines from real completions. # Fills the $activeHelp and $completions arrays. __git-lfs_extract_activeHelp() { local activeHelpMarker="_activeHelp_ " local endIndex=${#activeHelpMarker} while IFS='' read -r comp; do if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then comp=${comp:endIndex} __git-lfs_debug "ActiveHelp found: $comp" if [[ -n $comp ]]; then activeHelp+=("$comp") fi else # Not an activeHelp line but a normal completion completions+=("$comp") fi done <<<"${out}" } __git-lfs_handle_completion_types() { __git-lfs_debug "__git-lfs_handle_completion_types: COMP_TYPE is $COMP_TYPE" case $COMP_TYPE in 37|42) # Type: menu-complete/menu-complete-backward and insert-completions # If the user requested inserting one completion at a time, or all # completions at once on the command-line we must remove the descriptions. 
# https://github.com/spf13/cobra/issues/1508 local tab=$'\t' comp while IFS='' read -r comp; do [[ -z $comp ]] && continue # Strip any description comp=${comp%%$tab*} # Only consider the completions that match if [[ $comp == "$cur"* ]]; then COMPREPLY+=("$comp") fi done < <(printf "%s\n" "${completions[@]}") ;; *) # Type: complete (normal completion) __git-lfs_handle_standard_completion_case ;; esac } __git-lfs_handle_standard_completion_case() { local tab=$'\t' comp # Short circuit to optimize if we don't have descriptions if [[ "${completions[*]}" != *$tab* ]]; then IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur") return 0 fi local longest=0 local compline # Look for the longest completion so that we can format things nicely while IFS='' read -r compline; do [[ -z $compline ]] && continue # Strip any description before checking the length comp=${compline%%$tab*} # Only consider the completions that match [[ $comp == "$cur"* ]] || continue COMPREPLY+=("$compline") if ((${#comp}>longest)); then longest=${#comp} fi done < <(printf "%s\n" "${completions[@]}") # If there is a single completion left, remove the description text if ((${#COMPREPLY[*]} == 1)); then __git-lfs_debug "COMPREPLY[0]: ${COMPREPLY[0]}" comp="${COMPREPLY[0]%%$tab*}" __git-lfs_debug "Removed description from single completion, which is now: ${comp}" COMPREPLY[0]=$comp else # Format the descriptions __git-lfs_format_comp_descriptions $longest fi } __git-lfs_handle_special_char() { local comp="$1" local char=$2 if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then local word=${comp%"${comp##*${char}}"} local idx=${#COMPREPLY[*]} while ((--idx >= 0)); do COMPREPLY[idx]=${COMPREPLY[idx]#"$word"} done fi } __git-lfs_format_comp_descriptions() { local tab=$'\t' local comp desc maxdesclength local longest=$1 local i ci for ci in ${!COMPREPLY[*]}; do comp=${COMPREPLY[ci]} # Properly format the description string which follows a tab character if there is one if [[ "$comp" == *$tab* ]]; then __git-lfs_debug "Original comp: $comp" desc=${comp#*$tab} comp=${comp%%$tab*} # $COLUMNS stores the current shell width. # Remove an extra 4 because we add 2 spaces and 2 parentheses. maxdesclength=$(( COLUMNS - longest - 4 )) # Make sure we can fit a description of at least 8 characters # if we are to align the descriptions. if ((maxdesclength > 8)); then # Add the proper number of spaces to align the descriptions for ((i = ${#comp} ; i < longest ; i++)); do comp+=" " done else # Don't pad the descriptions so we can fit more text after the completion maxdesclength=$(( COLUMNS - ${#comp} - 4 )) fi # If there is enough space for any description text, # truncate the descriptions that are too long for the shell width if ((maxdesclength > 0)); then if ((${#desc} > maxdesclength)); then desc=${desc:0:$(( maxdesclength - 1 ))} desc+="…" fi comp+=" ($desc)" fi COMPREPLY[ci]=$comp __git-lfs_debug "Final comp: $comp" fi done } __start_git-lfs() { local cur prev words cword split COMPREPLY=() # Call _init_completion from the bash-completion package # to prepare the arguments properly if declare -F _init_completion >/dev/null 2>&1; then _init_completion -n =: || return else __git-lfs_init_completion -n =: || return fi __git-lfs_debug __git-lfs_debug "========= starting completion logic ==========" __git-lfs_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword" # The user could have moved the cursor backwards on the command-line. 
# We need to trigger completion from the $cword location, so we need # to truncate the command-line ($words) up to the $cword location. words=("${words[@]:0:$cword+1}") __git-lfs_debug "Truncated words[*]: ${words[*]}," if [[ ${words[0]} == "git" && ${words[1]} == "lfs" ]]; then words=("git-lfs" "${words[@]:2:${#words[@]}-2}") __git-lfs_debug "Rewritten words[*]: ${words[*]}," fi local out directive __git-lfs_get_completion_results __git-lfs_process_completion_results } if [[ $(type -t compopt) = "builtin" ]]; then complete -o default -F __start_git-lfs git-lfs else complete -o default -o nospace -F __start_git-lfs git-lfs fi # ex: ts=4 sw=4 et filetype=sh _git_lfs() { __start_git-lfs; } git-lfs-3.6.1/t/fixtures/completions/git-lfs-completion.fish000066400000000000000000000230761472372047300241460ustar00rootroot00000000000000# fish completion for git-lfs -*- shell-script -*- function __git_lfs_debug set -l file "$BASH_COMP_DEBUG_FILE" if test -n "$file" echo "$argv" >> $file end end function __git_lfs_perform_completion __git_lfs_debug "Starting __git_lfs_perform_completion" # Extract all args except the last one set -l args (commandline -opc) # Extract the last arg and escape it in case it is a space set -l lastArg (string escape -- (commandline -ct)) __git_lfs_debug "args: $args" __git_lfs_debug "last arg: $lastArg" # Disable ActiveHelp which is not supported for fish shell set -l requestComp "GIT_LFS_ACTIVE_HELP=0 $args[1] __completeNoDesc $args[2..-1] $lastArg" __git_lfs_debug "Calling $requestComp" set -l results (eval $requestComp 2> /dev/null) # Some programs may output extra empty lines after the directive. # Let's ignore them or else it will break completion. # Ref: https://github.com/spf13/cobra/issues/1279 for line in $results[-1..1] if test (string trim -- $line) = "" # Found an empty line, remove it set results $results[1..-2] else # Found non-empty line, we have our proper output break end end set -l comps $results[1..-2] set -l directiveLine $results[-1] # For Fish, when completing a flag with an = (e.g., -n=) # completions must be prefixed with the flag set -l flagPrefix (string match -r -- '-.*=' "$lastArg") __git_lfs_debug "Comps: $comps" __git_lfs_debug "DirectiveLine: $directiveLine" __git_lfs_debug "flagPrefix: $flagPrefix" for comp in $comps printf "%s%s\n" "$flagPrefix" "$comp" end printf "%s\n" "$directiveLine" end # this function limits calls to __git_lfs_perform_completion, by caching the result behind $__git_lfs_perform_completion_once_result function __git_lfs_perform_completion_once __git_lfs_debug "Starting __git_lfs_perform_completion_once" if test -n "$__git_lfs_perform_completion_once_result" __git_lfs_debug "Seems like a valid result already exists, skipping __git_lfs_perform_completion" return 0 end set --global __git_lfs_perform_completion_once_result (__git_lfs_perform_completion) if test -z "$__git_lfs_perform_completion_once_result" __git_lfs_debug "No completions, probably due to a failure" return 1 end __git_lfs_debug "Performed completions and set __git_lfs_perform_completion_once_result" return 0 end # this function is used to clear the $__git_lfs_perform_completion_once_result variable after completions are run function __git_lfs_clear_perform_completion_once_result __git_lfs_debug "" __git_lfs_debug "========= clearing previously set __git_lfs_perform_completion_once_result variable ==========" set --erase __git_lfs_perform_completion_once_result __git_lfs_debug "Succesfully erased the variable __git_lfs_perform_completion_once_result" end 
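# For reference (inferred from the shellCompDirective* constants used below):
# the final line of the program's completion output carries a directive
# bitmask, so e.g. a directive of 36 decodes as
# shellCompDirectiveNoFileComp (4) plus shellCompDirectiveKeepOrder (32).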
function __git_lfs_requires_order_preservation __git_lfs_debug "" __git_lfs_debug "========= checking if order preservation is required ==========" __git_lfs_perform_completion_once if test -z "$__git_lfs_perform_completion_once_result" __git_lfs_debug "Error determining if order preservation is required" return 1 end set -l directive (string sub --start 2 $__git_lfs_perform_completion_once_result[-1]) __git_lfs_debug "Directive is: $directive" set -l shellCompDirectiveKeepOrder 32 set -l keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) % 2) __git_lfs_debug "Keeporder is: $keeporder" if test $keeporder -ne 0 __git_lfs_debug "This does require order preservation" return 0 end __git_lfs_debug "This doesn't require order preservation" return 1 end # This function does two things: # - Obtain the completions and store them in the global __git_lfs_comp_results # - Return false if file completion should be performed function __git_lfs_prepare_completions __git_lfs_debug "" __git_lfs_debug "========= starting completion logic ==========" # Start fresh set --erase __git_lfs_comp_results __git_lfs_perform_completion_once __git_lfs_debug "Completion results: $__git_lfs_perform_completion_once_result" if test -z "$__git_lfs_perform_completion_once_result" __git_lfs_debug "No completion, probably due to a failure" # Might as well do file completion, in case it helps return 1 end set -l directive (string sub --start 2 $__git_lfs_perform_completion_once_result[-1]) set --global __git_lfs_comp_results $__git_lfs_perform_completion_once_result[1..-2] __git_lfs_debug "Completions are: $__git_lfs_comp_results" __git_lfs_debug "Directive is: $directive" set -l shellCompDirectiveError 1 set -l shellCompDirectiveNoSpace 2 set -l shellCompDirectiveNoFileComp 4 set -l shellCompDirectiveFilterFileExt 8 set -l shellCompDirectiveFilterDirs 16 if test -z "$directive" set directive 0 end set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) % 2) if test $compErr -eq 1 __git_lfs_debug "Received error directive: aborting." # Might as well do file completion, in case it helps return 1 end set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) % 2) set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) % 2) if test $filefilter -eq 1; or test $dirfilter -eq 1 __git_lfs_debug "File extension filtering or directory filtering not supported" # Do full file completion instead return 1 end set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) % 2) set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) % 2) __git_lfs_debug "nospace: $nospace, nofiles: $nofiles" # If we want to prevent a space, or if file completion is NOT disabled, # we need to count the number of valid completions. # To do so, we will filter on prefix as the completions we have received # may not already be filtered so as to allow fish to match on different # criteria than the prefix. 
if test $nospace -ne 0; or test $nofiles -eq 0 set -l prefix (commandline -t | string escape --style=regex) __git_lfs_debug "prefix: $prefix" set -l completions (string match -r -- "^$prefix.*" $__git_lfs_comp_results) set --global __git_lfs_comp_results $completions __git_lfs_debug "Filtered completions are: $__git_lfs_comp_results" # Important not to quote the variable for count to work set -l numComps (count $__git_lfs_comp_results) __git_lfs_debug "numComps: $numComps" if test $numComps -eq 1; and test $nospace -ne 0 # We must first split on \t to get rid of the descriptions to be # able to check what the actual completion will be. # We don't need descriptions anyway since there is only a single # real completion which the shell will expand immediately. set -l split (string split --max 1 \t $__git_lfs_comp_results[1]) # Fish won't add a space if the completion ends with any # of the following characters: @=/:., set -l lastChar (string sub -s -1 -- $split) if not string match -r -q "[@=/:.,]" -- "$lastChar" # In other cases, to support the "nospace" directive we trick the shell # by outputting an extra, longer completion. __git_lfs_debug "Adding second completion to perform nospace directive" set --global __git_lfs_comp_results $split[1] $split[1]. __git_lfs_debug "Completions are now: $__git_lfs_comp_results" end end if test $numComps -eq 0; and test $nofiles -eq 0 # To be consistent with bash and zsh, we only trigger file # completion when there are no other completions __git_lfs_debug "Requesting file completion" return 1 end end return 0 end # Since Fish completions are only loaded once the user triggers them, we trigger them ourselves # so we can properly delete any completions provided by another script. # Only do this if the program can be found, or else fish may print some errors; besides, # the existing completions will only be loaded if the program can be found. if type -q "git-lfs" # The space after the program name is essential to trigger completion for the program # and not completion of the program name itself. # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish. complete --do-complete "git-lfs " > /dev/null 2>&1 end # Remove any pre-existing completions for the program since we will be handling all of them. complete -c git-lfs -e # this will get called after the two calls below and clear the $__git_lfs_perform_completion_once_result global complete -c git-lfs -n '__git_lfs_clear_perform_completion_once_result' # The call to __git_lfs_prepare_completions will setup __git_lfs_comp_results # which provides the program's completion choices. 
# If this doesn't require order preservation, we don't use the -k flag complete -c git-lfs -n 'not __git_lfs_requires_order_preservation && __git_lfs_prepare_completions' -f -a '$__git_lfs_comp_results' # otherwise we use the -k flag complete -k -c git-lfs -n '__git_lfs_requires_order_preservation && __git_lfs_prepare_completions' -f -a '$__git_lfs_comp_results' git-lfs-3.6.1/t/fixtures/completions/git-lfs-completion.zsh000066400000000000000000000171701472372047300240170ustar00rootroot00000000000000#compdef git-lfs compdef _git-lfs git-lfs # zsh completion for git-lfs -*- shell-script -*- __git-lfs_debug() { local file="$BASH_COMP_DEBUG_FILE" if [[ -n ${file} ]]; then echo "$*" >> "${file}" fi } _git-lfs() { local shellCompDirectiveError=1 local shellCompDirectiveNoSpace=2 local shellCompDirectiveNoFileComp=4 local shellCompDirectiveFilterFileExt=8 local shellCompDirectiveFilterDirs=16 local shellCompDirectiveKeepOrder=32 local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder local -a completions __git-lfs_debug "\n========= starting completion logic ==========" __git-lfs_debug "CURRENT: ${CURRENT}, words[*]: ${words[*]}" # The user could have moved the cursor backwards on the command-line. # We need to trigger completion from the $CURRENT location, so we need # to truncate the command-line ($words) up to the $CURRENT location. # (We cannot use $CURSOR as its value does not work when a command is an alias.) words=("${=words[1,CURRENT]}") __git-lfs_debug "Truncated words[*]: ${words[*]}," lastParam=${words[-1]} lastChar=${lastParam[-1]} __git-lfs_debug "lastParam: ${lastParam}, lastChar: ${lastChar}" # For zsh, when completing a flag with an = (e.g., git-lfs -n=) # completions must be prefixed with the flag setopt local_options BASH_REMATCH if [[ "${lastParam}" =~ '-.*=' ]]; then # We are dealing with a flag with an = flagPrefix="-P ${BASH_REMATCH}" fi # Prepare the command to obtain completions requestComp="git-${words[1]#*git-} __completeNoDesc ${words[2,-1]}" if [ "${lastChar}" = "" ]; then # If the last parameter is complete (there is a space following it) # We add an extra empty parameter so we can indicate this to the go completion code. __git-lfs_debug "Adding extra empty parameter" requestComp="${requestComp} \"\"" fi __git-lfs_debug "About to call: eval ${requestComp}" # Use eval to handle any environment variables and such out=$(eval ${requestComp} 2>/dev/null) __git-lfs_debug "completion output: ${out}" # Extract the directive integer following a : from the last line local lastLine while IFS='\n' read -r line; do lastLine=${line} done < <(printf "%s\n" "${out[@]}") __git-lfs_debug "last line: ${lastLine}" if [ "${lastLine[1]}" = : ]; then directive=${lastLine[2,-1]} # Remove the directive including the : and the newline local suffix (( suffix=${#lastLine}+2)) out=${out[1,-$suffix]} else # There is no directive specified. Leave $out as is. __git-lfs_debug "No directive found. Setting do default" directive=0 fi __git-lfs_debug "directive: ${directive}" __git-lfs_debug "completions: ${out}" __git-lfs_debug "flagPrefix: ${flagPrefix}" if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then __git-lfs_debug "Completion received error. Ignoring completions." 
return fi local activeHelpMarker="_activeHelp_ " local endIndex=${#activeHelpMarker} local startIndex=$((${#activeHelpMarker}+1)) local hasActiveHelp=0 while IFS='\n' read -r comp; do # Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker) if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then __git-lfs_debug "ActiveHelp found: $comp" comp="${comp[$startIndex,-1]}" if [ -n "$comp" ]; then compadd -x "${comp}" __git-lfs_debug "ActiveHelp will need delimiter" hasActiveHelp=1 fi continue fi if [ -n "$comp" ]; then # If requested, completions are returned with a description. # The description is preceded by a TAB character. # For zsh's _describe, we need to use a : instead of a TAB. # We first need to escape any : as part of the completion itself. comp=${comp//:/\\:} local tab="$(printf '\t')" comp=${comp//$tab/:} __git-lfs_debug "Adding completion: ${comp}" completions+=${comp} lastComp=$comp fi done < <(printf "%s\n" "${out[@]}") # Add a delimiter after the activeHelp statements, but only if: # - there are completions following the activeHelp statements, or # - file completion will be performed (so there will be choices after the activeHelp) if [ $hasActiveHelp -eq 1 ]; then if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then __git-lfs_debug "Adding activeHelp delimiter" compadd -x "--" hasActiveHelp=0 fi fi if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then __git-lfs_debug "Activating nospace." noSpace="-S ''" fi if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then __git-lfs_debug "Activating keep order." keepOrder="-V" fi if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then # File extension filtering local filteringCmd filteringCmd='_files' for filter in ${completions[@]}; do if [ ${filter[1]} != '*' ]; then # zsh requires a glob pattern to do file filtering filter="\*.$filter" fi filteringCmd+=" -g $filter" done filteringCmd+=" ${flagPrefix}" __git-lfs_debug "File filtering command: $filteringCmd" _arguments '*:filename:'"$filteringCmd" elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then # File completion for directories only local subdir subdir="${completions[1]}" if [ -n "$subdir" ]; then __git-lfs_debug "Listing directories in $subdir" pushd "${subdir}" >/dev/null 2>&1 else __git-lfs_debug "Listing directories in ." fi local result _arguments '*:dirname:_files -/'" ${flagPrefix}" result=$? if [ -n "$subdir" ]; then popd >/dev/null 2>&1 fi return $result else __git-lfs_debug "Calling _describe" if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then __git-lfs_debug "_describe found some completions" # Return the success of having called _describe return 0 else __git-lfs_debug "_describe did not find completions." __git-lfs_debug "Checking if we should do file completion." if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then __git-lfs_debug "deactivating file completion" # We must return an error code here to let zsh know that there were no # completions found by _describe; this is what will trigger other # matching algorithms to attempt to find completions. # For example zsh can match letters in the middle of words. return 1 else # Perform file completion __git-lfs_debug "Activating file completion" # We must return the result of this command, so it must be the # last command, or else we must store its result to return it. 
_arguments '*:filename:_files'" ${flagPrefix}" fi fi fi } # don't run the completion function when being source-ed or eval-ed if [ "$funcstack[1]" = "_git-lfs" ]; then _git-lfs fi git-lfs-3.6.1/t/fixtures/migrate.sh000077500000000000000000000463501472372047300172120ustar00rootroot00000000000000#!/usr/bin/env bash # assert_ref_unmoved ensures that the previous and current SHA1 of a given ref # is equal by string comparison: # # assert_ref_unmoved "HEAD" "$previous_sha" "$current_sha" # # If the two are unequal (the ref has moved), a message is printed to stderr and # the program exits. assert_ref_unmoved() { local name="$1" local prev_sha="$2" local current_sha="$3" if [ "$prev_sha" != "$current_sha" ]; then echo >&2 "$name should not have moved (from: $prev_sha, to: $current_sha)" exit 1 fi } # setup_local_branch_with_gitattrs creates a repository as follows: # # A---B # \ # refs/heads/main # # - Commit 'A' has 120, in a.txt, and a corresponding entry in .gitattributes. # # If "0755" is passed as an argument, the .gitattributes file is created # with that permissions mode. # If "link" is passed as an argument, the .gitattributes file is created # as a symlink to a gitattrs file. setup_local_branch_with_gitattrs() { set -e reponame="migrate-single-local-branch-with-attrs" remove_and_create_local_repo "$reponame" lfstest-genrandom --base64 120 >a.txt git add a.txt git commit -m "initial commit" git lfs track "*.txt" git lfs track "*.other" if [[ $1 == "0755" ]]; then chmod +x .gitattributes elif [[ $1 == "link" ]]; then mv .gitattributes gitattrs add_symlink gitattrs .gitattributes git add gitattrs fi git add .gitattributes git commit -m "add .gitattributes" } # setup_local_branch_with_nested_gitattrs creates a repository as follows: # # A---B # \ # refs/heads/main # # - Commit 'A' has 120, in a.txt, and a corresponding entry in .gitattributes. There is also # 140 in a.md, with no corresponding entry in .gitattributes. # It also has 140 in subtree/a.md, and a corresponding entry in subtree/.gitattributes setup_local_branch_with_nested_gitattrs() { set -e reponame="migrate-single-local-branch-nested-attrs" remove_and_create_local_repo "$reponame" mkdir b lfstest-genrandom --base64 120 >a.txt lfstest-genrandom --base64 140 >a.md lfstest-genrandom --base64 140 >b/a.md git add a.txt a.md b/a.md git commit -m "initial commit" git lfs track "*.txt" git add .gitattributes git commit -m "add .gitattributes" cd b git lfs track "*.md" cd .. git add b/.gitattributes git commit -m "add nested .gitattributes" } # setup_single_local_branch_untracked creates a repository as follows: # # A---B # \ # refs/heads/main # # - Commit 'A' has 120, in a.txt and 140 in a.md, with neither files tracked as # pointers in Git LFS setup_single_local_branch_untracked() { set -e local name="${1:-a.md}" reponame="migrate-single-local-branch-untracked" remove_and_create_local_repo "$reponame" git commit --allow-empty -m "initial commit" lfstest-genrandom --base64 120 >a.txt lfstest-genrandom --base64 140 >"$name" git add a.txt "$name" git commit -m "add a.txt and $name" } # setup_single_local_branch_tracked creates a repository as follows: # # A---B # \ # refs/heads/main # # - Commit 'A' has 120, in a.txt and 140 in a.md, with both files tracked as # pointers in Git LFS # # If "0755" is passed as an argument, the .gitattributes file is created # with that permissions mode. # If "link" is passed as an argument, the .gitattributes file is created # as a symlink to a gitattrs file. 
setup_single_local_branch_tracked() { set -e reponame="migrate-single-local-branch-tracked" remove_and_create_local_repo "$reponame" echo "*.txt filter=lfs diff=lfs merge=lfs -text" > .gitattributes echo "*.md filter=lfs diff=lfs merge=lfs -text" >> .gitattributes if [[ $1 == "0755" ]]; then chmod +x .gitattributes fi git add .gitattributes git commit -m "initial commit" lfstest-genrandom --base64 120 >a.txt lfstest-genrandom --base64 140 >a.md git add a.txt a.md git commit -m "add a.{txt,md}" if [[ $1 == "link" ]]; then git mv .gitattributes gitattrs add_symlink gitattrs .gitattributes git commit -m "link .gitattributes" fi } # setup_single_local_branch_complex_tracked creates a repository as follows: # # A # \ # refs/heads/main # # - Commit 'A' has 1 byte of text in a.txt and dir/b.txt. According to the # .gitattributes files, a.txt should be tracked using Git LFS, but b.txt should # not be. setup_single_local_branch_complex_tracked() { set -e reponame="migrate-single-local-branch-complex-tracked" remove_and_create_local_repo "$reponame" mkdir -p dir echo "*.txt filter=lfs diff=lfs merge=lfs -text" > .gitattributes echo "*.txt !filter !diff !merge" > dir/.gitattributes printf "a" > a.txt printf "b" > dir/b.txt git lfs uninstall git add .gitattributes dir/.gitattributes a.txt dir/b.txt git commit -m "initial commit" git lfs install } # setup_single_local_branch_tracked_corrupt creates a repository as follows: # # A # \ # refs/heads/main # # - Commit 'A' has 120 bytes of random data in a.txt, and tracks *.txt under Git # LFS, but a.txt is not stored as an LFS object. # # If "lfsmacro" is passed as an argument, a macro attribute definition # which sets the LFS filter attribute is added to the .gitattributes file, # and then referenced by the test file pattern attribute. # If "macro" is passed as an argument, a macro attribute definition is # added to the .gitattributes file. # If "link" is passed as an argument, the .gitattributes file is created # as a symlink to a gitattrs file. setup_single_local_branch_tracked_corrupt() { set -e reponame="migrate-single-local-branch-with-attrs-corrupt" remove_and_create_local_repo "$reponame" git lfs uninstall lfstest-genrandom --base64 120 >a.txt if [[ $1 == "lfsmacro" ]]; then printf '[attr]lfs filter=lfs diff=lfs merge=lfs -text\n*.txt lfs\n' \ >.gitattributes else echo "*.txt filter=lfs diff=lfs merge=lfs -text" > .gitattributes if [[ $1 == "macro" ]]; then echo "[attr]foo foo" >>.gitattributes elif [[ $1 == "link" ]]; then mv .gitattributes gitattrs add_symlink gitattrs .gitattributes fi fi git add .gitattributes a.txt git commit -m "initial commit" git lfs install } # setup_multiple_local_branches creates a repository as follows: # # B # / \ # A refs/heads/my-feature # \ # refs/heads/main # # - Commit 'A' has 120, 140 bytes of data in a.txt, and a.md, respectively. # # - Commit 'B' has 30 bytes of data in a.md, and includes commit 'A' as a # parent. setup_multiple_local_branches() { set -e reponame="migrate-info-multiple-local-branches" remove_and_create_local_repo "$reponame" lfstest-genrandom --base64 120 >a.txt lfstest-genrandom --base64 140 >a.md git add a.txt a.md git commit -m "initial commit" git checkout -b my-feature lfstest-genrandom --base64 30 >a.md git add a.md git commit -m "add an additional 30 bytes to a.md" git checkout main } # setup_multiple_local_branches_with_alternate_names performs the same task # as setup_multiple_local_branches, but creates a file with no extension. 
setup_multiple_local_branches_with_alternate_names() { set -e reponame="migrate-info-multiple-local-branches" remove_and_create_local_repo "$reponame" lfstest-genrandom --base64 120 >no_extension lfstest-genrandom --base64 140 >a.txt git add no_extension a.txt git commit -m "initial commit" git checkout -b my-feature lfstest-genrandom --base64 30 >a.txt lfstest-genrandom --base64 100 >no_extension git add no_extension a.txt git commit -m "add an additional 30 bytes to a.txt" git checkout main } # setup_multiple_local_branches_with_gitattrs creates a repository in the same way # as setup_multiple_local_branches, but also adds relevant lfs filters to the # .gitattributes file in the main branch setup_multiple_local_branches_with_gitattrs() { set -e setup_multiple_local_branches git lfs track *.txt git lfs track *.md git add .gitattributes git commit -m "add .gitattributes" } # setup_multiple_local_branches_non_standard creates a repository as follows: # # refs/pull/1/head # / # | # B # / \ # A refs/heads/my-feature # |\ # | refs/heads/main # \ # refs/pull/1/base # # With the same contents in 'A' and 'B' as setup_multiple_local_branches. setup_multiple_local_branches_non_standard() { set -e setup_multiple_local_branches git update-ref refs/pull/1/head "$(git rev-parse my-feature)" git update-ref refs/pull/1/base "$(git rev-parse main)" } # setup_multiple_local_branches_tracked creates a repo with exactly the same # structure as in setup_multiple_local_branches, but with all files tracked by # Git LFS setup_multiple_local_branches_tracked() { set -e reponame="migrate-info-multiple-local-branches" remove_and_create_local_repo "$reponame" echo "*.txt filter=lfs diff=lfs merge=lfs -text" > .gitattributes echo "*.md filter=lfs diff=lfs merge=lfs -text" >> .gitattributes git add .gitattributes git commit -m "initial commit" lfstest-genrandom --base64 120 >a.txt lfstest-genrandom --base64 140 >a.md git add a.txt a.md git commit -m "add a.{txt,md}" git checkout -b my-feature lfstest-genrandom --base64 30 >a.md git add a.md git commit -m "add an additional 30 bytes to a.md" git checkout main } # setup_local_branch_with_space creates a repository as follows: # # A # \ # refs/heads/main # # - Commit 'A' has 50 bytes in a file named "a file.txt". setup_local_branch_with_space() { set -e reponame="migrate-local-branch-with-space" filename="a file.txt" remove_and_create_local_repo "$reponame" lfstest-genrandom --base64 50 >"$filename" git add "$filename" git commit -m "initial commit" } # setup_single_remote_branch creates a repository as follows: # # A---B # \ \ # \ refs/heads/main # \ # refs/remotes/origin/main # # - Commit 'A' has 120, 140 bytes of data in a.txt, and a.md, respectively. It # is the latest commit pushed to the remote 'origin'. # # - Commit 'B' has 30, 50 bytes of data in a.txt, and a.md, respectively. 
setup_single_remote_branch() { set -e reponame="migrate-info-single-remote-branch" remove_and_create_remote_repo "$reponame" lfstest-genrandom --base64 120 >a.txt lfstest-genrandom --base64 140 >a.md git add a.txt a.md git commit -m "initial commit" git push origin main lfstest-genrandom --base64 30 >a.txt lfstest-genrandom --base64 50 >a.md git add a.md a.txt git commit -m "add an additional 30, 50 bytes to a.{txt,md}" } setup_single_remote_branch_with_gitattrs() { set -e setup_single_remote_branch git lfs track *.txt git lfs track *.md git add .gitattributes git commit -m "add .gitattributes" } # Creates a repo identical to setup_single_remote_branch, except with *.md and # *.txt files tracked by Git LFS setup_single_remote_branch_tracked() { set -e reponame="migrate-info-single-remote-branch" remove_and_create_remote_repo "$reponame" git lfs track "*.md" "*.txt" git add .gitattributes git commit -m "initial commit" lfstest-genrandom --base64 120 >a.txt lfstest-genrandom --base64 140 >a.md git add a.txt a.md git commit -m "add a.{txt,md}" git push origin main lfstest-genrandom --base64 30 >a.txt lfstest-genrandom --base64 50 >a.md git add a.md a.txt git commit -m "add an additional 30, 50 bytes to a.{txt,md}" } # setup_multiple_remote_branches creates a repository as follows: # # C # / \ # A---B refs/heads/my-feature # \ \ # \ refs/heads/main # \ # refs/remotes/origin/main # # - Commit 'A' has 10, 11 bytes of data in a.txt, and a.md, respectively. It is # the latest commit pushed to the remote 'origin'. # # - Commit 'B' has 20, 21 bytes of data in a.txt, and a.md, respectively. # # - Commit 'C' has 30, 31 bytes of data in a.txt, and a.md, respectively. It is # the latest commit on refs/heads/my-feature. setup_multiple_remote_branches() { set -e reponame="migrate-info-exclude-remote-refs-given-branch" remove_and_create_remote_repo "$reponame" lfstest-genrandom --base64 10 >a.txt lfstest-genrandom --base64 11 >a.md git add a.txt a.md git commit -m "add 10, 11 bytes, a.{txt,md}" git push origin main lfstest-genrandom --base64 20 >a.txt lfstest-genrandom --base64 21 >a.md git add a.txt a.md git commit -m "add 20, 21 bytes, a.{txt,md}" git checkout -b my-feature lfstest-genrandom --base64 30 >a.txt lfstest-genrandom --base64 31 >a.md git add a.txt a.md git commit -m "add 30, 31 bytes, a.{txt,md}" git checkout main } # Creates a repo identical to that in setup_multiple_remote_branches(), but # with all files tracked by Git LFS setup_multiple_remote_branches_gitattrs() { set -e reponame="migrate-info-exclude-remote-refs-given-branch" remove_and_create_remote_repo "$reponame" git lfs track "*.txt" "*.md" git add .gitattributes git commit -m "initial commit" lfstest-genrandom --base64 10 >a.txt lfstest-genrandom --base64 11 >a.md git add a.txt a.md git commit -m "add 10, 11 bytes, a.{txt,md}" git push origin main lfstest-genrandom --base64 20 >a.txt lfstest-genrandom --base64 21 >a.md git add a.txt a.md git commit -m "add 20, 21 bytes, a.{txt,md}" git checkout -b my-feature lfstest-genrandom --base64 30 >a.txt lfstest-genrandom --base64 31 >a.md git add a.txt a.md git commit -m "add 30, 31 bytes, a.{txt,md}" git checkout main } # setup_single_local_branch_with_tags creates a repository as follows: # # A---B # |\ # | refs/heads/main # | # \ # refs/tags/v1.0.0 # # - Commit 'A' has 1 byte of data in 'a.txt' # - Commit 'B' has 2 bytes of data in 'a.txt', and is tagged at 'v1.0.0'. 
setup_single_local_branch_with_tags() { set -e reponame="migrate-single-local-branch-tags" remove_and_create_local_repo "$reponame" lfstest-genrandom --base64 1 >a.txt git add a.txt git commit -m "initial commit" lfstest-genrandom --base64 2 >a.txt git add a.txt git commit -m "secondary commit" git tag "v1.0.0" } # setup_single_local_branch_with_annotated_tags creates a repository as follows: # # A---B # |\ # | refs/heads/main # | # \ # refs/tags/v1.0.0 (annotated) # # - Commit 'A' has 1 byte of data in 'a.txt' # - Commit 'B' has 2 bytes of data in 'a.txt', and is tagged (with annotation) # at 'v1.0.0'. setup_single_local_branch_with_annotated_tags() { set -e reponame="migrate-single-local-branch-annotated-tags" remove_and_create_local_repo "$reponame" lfstest-genrandom --base64 1 >a.txt git add a.txt git commit -m "initial commit" lfstest-genrandom --base64 2 >a.txt git add a.txt git commit -m "secondary commit" git tag "v1.0.0" -m "v1.0.0" } setup_multiple_remotes() { set -e reponame="migrate-multiple-remotes" remove_and_create_remote_repo "$reponame" forkname="$(git remote -v \ | head -n1 \ | cut -d ' ' -f 1 \ | sed -e 's/^.*\///g')-fork" ( setup_remote_repo "$forkname" ) git remote add fork "$GITSERVER/$forkname" lfstest-genrandom --base64 16 >a.txt git add a.txt git commit -m "initial commit" git push origin main lfstest-genrandom --base64 16 >a.txt git add a.txt git commit -m "another commit" git push fork main } # setup_single_local_branch_deep_trees creates a repository as follows: # # A # \ # refs/heads/main # # - Commit 'A' has 120 bytes of data in 'foo/bar/baz/a.txt'. setup_single_local_branch_deep_trees() { set -e reponame="migrate-single-local-branch-with-deep-trees" remove_and_create_local_repo "$reponame" mkdir -p foo/bar/baz lfstest-genrandom --base64 120 >foo/bar/baz/a.txt git add foo/bar/baz/a.txt git commit -m "initial commit" } # setup_single_local_branch_same_file_tree_ext creates a repository as follows: # # A # \ # refs/heads/main # # - Commit 'A' has 120 bytes of data in each of 'a.txt`, `foo/a.txt', # `bar.txt/b.md`, and `bar.txt/b.txt`. setup_single_local_branch_same_file_tree_ext() { set -e reponame="migrate-single-local-branch-with-same-file-tree-ext" remove_and_create_local_repo "$reponame" mkdir -p foo bar.txt lfstest-genrandom --base64 120 >a.txt lfstest-genrandom --base64 120 >foo/a.txt lfstest-genrandom --base64 120 >bar.txt/b.md lfstest-genrandom --base64 120 >bar.txt/b.txt git add a.txt foo bar.txt git commit -m "initial commit" } # setup_local_branch_with_symlink creates a repository as follows: # # A # \ # refs/heads/main # # - Commit 'A' has 120, in a.txt, and a symbolic link link.txt to a.txt. setup_local_branch_with_symlink() { set -e reponame="migrate-single-local-branch-with-symlink" remove_and_create_local_repo "$reponame" lfstest-genrandom --base64 120 >a.txt git add a.txt git commit -m "initial commit" add_symlink "a.txt" "link.txt" git commit -m "add symlink" } # setup_local_branch_with_dirty_copy creates a repository as follows: # # A # \ # refs/heads/main # # - Commit 'A' has the contents "a.txt in a.txt, and marks a.txt as unclean # in the working copy. 
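# ("Unclean" here means the working copy of a.txt has unstaged changes on top
# of the committed content.)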
setup_local_branch_with_dirty_copy() { set -e reponame="migrate-single-local-branch-with-dirty-copy" remove_and_create_local_repo "$reponame" printf "a.txt" > a.txt git add a.txt git commit -m "initial commit" printf "2" >> a.txt } # setup_local_branch_with_copied_file creates a repository as follows: # # A # \ # refs/heads/main # # - Commit 'A' has the contents "a.txt" in a.txt, and another identical file # (same name and content) in another directory. setup_local_branch_with_copied_file() { set -e reponame="migrate-single-local-branch-with-copied-file" remove_and_create_local_repo "$reponame" printf "a.txt" > a.txt mkdir dir cp a.txt dir/ git add a.txt dir/a.txt git commit -m "initial commit" } # setup_local_branch_with_special_character_files creates a repository as follows: # # A # \ # refs/heads/main # # - Commit 'A' has binary files with special characters setup_local_branch_with_special_character_files() { set -e reponame="migrate-single-local-branch-with-special-filenames" remove_and_create_local_repo "$reponame" lfstest-genrandom 80 >'./test - special.bin' lfstest-genrandom 100 >'./test (test2) special.bin' # Windows does not allow creation of files with '*' [ "$IS_WINDOWS" -eq '1' ] || lfstest-genrandom 120 >'./test * ** special.bin' git add *.bin git commit -m "initial commit" } # make_bare converts the existing full checkout of a repository into a bare one, # and then `cd`'s into it. make_bare() { reponame=$(basename "$(pwd)") mv .git "../$reponame.git" cd .. rm -rf "$reponame" cd "$reponame.git" git config --bool core.bare true } # remove_and_create_local_repo removes, creates, and checks out a local # repository given by a particular name: # # remove_and_create_local_repo "$reponame" remove_and_create_local_repo() { local reponame="$1-$(lfstest-genrandom --base64url 32)" git init "$reponame" cd "$reponame" } # remove_and_create_remote_repo removes, creates, and checks out a remote # repository both locally and on the gitserver, given by a particular name: # # remove_and_create_remote_repo "$reponame" remove_and_create_remote_repo() { local reponame="$1-$(lfstest-genrandom --base64url 32)" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" rm clone.log } git-lfs-3.6.1/t/fixtures/templates/000077500000000000000000000000001472372047300172115ustar00rootroot00000000000000git-lfs-3.6.1/t/fixtures/templates/HEAD000066400000000000000000000000251472372047300176320ustar00rootroot00000000000000ref: refs/heads/main git-lfs-3.6.1/t/fixtures/templates/info/000077500000000000000000000000001472372047300201445ustar00rootroot00000000000000git-lfs-3.6.1/t/fixtures/templates/info/exclude000066400000000000000000000000001472372047300215060ustar00rootroot00000000000000git-lfs-3.6.1/t/git-lfs-test-server-api/000077500000000000000000000000001472372047300177375ustar00rootroot00000000000000git-lfs-3.6.1/t/git-lfs-test-server-api/.gitignore000066400000000000000000000000311472372047300217210ustar00rootroot00000000000000git-lfs-test-server-api* git-lfs-3.6.1/t/git-lfs-test-server-api/README.md000066400000000000000000000060601472372047300212200ustar00rootroot00000000000000# Git LFS Server API compliance test utility This package exists to provide automated testing of server API implementations, to ensure that they conform to the behaviour expected by the client. You can run this utility against any server that implements the Git LFS API. 
## Automatic or data-driven testing This utility is primarily intended to test the API implementation, but in order to correctly test the responses, the tests have to know what objects exist on the server already and which don't. In 'automatic' mode, the tests require that the API and the content server it links to via upload and download links are both available & free to use. The content server must be empty at the start of the tests, and the tests will upload some data as part of the tests. Therefore obviously this cannot be a production system. Alternatively, in 'data-driven' mode, the tests must be provided with a list of object IDs that already exist on the server (minimum 10), and a list of other object IDs that are known to not exist. The test will use these IDs to construct its data sets, will only call the API (not the content server), and thus will not update any data - meaning you can in theory run this against a production system. ## Calling the test tool ``` git-lfs-test-server-api [--url=<apiurl> | --clone=<cloneurl>] [<oid-exists-file> <oid-missing-file>] [--save=<fileprefix>] ``` |Argument|Purpose| |------|-------| |`--url=<apiurl>`|URL of the server API to call. This must point directly at the API root and not the clone URL, and must be HTTP[S]. You must supply either this argument or the `--clone` argument| |`--clone=<cloneurl>`|The clone URL from which to derive the API URL. If it is HTTP[S], the test will try to find the API at `<cloneurl>/info/lfs`; if it is an SSH URL, then the test will call git-lfs-authenticate on the server to derive the API (with auth token if needed) just like the git-lfs client does. You must supply either this argument or the `--url` argument| |`<oid-exists-file> <oid-missing-file>`|Optional input files for data-driven mode (both must be supplied if this is used); each must be a file with `<oid> <size>` per line. The first file must be a list of oids that exist on the server, the second must be a list of oids known not to exist. If supplied, the tests will not call the content server or modify any data. If omitted, the test will generate its own list of oids and will modify the server (and expects that the server is empty of oids at the start)| |`--save=<fileprefix>`|If specified and no input files were provided, saves generated test data in the files `<fileprefix>_exists` and `<fileprefix>_missing`. These can be used as parameters to subsequent runs if required, if the server content remains unchanged between runs.| ## Authentication Authentication will behave just like the git-lfs client, so for HTTP[S] URLs the git credential helper system will be used to obtain logins, and for SSH URLs, keys can be used to automate login. Otherwise you will receive prompts on the command line.
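## Example

As a concrete illustration, a data-driven invocation might look like the following sketch; the server URL and file names here are placeholders for this example, not values produced by the tool:

```
# exists.txt and missing.txt each list one object per line as "<oid> <size>",
# i.e. a SHA-256 hex digest followed by the object's size in bytes, e.g.:
#
#   5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03 6
#   e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 0

git-lfs-test-server-api --url=https://lfs.example.com/demo/repo.git/info/lfs \
  exists.txt missing.txt
```

In automatic mode you would instead omit the input files and pass, say, `--save=data`; the tool then generates and uploads its own test objects and writes `data_exists` and `data_missing` files, which can be reused as the input files for later data-driven runs against the same server.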
git-lfs-3.6.1/t/git-lfs-test-server-api/main.go000066400000000000000000000226111472372047300212140ustar00rootroot00000000000000package main import ( "bufio" "crypto/sha256" "encoding/hex" "fmt" "math/rand" "os" "strconv" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/fs" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/lfshttp" t "github.com/git-lfs/git-lfs/v3/t/cmd/util" "github.com/git-lfs/git-lfs/v3/tasklog" "github.com/git-lfs/git-lfs/v3/tq" "github.com/spf13/cobra" ) type TestObject struct { Oid string Size int64 } type ServerTest struct { Name string F func(m tq.Manifest, oidsExist, oidsMissing []TestObject) error } var ( RootCmd = &cobra.Command{ Use: "git-lfs-test-server-api [--url= | --clone=] [ ]", Short: "Test a Git LFS API server for compliance", Run: testServerApi, } apiUrl string cloneUrl string savePrefix string tests []ServerTest ) func main() { RootCmd.Execute() } func testServerApi(cmd *cobra.Command, args []string) { if (len(apiUrl) == 0 && len(cloneUrl) == 0) || (len(apiUrl) != 0 && len(cloneUrl) != 0) { exit("Must supply either --url or --clone (and not both)") } if len(args) != 0 && len(args) != 2 { exit("Must supply either no file arguments or both the exists AND missing file") } if len(args) != 0 && len(savePrefix) > 0 { exit("Cannot combine input files and --save option") } // Build test data for existing files & upload // Use test repo for this to simplify the process of making sure data matches oid // We're not performing a real test at this point (although an upload fail will break it) var callback testDataCallback repo := t.NewRepo(&callback) // Force loading of config before we alter it repo.GitEnv().All() repo.Pushd() defer repo.Popd() manifest, err := buildManifest(repo) if err != nil { exit("error building tq.Manifest: " + err.Error()) } var oidsExist, oidsMissing []TestObject if len(args) >= 2 { fmt.Printf("Reading test data from files (no server content changes)\n") oidsExist = readTestOids(args[0]) oidsMissing = readTestOids(args[1]) } else { fmt.Printf("Creating test data (will upload to server)\n") var err error oidsExist, oidsMissing, err = buildTestData(repo, manifest) if err != nil { exit("Failed to set up test data, aborting") } if len(savePrefix) > 0 { existFile := savePrefix + "_exists" missingFile := savePrefix + "_missing" saveTestOids(existFile, oidsExist) saveTestOids(missingFile, oidsMissing) fmt.Printf("Wrote test to %s, %s for future use\n", existFile, missingFile) } } ok := runTests(manifest, oidsExist, oidsMissing) if !ok { exit("One or more tests failed, see above") } fmt.Println("All tests passed") } func readTestOids(filename string) []TestObject { f, err := os.OpenFile(filename, os.O_RDONLY, 0644) if err != nil { exit("Error opening file %s", filename) } defer f.Close() var ret []TestObject rdr := bufio.NewReader(f) line, err := rdr.ReadString('\n') for err == nil { fields := strings.Fields(strings.TrimSpace(line)) if len(fields) == 2 { sz, _ := strconv.ParseInt(fields[1], 10, 64) ret = append(ret, TestObject{Oid: fields[0], Size: sz}) } line, err = rdr.ReadString('\n') } return ret } type testDataCallback struct{} func (*testDataCallback) Fatalf(format string, args ...interface{}) { exit(format, args...) } func (*testDataCallback) Errorf(format string, args ...interface{}) { fmt.Printf(format, args...) 
} func buildManifest(r *t.Repo) (tq.Manifest, error) { // Configure the endpoint manually finder := lfsapi.NewEndpointFinder(r) var endp lfshttp.Endpoint if len(cloneUrl) > 0 { endp = finder.NewEndpointFromCloneURL("upload", cloneUrl) } else { endp = finder.NewEndpoint("upload", apiUrl) } apiClient, err := lfsapi.NewClient(r) if err != nil { return nil, err } apiClient.Endpoints = &constantEndpoint{ e: endp, EndpointFinder: apiClient.Endpoints, } return tq.NewManifest(r.Filesystem(), apiClient, "", ""), nil } type constantEndpoint struct { e lfshttp.Endpoint lfsapi.EndpointFinder } func (c *constantEndpoint) NewEndpointFromCloneURL(operation, rawurl string) lfshttp.Endpoint { return c.e } func (c *constantEndpoint) NewEndpoint(operation, rawurl string) lfshttp.Endpoint { return c.e } func (c *constantEndpoint) Endpoint(operation, remote string) lfshttp.Endpoint { return c.e } func (c *constantEndpoint) RemoteEndpoint(operation, remote string) lfshttp.Endpoint { return c.e } func buildTestData(repo *t.Repo, manifest tq.Manifest) (oidsExist, oidsMissing []TestObject, err error) { const oidCount = 50 oidsExist = make([]TestObject, 0, oidCount) oidsMissing = make([]TestObject, 0, oidCount) // just one commit logger := tasklog.NewLogger(os.Stdout, tasklog.ForceProgress(false), ) meter := tq.NewMeter(repo.Configuration()) meter.Logger = meter.LoggerFromEnv(repo.OSEnv()) logger.Enqueue(meter) commit := t.CommitInput{CommitterName: "A N Other", CommitterEmail: "noone@somewhere.com"} for i := 0; i < oidCount; i++ { filename := fmt.Sprintf("file%d.dat", i) sz := int64(rand.Intn(200)) + 50 commit.Files = append(commit.Files, &t.FileInput{Filename: filename, Size: sz}) meter.Add(sz) } outputs := repo.AddCommits([]*t.CommitInput{&commit}) // now upload uploadQueue := tq.NewTransferQueue(tq.Upload, manifest, "origin", tq.WithProgress(meter)) for _, f := range outputs[0].Files { oidsExist = append(oidsExist, TestObject{Oid: f.Oid, Size: f.Size}) t, err := uploadTransfer(repo.Filesystem(), f.Oid, "Test file") if err != nil { return nil, nil, err } uploadQueue.Add(t.Name, t.Path, t.Oid, t.Size, false, nil) } uploadQueue.Wait() for _, err := range uploadQueue.Errors() { if errors.IsFatalError(err) { exit("Fatal error setting up test data: %s", err) } } // Generate SHAs for missing files, random but repeatable // No actual file content needed for these rand.Seed(int64(oidCount)) runningSha := sha256.New() for i := 0; i < oidCount; i++ { runningSha.Write([]byte{byte(rand.Intn(256))}) oid := hex.EncodeToString(runningSha.Sum(nil)) sz := int64(rand.Intn(200)) + 50 oidsMissing = append(oidsMissing, TestObject{Oid: oid, Size: sz}) } return oidsExist, oidsMissing, nil } func saveTestOids(filename string, objs []TestObject) { f, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) if err != nil { exit("Error opening file %s", filename) } defer f.Close() for _, o := range objs { f.WriteString(fmt.Sprintf("%s %d\n", o.Oid, o.Size)) } } func runTests(manifest tq.Manifest, oidsExist, oidsMissing []TestObject) bool { ok := true fmt.Printf("Running %d tests...\n", len(tests)) for _, t := range tests { err := runTest(t, manifest, oidsExist, oidsMissing) if err != nil { ok = false } } return ok } func runTest(t ServerTest, manifest tq.Manifest, oidsExist, oidsMissing []TestObject) error { const linelen = 70 line := t.Name if len(line) > linelen { line = line[:linelen] } else if len(line) < linelen { line = fmt.Sprintf("%s%s", line, strings.Repeat(" ", linelen-len(line))) } fmt.Printf("%s...\r", line) err 
:= t.F(manifest, oidsExist, oidsMissing) if err != nil { fmt.Printf("%s FAILED\n", line) fmt.Println(err.Error()) } else { fmt.Printf("%s OK\n", line) } return err } // Exit prints a formatted message and exits. func exit(format string, args ...interface{}) { fmt.Fprintf(os.Stderr, format, args...) os.Exit(2) } func addTest(name string, f func(manifest tq.Manifest, oidsExist, oidsMissing []TestObject) error) { tests = append(tests, ServerTest{Name: name, F: f}) } func callBatchApi(manifest tq.Manifest, dir tq.Direction, objs []TestObject) ([]*tq.Transfer, error) { apiobjs := make([]*tq.Transfer, 0, len(objs)) for _, o := range objs { apiobjs = append(apiobjs, &tq.Transfer{Oid: o.Oid, Size: o.Size}) } bres, err := tq.Batch(manifest, dir, "origin", nil, apiobjs) if err != nil { return nil, err } return bres.Objects, nil } // Combine 2 slices into one by "randomly" interleaving // Not actually random, same sequence each time so repeatable func interleaveTestData(slice1, slice2 []TestObject) []TestObject { // Predictable sequence, mixin existing & missing semi-randomly rand.Seed(21) count := len(slice1) + len(slice2) ret := make([]TestObject, 0, count) slice1Idx := 0 slice2Idx := 0 for left := count; left > 0; { for i := rand.Intn(3) + 1; slice1Idx < len(slice1) && i > 0; i-- { obj := slice1[slice1Idx] ret = append(ret, obj) slice1Idx++ left-- } for i := rand.Intn(3) + 1; slice2Idx < len(slice2) && i > 0; i-- { obj := slice2[slice2Idx] ret = append(ret, obj) slice2Idx++ left-- } } return ret } func uploadTransfer(fs *fs.Filesystem, oid, filename string) (*tq.Transfer, error) { localMediaPath, err := fs.ObjectPath(oid) if err != nil { return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) } fi, err := os.Stat(localMediaPath) if err != nil { return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) } return &tq.Transfer{ Name: filename, Path: localMediaPath, Oid: oid, Size: fi.Size(), }, nil } func init() { RootCmd.Flags().StringVarP(&apiUrl, "url", "u", "", "URL of the API (must supply this or --clone)") RootCmd.Flags().StringVarP(&cloneUrl, "clone", "c", "", "Clone URL from which to find API (must supply this or --url)") RootCmd.Flags().StringVarP(&savePrefix, "save", "s", "", "Saves generated data to _exists|missing for subsequent use") } git-lfs-3.6.1/t/git-lfs-test-server-api/testdownload.go000066400000000000000000000064421472372047300230030ustar00rootroot00000000000000package main import ( "bytes" "errors" "fmt" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tq" ) // "download" - all present func downloadAllExist(manifest tq.Manifest, oidsExist, oidsMissing []TestObject) error { retobjs, err := callBatchApi(manifest, tq.Download, oidsExist) if err != nil { return err } if len(retobjs) != len(oidsExist) { return fmt.Errorf("incorrect number of returned objects, expected %d, got %d", len(oidsExist), len(retobjs)) } var errbuf bytes.Buffer for _, o := range retobjs { rel, _ := o.Rel("download") if rel == nil { errbuf.WriteString(fmt.Sprintf("Missing download link for %s\n", o.Oid)) } } if errbuf.Len() > 0 { return errors.New(errbuf.String()) } return nil } // "download" - all missing (test includes 404 error entry) func downloadAllMissing(manifest tq.Manifest, oidsExist, oidsMissing []TestObject) error { retobjs, err := callBatchApi(manifest, tq.Download, oidsMissing) if err != nil { return err } if len(retobjs) != len(oidsMissing) { return fmt.Errorf("incorrect number of returned objects, expected %d, got %d", len(oidsMissing), 
len(retobjs)) } var errbuf bytes.Buffer for _, o := range retobjs { link, _ := o.Rel("download") if link != nil { errbuf.WriteString(fmt.Sprintf("Download link should not exist for %s, was %+v\n", o.Oid, link)) } if o.Error == nil { errbuf.WriteString(fmt.Sprintf("Download should include an error for missing object %s\n", o.Oid)) } else if o.Error.Code != 404 { errbuf.WriteString(fmt.Sprintf("Download error code for missing object %s should be 404, got %d\n", o.Oid, o.Error.Code)) } } if errbuf.Len() > 0 { return errors.New(errbuf.String()) } return nil } // "download" - mixture func downloadMixed(manifest tq.Manifest, oidsExist, oidsMissing []TestObject) error { existSet := tools.NewStringSetWithCapacity(len(oidsExist)) for _, o := range oidsExist { existSet.Add(o.Oid) } missingSet := tools.NewStringSetWithCapacity(len(oidsMissing)) for _, o := range oidsMissing { missingSet.Add(o.Oid) } calloids := interleaveTestData(oidsExist, oidsMissing) retobjs, err := callBatchApi(manifest, tq.Download, calloids) if err != nil { return err } count := len(oidsExist) + len(oidsMissing) if len(retobjs) != count { return fmt.Errorf("incorrect number of returned objects, expected %d, got %d", count, len(retobjs)) } var errbuf bytes.Buffer for _, o := range retobjs { link, _ := o.Rel("download") if missingSet.Contains(o.Oid) { if link != nil { errbuf.WriteString(fmt.Sprintf("Download link should not exist for %s, was %+v\n", o.Oid, link)) } if o.Error == nil { errbuf.WriteString(fmt.Sprintf("Download should include an error for missing object %s", o.Oid)) } else if o.Error.Code != 404 { errbuf.WriteString(fmt.Sprintf("Download error code for missing object %s should be 404, got %d\n", o.Oid, o.Error.Code)) } } if existSet.Contains(o.Oid) && link == nil { errbuf.WriteString(fmt.Sprintf("Missing download link for %s\n", o.Oid)) } } if errbuf.Len() > 0 { return errors.New(errbuf.String()) } return nil } func init() { addTest("Test download: all existing", downloadAllExist) addTest("Test download: all missing", downloadAllMissing) addTest("Test download: mixed", downloadMixed) } git-lfs-3.6.1/t/git-lfs-test-server-api/testupload.go000066400000000000000000000132311472372047300224520ustar00rootroot00000000000000package main import ( "bytes" "errors" "fmt" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tq" ) // "upload" - all missing func uploadAllMissing(manifest tq.Manifest, oidsExist, oidsMissing []TestObject) error { retobjs, err := callBatchApi(manifest, tq.Upload, oidsMissing) if err != nil { return err } if len(retobjs) != len(oidsMissing) { return fmt.Errorf("incorrect number of returned objects, expected %d, got %d", len(oidsMissing), len(retobjs)) } var errbuf bytes.Buffer for _, o := range retobjs { rel, _ := o.Rel("upload") if rel == nil { errbuf.WriteString(fmt.Sprintf("Missing upload link for %s\n", o.Oid)) } // verify link is optional so don't check } if errbuf.Len() > 0 { return errors.New(errbuf.String()) } return nil } // "upload" - all present func uploadAllExists(manifest tq.Manifest, oidsExist, oidsMissing []TestObject) error { retobjs, err := callBatchApi(manifest, tq.Upload, oidsExist) if err != nil { return err } if len(retobjs) != len(oidsExist) { return fmt.Errorf("incorrect number of returned objects, expected %d, got %d", len(oidsExist), len(retobjs)) } var errbuf bytes.Buffer for _, o := range retobjs { link, _ := o.Rel("upload") if link != nil { errbuf.WriteString(fmt.Sprintf("Upload link should not exist for %s, was %+v\n", o.Oid, link)) } } if errbuf.Len() > 
0 { return errors.New(errbuf.String()) } return nil } // "upload" - mix of missing & present func uploadMixed(manifest tq.Manifest, oidsExist, oidsMissing []TestObject) error { existSet := tools.NewStringSetWithCapacity(len(oidsExist)) for _, o := range oidsExist { existSet.Add(o.Oid) } missingSet := tools.NewStringSetWithCapacity(len(oidsMissing)) for _, o := range oidsMissing { missingSet.Add(o.Oid) } calloids := interleaveTestData(oidsExist, oidsMissing) retobjs, err := callBatchApi(manifest, tq.Upload, calloids) if err != nil { return err } count := len(oidsExist) + len(oidsMissing) if len(retobjs) != count { return fmt.Errorf("incorrect number of returned objects, expected %d, got %d", count, len(retobjs)) } var errbuf bytes.Buffer for _, o := range retobjs { link, _ := o.Rel("upload") if existSet.Contains(o.Oid) { if link != nil { errbuf.WriteString(fmt.Sprintf("Upload link should not exist for %s, was %+v\n", o.Oid, link)) } } if missingSet.Contains(o.Oid) && link == nil { errbuf.WriteString(fmt.Sprintf("Missing upload link for %s\n", o.Oid)) } } if errbuf.Len() > 0 { return errors.New(errbuf.String()) } return nil } func uploadEdgeCases(manifest tq.Manifest, oidsExist, oidsMissing []TestObject) error { errorCases := make([]TestObject, 0, 5) errorCodeMap := make(map[string]int, 5) errorReasonMap := make(map[string]string, 5) validCases := make([]TestObject, 0, 1) validReasonMap := make(map[string]string, 5) // Invalid SHAs - code 422 // Too short sha := "a345cde" errorCases = append(errorCases, TestObject{Oid: sha, Size: 99}) errorCodeMap[sha] = 422 errorReasonMap[sha] = "SHA is too short" // Too long sha = "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" errorCases = append(errorCases, TestObject{Oid: sha, Size: 99}) errorCodeMap[sha] = 422 errorReasonMap[sha] = "SHA is too long" // Invalid characters -----!---------------------------------! 
sha = "bf3e3e2af9366a3b704ax0c31de5afa64193ebabffde2091936ad2G7510bc03a" errorCases = append(errorCases, TestObject{Oid: sha, Size: 99}) errorCodeMap[sha] = 422 errorReasonMap[sha] = "SHA contains invalid characters" // Invalid size - code 422 sha = "e3bf3e2af9366a3b704af0c31de5afa64193ebabffde2091936ad237510bc03a" errorCases = append(errorCases, TestObject{Oid: sha, Size: -1}) errorCodeMap[sha] = 422 errorReasonMap[sha] = "Negative size" sha = "d2983e2af9366a3b704af0c31de5afa64193ebabffde2091936ad237510bc03a" errorCases = append(errorCases, TestObject{Oid: sha, Size: -125}) errorCodeMap[sha] = 422 errorReasonMap[sha] = "Negative size" // Zero size - should be allowed sha = "159f6ac723b9023b704af0c31de5afa64193ebabffde2091936ad237510bc03a" validCases = append(validCases, TestObject{Oid: sha, Size: 0}) validReasonMap[sha] = "Zero size should be allowed" calloids := interleaveTestData(errorCases, validCases) retobjs, err := callBatchApi(manifest, tq.Upload, calloids) if err != nil { return err } count := len(errorCases) + len(validCases) if len(retobjs) != count { return fmt.Errorf("incorrect number of returned objects, expected %d, got %d", count, len(retobjs)) } var errbuf bytes.Buffer for _, o := range retobjs { link, _ := o.Rel("upload") if code, iserror := errorCodeMap[o.Oid]; iserror { reason, _ := errorReasonMap[o.Oid] if link != nil { errbuf.WriteString(fmt.Sprintf("Upload link should not exist for %s, was %+v, reason %s\n", o.Oid, link, reason)) } if o.Error == nil { errbuf.WriteString(fmt.Sprintf("Upload should include an error for invalid object %s, reason %s", o.Oid, reason)) } else if o.Error.Code != code { errbuf.WriteString(fmt.Sprintf("Upload error code for missing object %s should be %d, got %d, reason %s\n", o.Oid, code, o.Error.Code, reason)) } } if reason, reasonok := validReasonMap[o.Oid]; reasonok { if link == nil { errbuf.WriteString(fmt.Sprintf("Missing upload link for %s, should be present because %s\n", o.Oid, reason)) } } } if errbuf.Len() > 0 { return errors.New(errbuf.String()) } return nil } func init() { addTest("Test upload: all missing", uploadAllMissing) addTest("Test upload: all present", uploadAllExists) addTest("Test upload: mixed", uploadMixed) addTest("Test upload: edge cases", uploadEdgeCases) } git-lfs-3.6.1/t/t-alternates.sh000077500000000000000000000105631472372047300163110ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "alternates (single)" ( set -e reponame="alternates-single-alternate" setup_remote_repo_with_file "$reponame" "a.txt" pushd "$TRASHDIR" > /dev/null clone_repo "$reponame" "${reponame}_alternate" popd > /dev/null rm -rf .git/lfs/objects alternate="$TRASHDIR/${reponame}_alternate/.git/objects" echo "$(native_path "$alternate")" > .git/objects/info/alternates GIT_TRACE=1 git lfs fetch origin main 2>&1 | tee fetch.log [ "0" -eq "$(grep -c "sending batch of size 1" fetch.log)" ] ) end_test begin_test "alternates (multiple)" ( set -e reponame="alternates-multiple-alternates" setup_remote_repo_with_file "$reponame" "a.txt" pushd "$TRASHDIR" > /dev/null clone_repo "$reponame" "${reponame}_alternate_stale" rm -rf .git/lfs/objects popd > /dev/null pushd "$TRASHDIR" > /dev/null clone_repo "$reponame" "${reponame}_alternate" popd > /dev/null rm -rf .git/lfs/objects alternate_stale="$TRASHDIR/${reponame}_alternate_stale/.git/objects" alternate="$TRASHDIR/${reponame}_alternate/.git/objects" echo "$(native_path "$alternate")" > .git/objects/info/alternates echo "$(native_path "$alternate_stale")" >> .git/objects/info/alternates GIT_TRACE=1 git lfs fetch origin main 2>&1 | tee fetch.log [ "0" -eq "$(grep -c "sending batch of size 1" fetch.log)" ] ) end_test begin_test "alternates (commented)" ( set -e reponame="alternates-commented-alternate" setup_remote_repo_with_file "$reponame" "a.txt" pushd "$TRASHDIR" > /dev/null clone_repo "$reponame" "${reponame}_alternate" popd > /dev/null rm -rf .git/lfs/objects alternate="$TRASHDIR/${reponame}_alternate/.git/objects" echo "# $alternate" > .git/objects/info/alternates GIT_TRACE=1 git lfs fetch origin main 2>&1 | tee fetch.log [ "1" -eq "$(grep -c "sending batch of size 1" fetch.log)" ] ) end_test begin_test "alternates (quoted)" ( set -e reponame="alternates-quoted-alternate" setup_remote_repo_with_file "$reponame" "a.txt" pushd "$TRASHDIR" > /dev/null clone_repo "$reponame" "${reponame}_alternate" popd > /dev/null rm -rf .git/lfs/objects # Normally, a plain native_path call would be sufficient here, but when we # use a quoted alternate, Git interprets backslash escapes, and Windows path # names look like backslash escapes. As a consequence, we switch to forward # slashes to avoid misinterpretation. 
alternate=$(native_path "$TRASHDIR/${reponame}_alternate/.git/objects" | sed -e 's,\\,/,g') echo "\"$alternate\"" > .git/objects/info/alternates GIT_TRACE=1 git lfs fetch origin main 2>&1 | tee fetch.log [ "0" -eq "$(grep -c "sending batch of size 1" fetch.log)" ] ) end_test begin_test "alternates (OS environment, single)" ( set -e reponame="alternates-environment-single-alternate" setup_remote_repo_with_file "$reponame" "a.txt" pushd "$TRASHDIR" > /dev/null clone_repo "$reponame" "${reponame}_alternate" popd > /dev/null rm -rf .git/lfs/objects rm -rf .git/objects/* git init alternate="$(native_path "$TRASHDIR/${reponame}_alternate/.git/objects")" GIT_ALTERNATE_OBJECT_DIRECTORIES="$alternate" \ GIT_TRACE=1 \ git lfs fetch origin main 2>&1 | tee fetch.log [ "0" -eq "$(grep -c "sending batch of size 1" fetch.log)" ] GIT_ALTERNATE_OBJECT_DIRECTORIES="$alternate" \ git lfs push "$(git config remote.origin.url)" main ) end_test begin_test "alternates (OS environment, multiple)" ( set -e reponame="alternates-environment-multiple-alternates" setup_remote_repo_with_file "$reponame" "a.txt" pushd "$TRASHDIR" > /dev/null clone_repo "$reponame" "${reponame}_alternate_stale" rm -rf .git/lfs/objects popd > /dev/null pushd "$TRASHDIR" > /dev/null clone_repo "$reponame" "${reponame}_alternate" popd > /dev/null rm -rf .git/lfs/objects rm -rf .git/objects/* git init alternate_stale="$(native_path "$TRASHDIR/${reponame}_alternate_stale/.git/objects")" alternate="$(native_path "$TRASHDIR/${reponame}_alternate/.git/objects")" sep="$(native_path_list_separator)" GIT_ALTERNATE_OBJECT_DIRECTORIES="$alternate_stale$sep$alternate" \ GIT_TRACE=1 \ git lfs fetch origin main 2>&1 | tee fetch.log [ "0" -eq "$(grep -c "sending batch of size 1" fetch.log)" ] GIT_ALTERNATE_OBJECT_DIRECTORIES="$alternate_stale$sep$alternate" \ git lfs push "$(git config remote.origin.url)" main ) end_test git-lfs-3.6.1/t/t-askpass.sh000077500000000000000000000067671472372047300156270ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "askpass: push with GIT_ASKPASS" ( set -e reponame="askpass-with-git-environ" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "hello" > a.dat git add .gitattributes a.dat git commit -m "initial commit" # $password is defined from test/cmd/lfstest-gitserver.go (see: skipIfBadAuth) export LFS_ASKPASS_USERNAME="user" export LFS_ASKPASS_PASSWORD="pass" git config "credential.helper" "" GIT_ASKPASS="lfs-askpass" SSH_ASKPASS="dont-call-me" GIT_TRACE=1 GIT_CURL_VERBOSE=1 git push origin main 2>&1 | tee push.log GITSERVER_USER="$(printf $GITSERVER | sed -e 's/http:\/\//http:\/\/user@/')" grep "filling with GIT_ASKPASS: lfs-askpass Username for \"$GITSERVER/$reponame\"" push.log grep "filling with GIT_ASKPASS: lfs-askpass Password for \"$GITSERVER_USER/$reponame\"" push.log grep "main -> main" push.log ) end_test begin_test "askpass: push with core.askPass" ( set -e reponame="askpass-with-config" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "hello" > a.dat git add .gitattributes a.dat git commit -m "initial commit" # $password is defined from test/cmd/lfstest-gitserver.go (see: skipIfBadAuth) export LFS_ASKPASS_PASSWORD="pass" git config "credential.helper" "" git config "core.askPass" "lfs-askpass" cat .git/config SSH_ASKPASS="dont-call-me" GIT_TRACE=1 GIT_CURL_VERBOSE=1 git push origin main 2>&1 | tee push.log GITSERVER_USER="$(printf $GITSERVER | sed -e 's/http:\/\//http:\/\/user@/')" grep "filling with GIT_ASKPASS: lfs-askpass Username for \"$GITSERVER/$reponame\"" push.log grep "filling with GIT_ASKPASS: lfs-askpass Password for \"$GITSERVER_USER/$reponame\"" push.log grep "main -> main" push.log ) end_test begin_test "askpass: push with SSH_ASKPASS" ( set -e reponame="askpass-with-ssh-environ" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "hello" > a.dat git add .gitattributes a.dat git commit -m "initial commit" # $password is defined from test/cmd/lfstest-gitserver.go (see: skipIfBadAuth) export LFS_ASKPASS_USERNAME="user" export LFS_ASKPASS_PASSWORD="pass" git config "credential.helper" "" SSH_ASKPASS="lfs-askpass" GIT_TRACE=1 GIT_CURL_VERBOSE=1 git push origin main 2>&1 | tee push.log GITSERVER_USER="$(printf $GITSERVER | sed -e 's/http:\/\//http:\/\/user@/')" grep "filling with GIT_ASKPASS: lfs-askpass Username for \"$GITSERVER/$reponame\"" push.log grep "filling with GIT_ASKPASS: lfs-askpass Password for \"$GITSERVER_USER/$reponame\"" push.log grep "main -> main" push.log ) end_test begin_test "askpass: defaults to provided credentials" ( set -e reponame="askpass-provided-creds" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "hello" > a.dat git add .gitattributes a.dat git commit -m "initial commit" # $password is defined from test/cmd/lfstest-gitserver.go (see: skipIfBadAuth) export LFS_ASKPASS_USERNAME="fakeuser" export LFS_ASKPASS_PASSWORD="fakepass" git config --local "credential.helper" "" url=$(git config --get remote.origin.url) newurl=${url/http:\/\//http:\/\/user\:pass@} git remote set-url origin "$newurl" GIT_ASKPASS="lfs-askpass" GIT_TRACE=1 GIT_CURL_VERBOSE=1 git push origin main 2>&1 | tee push.log grep "filling with GIT_ASKPASS" push.log && exit 1 grep "main -> main" push.log ) end_test git-lfs-3.6.1/t/t-attributes.sh000077500000000000000000000104101472372047300163240ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "macros" ( set -e reponame="$(basename "$0" ".sh")" clone_repo "$reponame" repo mkdir dir printf '[attr]lfs filter=lfs diff=lfs merge=lfs -text\n*.dat lfs\n' \ > .gitattributes printf '[attr]lfs2 filter=lfs diff=lfs merge=lfs -text\n*.bin lfs2\n' \ > dir/.gitattributes git add .gitattributes dir git commit -m 'initial import' contents="some data" printf "$contents" > foo.dat git add *.dat git commit -m 'foo.dat' assert_local_object "$(calc_oid "$contents")" 9 contents2="other data" printf "$contents2" > dir/foo.bin git add dir git commit -m 'foo.bin' refute_local_object "$(calc_oid "$contents2")" git lfs track '*.dat' 2>&1 | tee track.log grep '"*.dat" already supported' track.log cd dir git lfs track '*.bin' 2>&1 | tee track.log grep '"*.bin" already supported' track.log && exit 1 true # NOTE: At present we do not test that "git lfs track" reports # "already supported" when it finds a pattern in a subdirectory's # .gitattributes file which references a macro attribute in # the top-level .gitattributes file that sets "filter=lfs". # This is because, while "git check-attr" resolves macro references # from a file such as dir/.gitattributes to .gitattributess, # "git lfs track" only resolves macro references as it reads these # files in depth-first order, so unlike Git it does not expand an # "lfs" reference to "filter=lfs" if it appears in dir/.gitattributes. ) end_test begin_test "macros with HOME" ( set -e reponame="$(basename "$0" ".sh")-home" clone_repo "$reponame" repo-home mkdir -p "$HOME/.config/git" printf '[attr]lfs filter=lfs diff=lfs merge=lfs -text\n*.dat lfs\n' \ > "$HOME/.config/git/attributes" contents="some data" printf "$contents" > foo.dat git add *.dat git commit -m 'foo.dat' assert_local_object "$(calc_oid "$contents")" 9 git lfs track 2>&1 | tee track.log grep '*.dat' track.log ) end_test begin_test "macros with HOME split" ( set -e reponame="$(basename "$0" ".sh")-home-split" clone_repo "$reponame" repo-home-split mkdir -p "$HOME/.config/git" printf '[attr]lfs filter=lfs diff=lfs merge=lfs -text\n' \ > "$HOME/.config/git/attributes" printf '*.dat lfs\n' > .gitattributes git add .gitattributes git commit -m 'initial import' contents="some data" printf "$contents" > foo.dat git add *.dat git commit -m 'foo.dat' assert_local_object "$(calc_oid "$contents")" 9 git lfs track '*.dat' 2>&1 | tee track.log grep '"*.dat" already supported' track.log ) end_test begin_test "macros with unspecified flag" ( set -e reponame="$(basename "$0" ".sh")" clone_repo "$reponame" repo-unspecified mkdir dir printf '[attr]lfs filter=lfs diff=lfs merge=lfs -text\n**/*.dat lfs\n' \ > .gitattributes printf '*.dat !lfs\n' \ > dir/.gitattributes git add .gitattributes dir git commit -m 'initial import' contents="some data" printf "$contents" > foo.dat git add *.dat git commit -m 'foo.dat' assert_local_object "$(calc_oid "$contents")" 9 contents2="other data" printf "$contents2" > dir/foo.dat git add dir git commit -m 'dir/foo.dat' refute_local_object "$(calc_oid "$contents2")" git lfs track '**/*.dat' 2>&1 | tee track.log grep '"*\*/\*.dat" already supported' track.log # NOTE: The intent of this test is to confirm that running the # "git lfs track '*.dat'" command in the dir/ directory returns # "already supported", because it finds the "*.dat" pattern and # resolves its reference to the "lfs" macro attribute in # top-level .gitattributes file such that a "filter" attribute # is recognized, albeit with the unspecified state set. 
# # However, as noted in the "macros" test above, because the # "git lfs track" command parses the dir/.gitattributes file # before the top-level .gitattributes file, it does not resolve # the macro attribute reference, and our test would fail despite # our ability to parse macro attribute references with a "!" # unspecified flag character prefix. #cd dir #git lfs track '*.dat' 2>&1 | tee track.log #grep '"*.dat" already supported' track.log ) end_test git-lfs-3.6.1/t/t-batch-error-handling.sh000077500000000000000000000030421472372047300201330ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "batch error handling" ( set -e # This initializes a new bare git repository in test/remote. # These remote repositories are global to every test, so keep the names # unique. reponame="badbatch" # Server looks for the "badbatch" repo, returns a 203 status setup_remote_repo "$reponame" # Clone the repository from the test Git server. This is empty, and will be # used to test a "git pull" below. The repo is cloned to $TRASHDIR/clone clone_repo "$reponame" clone # Clone the repository again to $TRASHDIR/repo. This will be used to commit # and push objects. clone_repo "$reponame" repo # This executes Git LFS from the local repo that was just cloned. git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log grep "main (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat a.dat)" ] # This is a small shell function that runs several git commands together. assert_pointer "main" "a.dat" "$contents_oid" 1 refute_server_object "$reponame" "$contents_oid" # This pushes to the remote repository set up at the top of the test. git push origin main 2>&1 | tee push.log grep "Unable to parse HTTP response" push.log ) end_test git-lfs-3.6.1/t/t-batch-retries-ratelimit.sh000077500000000000000000000071621472372047300206740ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "batch upload causes retries" ( set -e reponame="upload-batch-retry-later" setup_remote_repo "$reponame" clone_repo "$reponame" batch-repo-upload contents="content" oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit -m "initial commit" GIT_TRACE=1 git push origin main 2>&1 | tee push.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git push origin main\` to succeed ..." exit 1 fi assert_server_object "$reponame" "$oid" ) end_test begin_test "batch upload with multiple files causes retries" ( set -e reponame="upload-multiple-batch-retry-later" setup_remote_repo "$reponame" clone_repo "$reponame" batch-repo-upload-multiple contents1="content 1" oid1="$(calc_oid "$contents1")" printf "%s" "$contents1" > a.dat contents2="content 2" oid2="$(calc_oid "$contents2")" printf "%s" "$contents2" > b.dat git lfs track "*.dat" git add .gitattributes a.dat b.dat git commit -m "initial commit" GIT_TRACE=1 git push origin main 2>&1 | tee push.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git push origin main\` to succeed ..." 
exit 1 fi assert_server_object "$reponame" "$oid1" assert_server_object "$reponame" "$oid2" ) end_test begin_test "batch clone causes retries" ( set -e reponame="clone-batch-retry-later" setup_remote_repo "$reponame" clone_repo "$reponame" batch-repo-clone contents="content" oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit -m "initial commit" git push origin main assert_server_object "$reponame" "$oid" pushd .. git lfs clone "$GITSERVER/$reponame" "$reponame-assert" if [ "0" -ne "$?" ]; then echo >&2 "fatal: expected \`git lfs clone \"$GITSERVER/$reponame\" \"$reponame-assert\"\` to su``" exit 1 fi cd "$reponame-assert" assert_local_object "$oid" "${#contents}" popd ) end_test begin_test "batch clone with multiple files causes retries" ( set -e reponame="clone-multiple-batch-retry-later" setup_remote_repo "$reponame" clone_repo "$reponame" batch-repo-clone-multiple contents1="content 1" oid1="$(calc_oid "$contents1")" printf "%s" "$contents1" > a.dat contents2="content 2" oid2="$(calc_oid "$contents2")" printf "%s" "$contents2" > b.dat git lfs track "*.dat" git add .gitattributes a.dat b.dat git commit -m "initial commit" git push origin main assert_server_object "$reponame" "$oid1" assert_server_object "$reponame" "$oid2" pushd .. git lfs clone "$GITSERVER/$reponame" "$reponame-assert" if [ "0" -ne "$?" ]; then echo >&2 "fatal: expected \`git lfs clone \"$GITSERVER/$reponame\" \"$reponame-assert\"\` to su``" exit 1 fi cd "$reponame-assert" assert_local_object "$oid1" "${#contents1}" assert_local_object "$oid2" "${#contents2}" popd ) end_test begin_test "batch upload causes retries (missing header)" ( set -e reponame="upload-batch-retry-later-no-header" setup_remote_repo "$reponame" clone_repo "$reponame" batch-repo-upload-no-header contents="content" oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit -m "initial commit" GIT_TRACE=1 git push origin main 2>&1 | tee push.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git push origin main\` to succeed ..." exit 1 fi assert_server_object "$reponame" "$oid" ) end_test git-lfs-3.6.1/t/t-batch-retries.sh000077500000000000000000000036701472372047300167040ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "batch storage upload causes retries" ( set -e reponame="batch-storage-upload-retry" setup_remote_repo "$reponame" clone_repo "$reponame" batch-storage-repo-upload contents="storage-upload-retry" oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit -m "initial commit" git config --local lfs.transfer.maxretries 3 GIT_TRACE=1 git push origin main 2>&1 | tee push.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git push origin main\` to succeed ..." exit 1 fi actual_count="$(grep -c "tq: retrying object $oid: Fatal error: Server error" push.log)" [ "2" = "$actual_count" ] assert_server_object "$reponame" "$oid" ) end_test begin_test "batch storage download causes retries" ( set -e reponame="batch-storage-download-retry" setup_remote_repo "$reponame" clone_repo "$reponame" batch-storage-repo-download contents="storage-download-retry" oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit -m "initial commit" git push origin main assert_server_object "$reponame" "$oid" pushd .. 
git \ -c "filter.lfs.process=" \ -c "filter.lfs.smudge=cat" \ -c "filter.lfs.required=false" \ clone "$GITSERVER/$reponame" "$reponame-assert" cd "$reponame-assert" git config credential.helper lfstest git config --local lfs.transfer.maxretries 3 GIT_TRACE=1 git lfs pull origin main 2>&1 | tee pull.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git lfs pull origin main\` to succeed ..." exit 1 fi actual_count="$(grep -c "tq: retrying object $oid: Fatal error: Server error" pull.log)" [ "2" = "$actual_count" ] assert_local_object "$oid" "${#contents}" popd ) end_test git-lfs-3.6.1/t/t-batch-storage-retries-ratelimit.sh000077500000000000000000000100171472372047300223270ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "batch storage upload causes retries" ( set -e reponame="batch-storage-upload-retry-later" setup_remote_repo "$reponame" clone_repo "$reponame" batch-storage-repo-upload contents="storage-upload-retry-later" oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit -m "initial commit" GIT_TRACE=1 git push origin main 2>&1 | tee push.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git push origin main\` to succeed ..." exit 1 fi assert_server_object "$reponame" "$oid" ) end_test begin_test "batch storage download causes retries" ( set -e reponame="batch-storage-download-retry-later" setup_remote_repo "$reponame" clone_repo "$reponame" batch-storage-repo-download contents="storage-download-retry-later" oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit -m "initial commit" git push origin main assert_server_object "$reponame" "$oid" pushd .. git \ -c "filter.lfs.process=" \ -c "filter.lfs.smudge=cat" \ -c "filter.lfs.required=false" \ clone "$GITSERVER/$reponame" "$reponame-assert" cd "$reponame-assert" git config credential.helper lfstest GIT_TRACE=1 git lfs pull origin main 2>&1 | tee pull.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git lfs pull origin main\` to succeed ..." exit 1 fi assert_local_object "$oid" "${#contents}" popd ) end_test begin_test "batch clone causes retries" ( set -e reponame="batch-storage-clone-retry-later" setup_remote_repo "$reponame" clone_repo "$reponame" batch-storage-repo-clone contents="storage-download-retry-later" oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit -m "initial commit" git push origin main assert_server_object "$reponame" "$oid" pushd .. git lfs clone "$GITSERVER/$reponame" "$reponame-assert" if [ "0" -ne "$?" ]; then echo >&2 "fatal: expected \`git lfs clone \"$GITSERVER/$reponame\" \"$reponame-assert\"\` to su``" exit 1 fi cd "$reponame-assert" assert_local_object "$oid" "${#contents}" popd ) end_test begin_test "batch storage upload causes retries (missing header)" ( set -e reponame="batch-storage-upload-retry-later-no-header" setup_remote_repo "$reponame" clone_repo "$reponame" batch-storage-repo-upload-no-header contents="storage-upload-retry-later-no-header" oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit -m "initial commit" GIT_TRACE=1 git push origin main 2>&1 | tee push.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git push origin main\` to succeed ..." 
exit 1 fi assert_server_object "$reponame" "$oid" ) end_test begin_test "batch storage download causes retries (missing header)" ( set -e reponame="batch-storage-download-retry-later-no-header" setup_remote_repo "$reponame" clone_repo "$reponame" batch-storage-repo-download-no-header contents="storage-download-retry-later-no-header" oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit -m "initial commit" git push origin main assert_server_object "$reponame" "$oid" pushd .. git \ -c "filter.lfs.process=" \ -c "filter.lfs.smudge=cat" \ -c "filter.lfs.required=false" \ clone "$GITSERVER/$reponame" "$reponame-assert" cd "$reponame-assert" git config credential.helper lfstest GIT_TRACE=1 git lfs pull origin main 2>&1 | tee pull.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git lfs pull origin main\` to succeed ..." exit 1 fi assert_local_object "$oid" "${#contents}" popd ) end_test git-lfs-3.6.1/t/t-batch-transfer-size.sh000077500000000000000000000046331472372047300200230ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "batch storage upload with small batch size" ( set -e reponame="batch-storage-upload-small-batch" setup_remote_repo "$reponame" clone_repo "$reponame" batch-storage-repo-upload contents1="storage-upload-batch-1" contents2="storage-upload-batch-2" contents3="storage-upload-batch-3" oid1="$(calc_oid "$contents1")" oid2="$(calc_oid "$contents2")" oid3="$(calc_oid "$contents3")" printf "%s" "$contents1" > a.dat printf "%s" "$contents2" > b.dat printf "%s" "$contents3" > c.dat git lfs track "*.dat" git add .gitattributes a.dat b.dat c.dat git commit -m "initial commit" git config --local lfs.transfer.batchSize 1 GIT_TRACE=1 git push origin main 2>&1 | tee push.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git push origin main\` to succeed ..." exit 1 fi actual_count="$(grep -c "tq: sending batch of size 1" push.log)" [ "3" = "$actual_count" ] assert_server_object "$reponame" "$oid1" assert_server_object "$reponame" "$oid2" assert_server_object "$reponame" "$oid3" ) end_test begin_test "batch storage download with small batch size" ( set -e reponame="batch-storage-download-small-batch" setup_remote_repo "$reponame" clone_repo "$reponame" batch-storage-repo-download contents1="storage-download-batch-1" contents2="storage-download-batch-2" contents3="storage-download-batch-3" oid1="$(calc_oid "$contents1")" oid2="$(calc_oid "$contents2")" oid3="$(calc_oid "$contents3")" printf "%s" "$contents1" > a.dat printf "%s" "$contents2" > b.dat printf "%s" "$contents3" > c.dat git lfs track "*.dat" git add .gitattributes a.dat b.dat c.dat git commit -m "initial commit" git push origin main assert_server_object "$reponame" "$oid1" assert_server_object "$reponame" "$oid2" assert_server_object "$reponame" "$oid3" cd .. git config --global lfs.transfer.batchSize 1 GIT_TRACE=1 git clone "$GITSERVER/$reponame" "${reponame}-assert" 2>&1 | tee clone.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git clone\` to succeed ..." 
exit 1 fi actual_count="$(grep -c "tq: sending batch of size 1" clone.log)" [ "3" = "$actual_count" ] cd "${reponame}-assert" assert_local_object "$oid1" "${#contents1}" assert_local_object "$oid2" "${#contents2}" assert_local_object "$oid3" "${#contents3}" ) end_test git-lfs-3.6.1/t/t-batch-transfer.sh000077500000000000000000000242511472372047300170510ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "batch transfer" ( set -e # This initializes a new bare git repository in test/remote. # These remote repositories are global to every test, so keep the names # unique. reponame1="$(basename "$0" ".sh")" reponame2="CAPITALLETTERS" reponame=$reponame1$reponame2 setup_remote_repo "$reponame" # Clone the repository from the test Git server. This is empty, and will be # used to test a "git pull" below. The repo is cloned to $TRASHDIR/clone clone_repo "$reponame" clone # Clone the repository again to $TRASHDIR/repo. This will be used to commit # and push objects. clone_repo "$reponame" repo # This executes Git LFS from the local repo that was just cloned. git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log grep "main (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat a.dat)" ] # This is a small shell function that runs several git commands together. assert_pointer "main" "a.dat" "$contents_oid" 1 refute_server_object "$reponame" "$contents_oid" # This pushes to the remote repository set up at the top of the test. 
git push origin main 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log grep "main -> main" push.log assert_server_object "$reponame" "$contents_oid" # change to the clone's working directory cd ../clone git pull origin main [ "a" = "$(cat a.dat)" ] assert_pointer "main" "a.dat" "$contents_oid" 1 ) end_test begin_test "batch transfers occur in reverse order by size" ( set -e reponame="batch-order-test" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" small_contents="small" small_oid="$(calc_oid "$small_contents")" printf "%s" "$small_contents" > small.dat bigger_contents="bigger" bigger_oid="$(calc_oid "$bigger_contents")" printf "%s" "$bigger_contents" > bigger.dat git add *.dat git commit -m "add small and large objects" GIT_CURL_VERBOSE=1 git push origin main 2>&1 | tee push.log batch="$(grep "{\"operation\":\"upload\"" push.log | head -1)" pos_small="$(substring_position "$batch" "$small_oid")" pos_large="$(substring_position "$batch" "$bigger_oid")" # Assert that the larger object shows up earlier in the batch than the # smaller object [ "$pos_large" -lt "$pos_small" ] ) end_test begin_test "batch transfers succeed with an empty hash algorithm" ( set -e reponame="batch-test-empty-algo" contents="batch-hash-algo-empty" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" printf "hi" > good.dat printf "%s" "$contents" > special.dat git add .gitattributes good.dat special.dat git commit -m "hi" git push origin main assert_server_object "$reponame" "$(calc_oid "$contents")" ) end_test begin_test "batch transfers fail with an unknown hash algorithm" ( set -e reponame="batch-test-invalid-algo" contents="batch-hash-algo-invalid" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" printf "hi" > good.dat printf "%s" "$contents" > special.dat git add .gitattributes good.dat special.dat git commit -m "hi" git push origin main 2>&1 | tee push.log grep 'unsupported hash algorithm' push.log refute_server_object "$reponame" "$(calc_oid "$contents")" ) end_test begin_test "batch transfers with ssh endpoint (git-lfs-authenticate)" ( set -e reponame="batch-ssh" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" sshurl="${GITSERVER/http:\/\//ssh://git@}/$reponame" git config lfs.url "$sshurl" contents="test" git lfs track "*.dat" printf "%s" "$contents" > test.dat git add .gitattributes test.dat git commit -m "initial commit" GIT_TRACE=1 git push origin main >push.log 2>&1 [ "1" -eq "$(grep -c "exec: lfs-ssh-echo.*git-lfs-authenticate /$reponame upload" push.log)" ] assert_server_object "$reponame" "$(calc_oid "$contents")" ) end_test assert_ssh_transfer_session_counts() { local log="$1" local msg="$2" local min="$3" local max="$4" local count="$(grep -c "$msg" "$log")" [ "$max" -ge "$count" ] [ "$min" -le "$count" ] } assert_ssh_transfer_sessions() { local log="$1" local direction="$2" local num_objs="$3" local objs_per_batch="$4" local min_expected_start=1 local max_expected_start=$(( num_objs > objs_per_batch ? objs_per_batch : num_objs )) local min_expected_end=1 local max_expected_end="$max_expected_start" local expected_ctrl=1 # On upload we currently spawn one extra control socket SSH connection # to run locking commands and never shut it down cleanly, so our expected # start counts are higher than our expected termination counts. 
if [ "upload" = "$direction" ]; then (( ++expected_ctrl )) (( ++min_expected_start )) (( ++max_expected_start )) fi # Versions of Git prior to 2.11.0 invoke Git LFS via the "smudge" filter # rather than the "process" filter, so a separate Git LFS process runs for # each downloaded object and spawns its own control socket SSH connection. if [ "download" = "$direction" ]; then gitversion="$(git version | cut -d" " -f3)" set +e compare_version "$gitversion" '2.11.0' result=$? set -e if [ "$result" -eq "$VERSION_LOWER" ]; then min_expected_start="$num_objs" max_expected_start="$num_objs" min_expected_end="$num_objs" max_expected_end="$num_objs" expected_ctrl="$num_objs" fi fi local max_expected_nonctrl=$(( max_expected_start - expected_ctrl )) local lines="$(grep "exec: lfs-ssh-echo.*git-lfs-transfer .*${reponame}.git $direction" "$log")" local ctrl_count="$(printf '%s' "$lines" | grep -c -- '-oControlMaster=yes')" local nonctrl_count="$(printf '%s' "$lines" | grep -c -- '-oControlMaster=no')" [ "$expected_ctrl" -eq "$ctrl_count" ] [ "$max_expected_nonctrl" -ge "$nonctrl_count" ] assert_ssh_transfer_session_counts "$log" 'spawning pure SSH connection' \ "$min_expected_start" "$max_expected_start" assert_ssh_transfer_session_counts "$log" 'pure SSH connection successful' \ "$min_expected_start" "$max_expected_start" assert_ssh_transfer_session_counts "$log" 'terminating pure SSH connection' \ "$min_expected_end" "$max_expected_end" } begin_test "batch transfers with ssh endpoint (git-lfs-transfer)" ( set -e setup_pure_ssh reponame="batch-ssh-transfer" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" sshurl=$(ssh_remote "$reponame") git config lfs.url "$sshurl" contents="test" git lfs track "*.dat" printf "%s" "$contents" > test.dat git add .gitattributes test.dat git commit -m "initial commit" # On Windows we do not multiplex SSH connections by default, so we # enforce their use in order to match other platforms' connection counts. git config --global lfs.ssh.autoMultiplex true GIT_TRACE=1 git push origin main >push.log 2>&1 assert_ssh_transfer_sessions 'push.log' 'upload' 1 8 assert_remote_object "$reponame" "$(calc_oid "$contents")" "${#contents}" cd .. GIT_TRACE=1 git clone "$sshurl" "$reponame-2" 2>&1 | tee clone.log assert_ssh_transfer_sessions 'clone.log' 'download' 1 8 cd "$reponame-2" git lfs fsck ) end_test begin_test "batch transfers with ssh endpoint and multiple objects (git-lfs-transfer)" ( set -e setup_pure_ssh reponame="batch-ssh-transfer-multiple" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents1="test1" contents2="test2" contents3="test3" git lfs track "*.dat" printf "%s" "$contents1" >test1.dat printf "%s" "$contents2" >test2.dat printf "%s" "$contents3" >test3.dat git add .gitattributes test*.dat git commit -m "initial commit" sshurl=$(ssh_remote "$reponame") git config lfs.url "$sshurl" # On Windows we do not multiplex SSH connections by default, so we # enforce their use in order to match other platforms' connection counts. git config --global lfs.ssh.autoMultiplex true GIT_TRACE=1 git push origin main >push.log 2>&1 assert_ssh_transfer_sessions 'push.log' 'upload' 3 8 assert_remote_object "$reponame" "$(calc_oid "$contents1")" "${#contents1}" assert_remote_object "$reponame" "$(calc_oid "$contents2")" "${#contents2}" assert_remote_object "$reponame" "$(calc_oid "$contents3")" "${#contents3}" cd .. 
GIT_TRACE=1 git clone "$sshurl" "$reponame-2" 2>&1 | tee clone.log assert_ssh_transfer_sessions 'clone.log' 'download' 3 8 cd "$reponame-2" git lfs fsck ) end_test begin_test "batch transfers with ssh endpoint and multiple objects and batches (git-lfs-transfer)" ( set -e setup_pure_ssh reponame="batch-ssh-transfer-multiple-batch" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents1="test1" contents2="test2" contents3="test3" git lfs track "*.dat" printf "%s" "$contents1" >test1.dat printf "%s" "$contents2" >test2.dat printf "%s" "$contents3" >test3.dat git add .gitattributes test*.dat git commit -m "initial commit" sshurl=$(ssh_remote "$reponame") git config lfs.url "$sshurl" # On Windows we do not multiplex SSH connections by default, so we # enforce their use in order to match other platforms' connection counts. git config --global lfs.ssh.autoMultiplex true # Allow no more than two objects to be transferred in each batch. git config --global lfs.concurrentTransfers 2 GIT_TRACE=1 git push origin main >push.log 2>&1 assert_ssh_transfer_sessions 'push.log' 'upload' 3 2 assert_remote_object "$reponame" "$(calc_oid "$contents1")" "${#contents1}" assert_remote_object "$reponame" "$(calc_oid "$contents2")" "${#contents2}" assert_remote_object "$reponame" "$(calc_oid "$contents3")" "${#contents3}" cd .. GIT_TRACE=1 git clone "$sshurl" "$reponame-2" 2>&1 | tee clone.log assert_ssh_transfer_sessions 'clone.log' 'download' 3 2 cd "$reponame-2" git lfs fsck ) end_test git-lfs-3.6.1/t/t-batch-unknown-oids.sh000077500000000000000000000013001472372047300176460ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "transfer queue rejects unknown OIDs" ( set -e reponame="unknown-oids" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="unknown-oid" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add objects" set +e git push origin main 2>&1 | tee push.log res="${PIPESTATUS[0]}" set -e refute_server_object "$reponame" "$(calc_oid "$contents")" if [ "0" -eq "$res" ]; then echo "push successful?" exit 1 fi grep "\[unknown-oid\] The server returned an unknown OID." push.log ) end_test git-lfs-3.6.1/t/t-checkout.sh000077500000000000000000000241421472372047300157520ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "checkout" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="something something" contentsize=19 contents_oid=$(calc_oid "$contents") # Same content everywhere is ok, just one object in lfs db printf "%s" "$contents" > file1.dat printf "%s" "$contents" > file2.dat printf "%s" "$contents" > file3.dat mkdir folder1 folder2 printf "%s" "$contents" > folder1/nested.dat printf "%s" "$contents" > folder2/nested.dat git add file1.dat file2.dat file3.dat folder1/nested.dat folder2/nested.dat git add .gitattributes git commit -m "add files" [ "$contents" = "$(cat file1.dat)" ] [ "$contents" = "$(cat file2.dat)" ] [ "$contents" = "$(cat file3.dat)" ] [ "$contents" = "$(cat folder1/nested.dat)" ] [ "$contents" = "$(cat folder2/nested.dat)" ] assert_pointer "main" "file1.dat" "$contents_oid" $contentsize # Remove the working directory rm -rf file1.dat file2.dat file3.dat folder1/nested.dat folder2/nested.dat echo "checkout should replace all" GIT_TRACE=1 git lfs checkout 2>&1 | tee checkout.log [ "$contents" = "$(cat file1.dat)" ] [ "$contents" = "$(cat file2.dat)" ] [ "$contents" = "$(cat file3.dat)" ] [ "$contents" = "$(cat folder1/nested.dat)" ] [ "$contents" = "$(cat folder2/nested.dat)" ] grep "Checking out LFS objects: 100% (5/5), 95 B" checkout.log grep 'accepting "file1.dat"' checkout.log grep 'rejecting "file1.dat"' checkout.log && exit 1 git rm file1.dat echo "checkout should skip replacing files deleted in index" git lfs checkout [ ! -f file1.dat ] git reset --hard # Remove the working directory rm -rf file1.dat file2.dat file3.dat folder1/nested.dat folder2/nested.dat echo "checkout with filters" git lfs checkout file2.dat [ "$contents" = "$(cat file2.dat)" ] [ ! -f file1.dat ] [ ! -f file3.dat ] [ ! -f folder1/nested.dat ] [ ! -f folder2/nested.dat ] echo "quotes to avoid shell globbing" git lfs checkout "file*.dat" [ "$contents" = "$(cat file1.dat)" ] [ "$contents" = "$(cat file3.dat)" ] [ ! -f folder1/nested.dat ] [ ! -f folder2/nested.dat ] echo "test subdir context" pushd folder1 git lfs checkout nested.dat [ "$contents" = "$(cat nested.dat)" ] [ ! -f ../folder2/nested.dat ] # test '.' in current dir rm nested.dat git lfs checkout . 2>&1 | tee checkout.log [ "$contents" = "$(cat nested.dat)" ] popd echo "test folder param" git lfs checkout folder2 [ "$contents" = "$(cat folder2/nested.dat)" ] echo "test '.' in current dir" rm -rf file1.dat file2.dat file3.dat folder1/nested.dat folder2/nested.dat git lfs checkout . 
[ "$contents" = "$(cat file1.dat)" ] [ "$contents" = "$(cat file2.dat)" ] [ "$contents" = "$(cat file3.dat)" ] [ "$contents" = "$(cat folder1/nested.dat)" ] [ "$contents" = "$(cat folder2/nested.dat)" ] echo "test checkout with missing data doesn't fail" git push origin main rm -rf .git/lfs/objects rm file*.dat git lfs checkout [ "$(pointer $contents_oid $contentsize)" = "$(cat file1.dat)" ] [ "$(pointer $contents_oid $contentsize)" = "$(cat file2.dat)" ] [ "$(pointer $contents_oid $contentsize)" = "$(cat file3.dat)" ] [ "$contents" = "$(cat folder1/nested.dat)" ] [ "$contents" = "$(cat folder2/nested.dat)" ] ) end_test begin_test "checkout: without clean filter" ( set -e reponame="$(basename "$0" ".sh")" git lfs uninstall git clone "$GITSERVER/$reponame" checkout-without-clean cd checkout-without-clean echo "checkout without clean filter" git lfs uninstall git config --list > config.txt grep "filter.lfs.clean" config.txt && { echo "clean filter still configured:" cat config.txt exit 1 } ls -al git lfs checkout | tee checkout.txt grep "Git LFS is not installed" checkout.txt if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected checkout to succeed ..." exit 1 fi contentsize=19 contents_oid=$(calc_oid "something something") [ "$(pointer $contents_oid $contentsize)" = "$(cat file1.dat)" ] [ "$(pointer $contents_oid $contentsize)" = "$(cat file2.dat)" ] [ "$(pointer $contents_oid $contentsize)" = "$(cat file3.dat)" ] [ "$(pointer $contents_oid $contentsize)" = "$(cat folder1/nested.dat)" ] [ "$(pointer $contents_oid $contentsize)" = "$(cat folder2/nested.dat)" ] ) end_test begin_test "checkout: outside git repository" ( set +e git lfs checkout 2>&1 > checkout.log res=$? set -e if [ "$res" = "0" ]; then echo "Passes because $GIT_LFS_TEST_DIR is unset." 
exit 0 fi [ "$res" = "128" ] grep "Not in a Git repository" checkout.log ) end_test begin_test "checkout: write-only file" ( set -e reponame="checkout-locked" filename="a.txt" setup_remote_repo_with_file "$reponame" "$filename" pushd "$TRASHDIR" > /dev/null GIT_LFS_SKIP_SMUDGE=1 clone_repo "$reponame" "${reponame}_checkout" chmod -w "$filename" refute_file_writeable "$filename" assert_pointer "refs/heads/main" "$filename" "$(calc_oid "$filename\n")" 6 git lfs fetch git lfs checkout "$filename" refute_file_writeable "$filename" [ "$filename" = "$(cat "$filename")" ] popd > /dev/null ) end_test begin_test "checkout: conflicts" ( set -e reponame="checkout-conflicts" filename="file1.dat" setup_remote_repo_with_file "$reponame" "$filename" pushd "$TRASHDIR" > /dev/null clone_repo "$reponame" "${reponame}_checkout" git tag base git checkout -b first echo "abc123" > file1.dat git add -u echo "first" > other.txt git add other.txt git commit -m "first" git lfs checkout --to base.txt 2>&1 | tee output.txt grep -- '--to and exactly one of --theirs, --ours, and --base must be used together' output.txt git lfs checkout --base 2>&1 | tee output.txt grep -- '--to and exactly one of --theirs, --ours, and --base must be used together' output.txt git lfs checkout --to base.txt --ours --theirs 2>&1 | tee output.txt grep -- 'at most one of --base, --theirs, and --ours is allowed' output.txt git lfs checkout --to base.txt --base 2>&1 | tee output.txt grep -- '--to requires exactly one Git LFS object file path' output.txt git lfs checkout --to base.txt --base 2>&1 abc def | tee output.txt grep -- '--to requires exactly one Git LFS object file path' output.txt git lfs checkout --to base.txt --base file1.dat 2>&1 | tee output.txt grep 'Could not checkout.*not in the middle of a merge' output.txt git checkout -b second main echo "def456" > file1.dat git add -u echo "second" > other.txt git add other.txt git commit -m "second" # This will cause a conflict. git merge first && exit 1 git lfs checkout --to base.txt --base file1.dat git lfs checkout --to ours.txt --ours file1.dat git lfs checkout --to theirs.txt --theirs file1.dat echo "file1.dat" | cmp - base.txt echo "abc123" | cmp - theirs.txt echo "def456" | cmp - ours.txt git lfs checkout --to base.txt --ours other.txt 2>&1 | tee output.txt grep 'Could not find decoder pointer for object' output.txt popd > /dev/null ) end_test begin_test "checkout: GIT_WORK_TREE" ( set -e reponame="checkout-work-tree" remotename="$(basename "$0" ".sh")" export GIT_WORK_TREE="$reponame" GIT_DIR="$reponame-git" mkdir "$GIT_WORK_TREE" "$GIT_DIR" git init git remote add origin "$GITSERVER/$remotename" git lfs uninstall --skip-repo git fetch origin git checkout -B main origin/main git lfs install git lfs fetch git lfs checkout contents="something something" [ "$contents" = "$(cat "$reponame/file1.dat")" ] ) end_test begin_test "checkout: sparse with partial clone and sparse index" ( set -e # Only test with Git version 2.42.0 as it introduced support for the # "objecttype" format option to the "git ls-files" command, which our # code requires. 
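  # (Illustrative only: the invocation in question resembles
  #   git ls-files --sparse --format='%(objecttype) %(objectname) %(path)'
  # where the "%(objecttype)" atom is unavailable before Git 2.42.0; the
  # exact arguments Git LFS uses internally may differ.)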
ensure_git_version_isnt "$VERSION_LOWER" "2.42.0" reponame="checkout-sparse" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" contents1="a" contents1_oid=$(calc_oid "$contents1") contents2="b" contents2_oid=$(calc_oid "$contents2") contents3="c" contents3_oid=$(calc_oid "$contents3") mkdir in-dir out-dir printf "%s" "$contents1" >a.dat printf "%s" "$contents2" >in-dir/b.dat printf "%s" "$contents3" >out-dir/c.dat git add . git commit -m "add files" git push origin main assert_server_object "$reponame" "$contents1_oid" assert_server_object "$reponame" "$contents2_oid" assert_server_object "$reponame" "$contents3_oid" # Create a partial clone with a cone-mode sparse checkout of one directory # and a sparse index, which is important because otherwise the "git ls-files" # command ignores the --sparse option and lists all Git LFS files. cd .. git clone --filter=tree:0 --depth=1 --no-checkout \ "$GITSERVER/$reponame" "${reponame}-partial" cd "${reponame}-partial" git sparse-checkout init --cone --sparse-index git sparse-checkout set "in-dir" git checkout main [ -d "in-dir" ] [ ! -e "out-dir" ] assert_local_object "$contents1_oid" 1 assert_local_object "$contents2_oid" 1 refute_local_object "$contents3_oid" # Git LFS objects associated with files outside of the sparse cone # should be ignored entirely, rather than just skipped. git lfs checkout 2>&1 | tee checkout.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected checkout to succeed ..." exit 1 fi grep -q 'Skipped checkout for "out-dir/c.dat"' checkout.log && exit 1 # Fetch all Git LFS objects, including those outside the sparse cone. git lfs fetch origin main assert_local_object "$contents3_oid" 1 # Git LFS objects associated with files outside of the sparse cone # should not be checked out. git lfs checkout 2>&1 | tee checkout.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected checkout to succeed ..." exit 1 fi grep -q 'Checking out LFS objects: 100% (3/3), 3 B' checkout.log && exit 1 [ ! -e "out-dir/c.dat" ] ) end_test git-lfs-3.6.1/t/t-cherry-pick-commits.sh000077500000000000000000000013041472372047300200310ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "cherry-pick two commits without lfs cache" ( set -e reponame="$(basename "$0" ".sh")-cherry-pick-commits" setup_remote_repo "$reponame" clone_repo "$reponame" cherrypickcommits git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" git branch secondbranch echo "smudge a" > a.dat git add a.dat git commit -m "add a.dat" commit1=$(git log -n1 --format="%H") echo "smudge b" > b.dat git add b.dat git commit -m "add b.dat" commit2=$(git log -n1 --format="%H") git push origin main git checkout secondbranch rm -rf .git/lfs/objects git cherry-pick $commit1 $commit2 ) end_test git-lfs-3.6.1/t/t-chunked-transfer-encoding.sh000077500000000000000000000034411472372047300211730ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "chunked transfer encoding" ( set -e # This initializes a new bare git repository in test/remote. # These remote repositories are global to every test, so keep the names # unique. reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" # Clone the repository from the test Git server. This is empty, and will be # used to test a "git pull" below. The repo is cloned to $TRASHDIR/clone clone_repo "$reponame" clone # Clone the repository again to $TRASHDIR/repo.
This will be used to commit # and push objects. clone_repo "$reponame" repo # This executes Git LFS from the local repo that was just cloned. git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") # Regular Git commands can be used. printf "%s" "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log grep "main (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat a.dat)" ] # This is a small shell function that runs several git commands together. assert_pointer "main" "a.dat" "$contents_oid" 1 refute_server_object "$reponame" "$contents_oid" # This pushes to the remote repository set up at the top of the test. git push origin main 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log grep "main -> main" push.log assert_server_object "$reponame" "$contents_oid" # change to the clone's working directory cd ../clone git pull origin main 2>&1 [ "a" = "$(cat a.dat)" ] assert_pointer "main" "a.dat" "$contents_oid" 1 ) end_test git-lfs-3.6.1/t/t-clean.sh000077500000000000000000000062451472372047300152330ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" clean_setup () { mkdir "$1" cd "$1" git init } begin_test "clean simple file" ( set -e clean_setup "simple" echo "whatever" | git lfs clean | tee clean.log [ "$(pointer cd293be6cea034bd45a0352775a219ef5dc7825ce55d1f7dae9762d80ce64411 9)" = "$(cat clean.log)" ] ) end_test begin_test "clean a pointer" ( set -e clean_setup "pointer" pointer cd293be6cea034bd45a0352775a219ef5dc7825ce55d1f7dae9762d80ce64411 9 | git lfs clean | tee clean.log [ "$(pointer cd293be6cea034bd45a0352775a219ef5dc7825ce55d1f7dae9762d80ce64411 9)" = "$(cat clean.log)" ] ) end_test begin_test "clean pseudo pointer" ( set -e clean_setup "pseudo" echo "version https://git-lfs.github.com/spec/v1 oid sha256:7cd8be1d2cd0dd22cd9d229bb6b5785009a05e8b39d405615d882caac56562b5 size 1024 This is my test pointer. There are many like it, but this one is mine." | git lfs clean | tee clean.log [ "$(pointer f492acbebb5faa22da4c1501c022af035469f624f426631f31936575873fefe1 202)" = "$(cat clean.log)" ] ) end_test begin_test "clean pseudo pointer with extra data" ( set -e clean_setup "extra-data" # pointer includes enough extra data to fill the 'git lfs clean' buffer printf "version https://git-lfs.github.com/spec/v1 oid sha256:7cd8be1d2cd0dd22cd9d229bb6b5785009a05e8b39d405615d882caac56562b5 size 1024 \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n This is my test pointer. 
There are many like it, but this one is mine.\n" | git lfs clean | tee clean.log [ "$(pointer c2f909f6961bf85a92e2942ef3ed80c938a3d0ebaee6e72940692581052333be 586)" = "$(cat clean.log)" ] ) end_test begin_test "clean stdin" ( set -e # git-lfs-clean(1) writes to .git/lfs/objects, and therefore must be executed # within a repository. reponame="clean-over-stdin" git init "$reponame" cd "$reponame" lfstest-genrandom --base64 1024 >small.dat lfstest-genrandom --base64 2048 >large.dat expected_small="$(calc_oid_file "small.dat")" expected_large="$(calc_oid_file "large.dat")" actual_small="$(git lfs clean < "small.dat" | grep "oid" | cut -d ':' -f 2)" actual_large="$(git lfs clean < "large.dat" | grep "oid" | cut -d ':' -f 2)" if [ "$expected_small" != "$actual_small" ]; then echo >&2 "fatal: expected small OID of: $expected_small, got: $actual_small" exit 1 fi if [ "$expected_large" != "$actual_large" ]; then echo >&2 "fatal: expected large OID of: $expected_large, got: $actual_large" exit 1 fi ) end_test git-lfs-3.6.1/t/t-clone-deprecated.sh000077500000000000000000000010701472372047300173360ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" ensure_git_version_isnt $VERSION_LOWER "2.15.0" begin_test "clone (deprecated on new versions of Git)" ( set -e reponame="clone-deprecated-recent-versions" setup_remote_repo "$reponame" mkdir -p "$reponame" pushd "$reponame" > /dev/null git lfs clone "$GITSERVER/$reponame" 2>&1 | tee clone.log grep "WARNING: \`git lfs clone\` is deprecated and will not be updated" clone.log grep "\`git clone\` has been updated in upstream Git to have comparable" clone.log popd > /dev/null ) end_test git-lfs-3.6.1/t/t-clone.sh000077500000000000000000000752461472372047300152600ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" ensure_git_version_isnt $VERSION_LOWER "2.2.0" # Check for a libnss3 dependency in Git until we drop support for CentOS 7. GIT_LIBNSS=0 if [ "$IS_WINDOWS" -eq 0 -a "$IS_MAC" -eq 0 ]; then GIT_LIBNSS="$(ldd "$(git --exec-path)"/git-remote-https | grep -c '^\s*libnss3\.' 
|| true)" fi export GIT_LIBNSS export CREDSDIR="$REMOTEDIR/creds-clone" setup_creds begin_test "clone" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Track *.dat" # generate some test data & commits with random LFS data echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":100}, {\"Filename\":\"file2.dat\",\"Size\":75}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":110}, {\"Filename\":\"file3.dat\",\"Size\":66}, {\"Filename\":\"file4.dat\",\"Size\":23}] }, { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":120}, {\"Filename\":\"file6.dat\",\"Size\":30}] } ]" | lfstest-testutils addcommits git push origin main # Now clone again, test specific clone dir cd "$TRASHDIR" newclonedir="testclone1" git lfs clone "$GITSERVER/$reponame" "$newclonedir" 2>&1 | tee lfsclone.log grep "Cloning into" lfsclone.log grep "Downloading LFS objects:" lfsclone.log # should be no filter errors grep "filter" lfsclone.log && exit 1 grep "error" lfsclone.log && exit 1 # should be cloned into location as per arg [ -d "$newclonedir" ] # check a few file sizes to make sure pulled pushd "$newclonedir" [ $(wc -c < "file1.dat") -eq 110 ] [ $(wc -c < "file2.dat") -eq 75 ] [ $(wc -c < "file3.dat") -eq 66 ] assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd # Now check clone with implied dir rm -rf "$reponame" git lfs clone "$GITSERVER/$reponame" 2>&1 | tee lfsclone.log grep "Cloning into" lfsclone.log grep "Downloading LFS objects:" lfsclone.log # should be no filter errors grep "filter" lfsclone.log && exit 1 grep "error" lfsclone.log && exit 1 # clone location should be implied [ -d "$reponame" ] pushd "$reponame" [ $(wc -c < "file1.dat") -eq 110 ] [ $(wc -c < "file2.dat") -eq 75 ] [ $(wc -c < "file3.dat") -eq 66 ] assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd # Now check clone with standard 'git clone' and smudge download rm -rf "$reponame" git clone "$GITSERVER/$reponame" 2>&1 | tee clone.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected clone to succeed ..." exit 1 fi grep "Cloning into" clone.log [ -d "$reponame" ] pushd "$reponame" [ $(wc -c < "file1.dat") -eq 110 ] [ $(wc -c < "file2.dat") -eq 75 ] [ $(wc -c < "file3.dat") -eq 66 ] assert_hooks "$(dot_git_dir)" [ ! 
-e "lfs" ] [ "6" -eq "$(find "$(dot_git_dir)/lfs/objects" -type f | wc -l)" ] assert_clean_status popd ) end_test begin_test "cloneSSL" ( set -e if [ "$GIT_LIBNSS" -eq 1 ]; then echo "skip: libnss does not support the Go httptest server certificate" exit 0 fi if [ "$IS_WINDOWS" -eq 1 ]; then git config --global "http.sslBackend" "openssl" fi reponame="test-cloneSSL" setup_remote_repo "$reponame" clone_repo_ssl "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Track *.dat" # generate some test data & commits with random LFS data echo "[ { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":100}, {\"Filename\":\"file2.dat\",\"Size\":75}] }, { \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":30}] } ]" | lfstest-testutils addcommits git push origin main # Now SSL clone again with 'git lfs clone', test specific clone dir cd "$TRASHDIR" newclonedir="testcloneSSL1" git lfs clone "$SSLGITSERVER/$reponame" "$newclonedir" 2>&1 | tee lfsclone.log grep "Cloning into" lfsclone.log grep "Downloading LFS objects:" lfsclone.log # should be no filter errors grep "filter" lfsclone.log && exit 1 grep "error" lfsclone.log && exit 1 # should be cloned into location as per arg [ -d "$newclonedir" ] # check a few file sizes to make sure pulled pushd "$newclonedir" [ $(wc -c < "file1.dat") -eq 100 ] [ $(wc -c < "file2.dat") -eq 75 ] [ $(wc -c < "file3.dat") -eq 30 ] assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd # Now check SSL clone with standard 'git clone' and smudge download rm -rf "$reponame" git clone "$SSLGITSERVER/$reponame" 2>&1 | tee clone.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected clone to succeed ..." exit 1 fi grep "Cloning into" clone.log [ -d "$reponame" ] pushd "$reponame" [ $(wc -c < "file1.dat") -eq 100 ] [ $(wc -c < "file2.dat") -eq 75 ] [ $(wc -c < "file3.dat") -eq 30 ] assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] [ "3" -eq "$(find "$(dot_git_dir)/lfs/objects" -type f | wc -l)" ] assert_clean_status popd ) end_test begin_test "clone ClientCert" ( set -e if [ "$GIT_LIBNSS" -eq 1 ]; then echo "skip: libnss does not support the Go httptest server certificate" exit 0 fi if [ "$IS_WINDOWS" -eq 1 ]; then git config --global "http.sslBackend" "openssl" fi # Note that the record files we create in the $CREDSDIR directory are not # used until we set the "http.sslCertPasswordProtected" option to "true" # and the "http..sslKey" option with the path to our TLS/SSL client # certificate's encrypted private key file. (The PEM certificate file # itself is not encrypted and does not contain the private key.) # # When these options are set, however, Git and Git LFS will independently # invoke "git credential fill" to retrieve the passphrase for the # encrypted private key. Because the "http.sslCertPasswordProtected" # option is set, Git will query the credential helper, passing a # "protocol=cert" line and a "path=" line with the path # from the "http..sslCert" option. Note that this path refers # to our unencrypted certificate file; Git does not use the path to # the encrypted private key file from the "http..sslKey" option # in its query to the credential helper. 
# # Separately, the Git LFS client will detect that the private key file # specified by the "http..sslKey" option is encrypted, and so will # invoke "git credential fill" to retrieve its passphrase, passing a # "protocol=cert" line and a "path=" line with the path # from the "http..sslKey" option. # # In order to satisfy both requests, our git-credential-lfstest helper # therefore needs two record files, both with the passphrase for the # encrypted private key file. For Git, one is associated with the path # to the certificate file, and for Git LFS, one is associated with the # path to the key file. write_creds_file "::pass" "$CREDSDIR/--$(echo "$LFS_CLIENT_CERT_FILE" | tr / -)" write_creds_file "::pass" "$CREDSDIR/--$(echo "$LFS_CLIENT_KEY_FILE_ENCRYPTED" | tr / -)" git config --global "http.$LFS_CLIENT_CERT_URL/.sslCert" "$LFS_CLIENT_CERT_FILE" git config --global "http.$LFS_CLIENT_CERT_URL/.sslKey" "$LFS_CLIENT_KEY_FILE" reponame="test-cloneClientCert" setup_remote_repo "$reponame" clone_repo_clientcert "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Track *.dat" # generate some test data & commits with random LFS data echo "[ { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":100}, {\"Filename\":\"file2.dat\",\"Size\":75}] }, { \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":30}] } ]" | lfstest-testutils addcommits git push origin main # Now clone again with 'git lfs clone', test specific clone dir # Test with both unencrypted and encrypted client certificate keys cd "$TRASHDIR" for enc in "false" "true"; do if [ "$enc" = "true" ]; then git config --global "http.$LFS_CLIENT_CERT_URL/.sslKey" "$LFS_CLIENT_KEY_FILE_ENCRYPTED" git config --global "http.sslCertPasswordProtected" "$enc" fi newclonedir="${reponame}-${enc}" git lfs clone "$CLIENTCERTGITSERVER/$reponame" "$newclonedir" 2>&1 | tee lfsclone.log grep "Cloning into" lfsclone.log grep "Downloading LFS objects:" lfsclone.log # should be no filter errors grep "filter" lfsclone.log && exit 1 grep "error" lfsclone.log && exit 1 # should be cloned into location as per arg [ -d "$newclonedir" ] # check a few file sizes to make sure pulled pushd "$newclonedir" [ $(wc -c < "file1.dat") -eq 100 ] [ $(wc -c < "file2.dat") -eq 75 ] [ $(wc -c < "file3.dat") -eq 30 ] assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd # Now check clone with standard 'git clone' and smudge download rm -rf "$reponame" git clone "$CLIENTCERTGITSERVER/$reponame" 2>&1 | tee clone.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected clone to succeed ..." exit 1 fi grep "Cloning into" clone.log [ -d "$reponame" ] pushd "$reponame" [ $(wc -c < "file1.dat") -eq 100 ] [ $(wc -c < "file2.dat") -eq 75 ] [ $(wc -c < "file3.dat") -eq 30 ] assert_hooks "$(dot_git_dir)" [ ! 
-e "lfs" ] [ "3" -eq "$(find "$(dot_git_dir)/lfs/objects" -type f | wc -l)" ] assert_clean_status popd done ) end_test begin_test "clone ClientCert with homedir certs" ( set -e if [ "$GIT_LIBNSS" -eq 1 ]; then echo "skip: libnss does not support the Go httptest server certificate" exit 0 fi if [ "$IS_WINDOWS" -eq 1 ]; then git config --global "http.sslBackend" "openssl" fi cp "$LFS_CLIENT_CERT_FILE" "$HOME/lfs-client-cert-file" cp "$LFS_CLIENT_KEY_FILE" "$HOME/lfs-client-key-file" cp "$LFS_CLIENT_KEY_FILE_ENCRYPTED" "$HOME/lfs-client-key-file-encrypted" # Note that the record files we create in the $CREDSDIR directory are not # used until we set the "http.sslCertPasswordProtected" option to "true" # and the "http..sslKey" option with the path to our TLS/SSL client # certificate's encrypted private key file. (The PEM certificate file # itself is not encrypted and does not contain the private key.) # # When these options are set, however, Git and Git LFS will independently # invoke "git credential fill" to retrieve the passphrase for the # encrypted private key. Because the "http.sslCertPasswordProtected" # option is set, Git will query the credential helper, passing a # "protocol=cert" line and a "path=" line with the path # from the "http..sslCert" option. Note that this path refers # to our unencrypted certificate file; Git does not use the path to # the encrypted private key file from the "http..sslKey" option # in its query to the credential helper. # # Separately, the Git LFS client will detect that the private key file # specified by the "http..sslKey" option is encrypted, and so will # invoke "git credential fill" to retrieve its passphrase, passing a # "protocol=cert" line and a "path=" line with the path # from the "http..sslKey" option. # # In order to satisfy both requests, our git-credential-lfstest helper # therefore needs two record files, both with the passphrase for the # encrypted private key file. For Git, one is associated with the path # to the certificate file, and for Git LFS, one is associated with the # path to the key file. if [ "$IS_WINDOWS" -eq 1 ]; then # In our MSYS2 CI environment we have to convert the Unix-style path # in $HOME, which starts with /tmp/, into a path of the form /a/... # so that the credential record filename we create from it matches # the one our git-credential-lfstest helper will construct from the # "path" values it receives from Git and Git LFS. 
homedir="$(cygpath -m "$HOME" | sed 's,^\([A-Z]\):,/\L\1,')" else homedir="$HOME" fi write_creds_file "::pass" "$CREDSDIR/--$(echo "$homedir/lfs-client-cert-file" | tr / -)" write_creds_file "::pass" "$CREDSDIR/--$(echo "$homedir/lfs-client-key-file-encrypted" | tr / -)" git config --global "http.$LFS_CLIENT_CERT_URL/.sslCert" "~/lfs-client-cert-file" git config --global "http.$LFS_CLIENT_CERT_URL/.sslKey" "~/lfs-client-key-file" reponame="test-cloneClientCert-homedir" setup_remote_repo "$reponame" clone_repo_clientcert "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Track *.dat" # generate some test data & commits with random LFS data echo "[ { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":100}, {\"Filename\":\"file2.dat\",\"Size\":75}] }, { \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":30}] } ]" | lfstest-testutils addcommits git push origin main # Now clone again with 'git lfs clone', test specific clone dir # Test with both unencrypted and encrypted client certificate keys cd "$TRASHDIR" for enc in "false" "true"; do if [ "$enc" = "true" ]; then git config --global "http.$LFS_CLIENT_CERT_URL/.sslKey" "~/lfs-client-key-file-encrypted" git config --global "http.sslCertPasswordProtected" "$enc" fi newclonedir="${reponame}-${enc}" git lfs clone "$CLIENTCERTGITSERVER/$reponame" "$newclonedir" 2>&1 | tee lfsclone.log grep "Cloning into" lfsclone.log grep "Downloading LFS objects:" lfsclone.log # should be no filter errors grep "filter" lfsclone.log && exit 1 grep "error" lfsclone.log && exit 1 # should be cloned into location as per arg [ -d "$newclonedir" ] # check a few file sizes to make sure pulled pushd "$newclonedir" [ $(wc -c < "file1.dat") -eq 100 ] [ $(wc -c < "file2.dat") -eq 75 ] [ $(wc -c < "file3.dat") -eq 30 ] assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd # Now check clone with standard 'git clone' and smudge download rm -rf "$reponame" git clone "$CLIENTCERTGITSERVER/$reponame" 2>&1 | tee clone.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected clone to succeed ..." exit 1 fi grep "Cloning into" clone.log [ -d "$reponame" ] pushd "$reponame" [ $(wc -c < "file1.dat") -eq 100 ] [ $(wc -c < "file2.dat") -eq 75 ] [ $(wc -c < "file3.dat") -eq 30 ] assert_hooks "$(dot_git_dir)" [ ! 
-e "lfs" ] [ "3" -eq "$(find "$(dot_git_dir)/lfs/objects" -type f | wc -l)" ] assert_clean_status popd done ) end_test begin_test "clone with flags" ( set -e reponame="$(basename "$0" ".sh")-flags" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Track *.dat" # generate some test data & commits with random LFS data echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":100}, {\"Filename\":\"file2.dat\",\"Size\":75}] }, { \"CommitDate\":\"$(get_date -7d)\", \"NewBranch\":\"branch2\", \"Files\":[ {\"Filename\":\"fileonbranch2.dat\",\"Size\":66}] }, { \"CommitDate\":\"$(get_date -3d)\", \"ParentBranches\":[\"main\"], \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":120}, {\"Filename\":\"file4.dat\",\"Size\":30}] } ]" | lfstest-testutils addcommits git push origin main branch2 # Now clone again, test specific clone dir cd "$TRASHDIR" mkdir "$TRASHDIR/templatedir" newclonedir="testflagsclone1" # many of these flags won't do anything but make sure they're not rejected git lfs clone --template "$TRASHDIR/templatedir" --local --no-hardlinks --shared --verbose --progress --recursive "$GITSERVER/$reponame" "$newclonedir" rm -rf "$newclonedir" # specific test for --no-checkout git lfs clone --quiet --no-checkout "$GITSERVER/$reponame" "$newclonedir" if [ -e "$newclonedir/file1.dat" ]; then exit 1 fi rm -rf "$newclonedir" # specific test for --branch and --origin git lfs clone --branch branch2 --recurse-submodules --origin differentorigin "$GITSERVER/$reponame" "$newclonedir" pushd "$newclonedir" # this file is only on branch2 [ -e "fileonbranch2.dat" ] # confirm remote is called differentorigin git remote get-url differentorigin assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd rm -rf "$newclonedir" # specific test for --separate-git-dir gitdir="$TRASHDIR/separategitdir" git lfs clone --separate-git-dir "$gitdir" "$GITSERVER/$reponame" "$newclonedir" # .git should be a file not dir if [ -d "$newclonedir/.git" ]; then exit 1 fi [ -e "$newclonedir/.git" ] [ -d "$gitdir/objects" ] assert_hooks "$gitdir" pushd "$newclonedir" [ ! 
-e "lfs" ] assert_clean_status popd rm -rf "$newclonedir" rm -rf "$gitdir" # specific test for --bare git lfs clone --bare "$GITSERVER/$reponame" "$newclonedir" [ -d "$newclonedir/objects" ] rm -rf "$newclonedir" # short flags git lfs clone -l -v -n -s -b branch2 "$GITSERVER/$reponame" "$newclonedir" rm -rf "$newclonedir" ) end_test begin_test "clone (with include/exclude args)" ( set -e reponame="clone_include_exclude" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents_a="a" contents_a_oid=$(calc_oid "$contents_a") printf "%s" "$contents_a" > "a.dat" printf "%s" "$contents_a" > "a-dupe.dat" printf "%s" "$contents_a" > "dupe-a.dat" contents_b="b" contents_b_oid=$(calc_oid "$contents_b") printf "%s" "$contents_b" > "b.dat" git add *.dat .gitattributes git commit -m "add a.dat, b.dat" 2>&1 | tee commit.log grep "main (root-commit)" commit.log grep "5 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 a-dupe.dat" commit.log grep "create mode 100644 dupe-a.dat" commit.log grep "create mode 100644 b.dat" commit.log grep "create mode 100644 .gitattributes" commit.log git push origin main 2>&1 | tee push.log grep "main -> main" push.log grep "Uploading LFS objects: 100% (2/2), 2 B" push.log cd "$TRASHDIR" local_reponame="clone_with_includes" git lfs clone "$GITSERVER/$reponame" "$local_reponame" -I "a*.dat" pushd "$local_reponame" assert_local_object "$contents_a_oid" 1 refute_local_object "$contents_b_oid" [ "a" = "$(cat a.dat)" ] [ "a" = "$(cat a-dupe.dat)" ] [ "$(pointer $contents_a_oid 1)" = "$(cat dupe-a.dat)" ] [ "$(pointer $contents_b_oid 1)" = "$(cat b.dat)" ] assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd local_reponame="clone_with_excludes" git lfs clone "$GITSERVER/$reponame" "$local_reponame" -I "b.dat" -X "a.dat" pushd "$local_reponame" assert_local_object "$contents_b_oid" 1 refute_local_object "$contents_a_oid" [ "$(pointer $contents_a_oid 1)" = "$(cat a.dat)" ] [ "b" = "$(cat b.dat)" ] assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd ) end_test begin_test "clone (with .lfsconfig)" ( set -e reponame="clone_with_lfsconfig" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents_a="a" contents_a_oid=$(calc_oid "$contents_a") printf "%s" "$contents_a" > "a.dat" contents_b="b" contents_b_oid=$(calc_oid "$contents_b") printf "%s" "$contents_b" > "b.dat" git add a.dat b.dat .gitattributes git commit -m "add a.dat, b.dat" 2>&1 | tee commit.log grep "main (root-commit)" commit.log grep "3 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 b.dat" commit.log grep "create mode 100644 .gitattributes" commit.log git config -f ".lfsconfig" "lfs.fetchinclude" "a*" git add ".lfsconfig" git commit -m "config lfs.fetchinclude a*" 2>&1 | tee commit.log grep "main" commit.log grep "1 file changed" commit.log grep "create mode 100644 .lfsconfig" commit.log git push origin main 2>&1 | tee push.log grep "main -> main" push.log grep "Uploading LFS objects: 100% (2/2), 2 B" push.log pushd "$TRASHDIR" echo "test: clone with lfs.fetchinclude in .lfsconfig" local_reponame="clone_with_config_include" set +x git lfs clone "$GITSERVER/$reponame" "$local_reponame" ok="$?" 
set -x if [ "0" -ne "$ok" ]; then # TEMP: used to catch transient failure from above `clone` command, as in: # https://github.com/git-lfs/git-lfs/pull/1782#issuecomment-267678319 echo >&2 "[!] \`git lfs clone $GITSERVER/$reponame $local_reponame\` failed" git lfs logs last exit 1 fi pushd "$local_reponame" assert_local_object "$contents_a_oid" 1 refute_local_object "$contents_b_oid" assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd echo "test: clone with lfs.fetchinclude in .lfsconfig, and args" local_reponame="clone_with_config_include_and_args" git lfs clone "$GITSERVER/$reponame" "$local_reponame" -I "b.dat" pushd "$local_reponame" refute_local_object "$contents_a_oid" assert_local_object "$contents_b_oid" 1 assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd popd git config -f ".lfsconfig" "lfs.fetchinclude" "b*" git config -f ".lfsconfig" "lfs.fetchexclude" "a*" git add .lfsconfig git commit -m "config lfs.fetchinclude a*" 2>&1 | tee commit.log grep "main" commit.log grep "1 file changed" commit.log git push origin main 2>&1 | tee push.log grep "main -> main" push.log pushd "$TRASHDIR" echo "test: clone with lfs.fetchexclude in .lfsconfig" local_reponame="clone_with_config_exclude" git lfs clone "$GITSERVER/$reponame" "$local_reponame" pushd "$local_reponame" cat ".lfsconfig" assert_local_object "$contents_b_oid" 1 refute_local_object "$contents_a_oid" assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd echo "test: clone with lfs.fetchexclude in .lfsconfig, and args" local_reponame="clone_with_config_exclude_and_args" git lfs clone "$GITSERVER/$reponame" "$local_reponame" -I "a.dat" -X "b.dat" pushd "$local_reponame" assert_local_object "$contents_a_oid" 1 refute_local_object "$contents_b_oid" assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd popd ) end_test begin_test "clone (without clean filter)" ( set -e reponame="clone_with_clean" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents_a="a" contents_a_oid=$(calc_oid "$contents_a") printf "%s" "$contents_a" > "a.dat" git add *.dat .gitattributes git commit -m "add a.dat, b.dat" 2>&1 | tee commit.log grep "main (root-commit)" commit.log git push origin main 2>&1 | tee push.log grep "main -> main" push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log cd "$TRASHDIR" git lfs uninstall git config --list > config.txt grep "filter.lfs.clean" config.txt && { echo "clean filter still configured:" cat config.txt exit 1 } local_reponame="clone_without_clean" git lfs clone "$GITSERVER/$reponame" "$local_reponame" -I "a*.dat" | tee clone.txt if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected clone to succeed ..." exit 1 fi grep "Git LFS is not installed" clone.txt cd "$local_reponame" assert_local_object "$contents_a_oid" 1 [ "$(pointer $contents_a_oid 1)" = "$(cat a.dat)" ] ) end_test begin_test "clone with submodules" ( set -e # set up a doubly nested submodule, each with LFS content reponame="submod-root" submodname1="submod-level1" submodname2="submod-level2" setup_remote_repo "$reponame" setup_remote_repo "$submodname1" setup_remote_repo "$submodname2" clone_repo "$submodname2" submod2 git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents_sub2="Inception. Now, before you bother telling me it's impossible..." 
contents_sub2_oid=$(calc_oid "$contents_sub2") printf "%s" "$contents_sub2" > "sub2.dat" git add sub2.dat .gitattributes git commit -m "Nested submodule level 2" git push origin main clone_repo "$submodname1" submod1 git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents_sub1="We're dreaming?" contents_sub1_oid=$(calc_oid "$contents_sub1") printf "%s" "$contents_sub1" > "sub1.dat" # add submodule2 as submodule of submodule1 git submodule add "$GITSERVER/$submodname2" sub2 git submodule update git add sub2 sub1.dat .gitattributes git commit -m "Nested submodule level 1" git push origin main clone_repo "$reponame" rootrepo git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents_root="Downwards is the only way forwards." contents_root_oid=$(calc_oid "$contents_root") printf "%s" "$contents_root" > "root.dat" # add submodule1 as submodule of root git submodule add "$GITSERVER/$submodname1" sub1 git submodule update git add sub1 root.dat .gitattributes git commit -m "Root repo" git push origin main pushd "$TRASHDIR" local_reponame="submod-clone" git lfs clone --recursive "$GITSERVER/$reponame" "$local_reponame" # check everything is where it should be cd $local_reponame assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status # check LFS store and working copy assert_local_object "$contents_root_oid" "${#contents_root}" [ $(wc -c < "root.dat") -eq ${#contents_root} ] # and so on for nested subs cd sub1 assert_local_object "$contents_sub1_oid" "${#contents_sub1}" [ $(wc -c < "sub1.dat") -eq ${#contents_sub1} ] cd sub2 assert_local_object "$contents_sub2_oid" "${#contents_sub2}" [ $(wc -c < "sub2.dat") -eq ${#contents_sub2} ] popd ) end_test begin_test "clone in current directory" ( set -e reponame="clone_in_current_dir" setup_remote_repo "$reponame" clone_repo "$reponame" $reponame git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="contents" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git add .gitattributes a.dat git commit -m "initial commit" 2>&1 | tee commit.log grep "main (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log git push origin main 2>&1 | tee push.log pushd $TRASHDIR mkdir "$reponame-clone" cd "$reponame-clone" git lfs clone $GITSERVER/$reponame "." git lfs fsck assert_local_object "$contents_oid" 8 assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd ) end_test begin_test "clone empty repository" ( set -e reponame="clone_empty" setup_remote_repo "$reponame" cd "$TRASHDIR" git lfs clone "$GITSERVER/$reponame" "$reponame" 2>&1 | tee clone.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected clone to succeed ..." exit 1 fi ) end_test begin_test "clone bare empty repository" ( set -e reponame="clone_bare_empty" setup_remote_repo "$reponame" cd "$TRASHDIR" git lfs clone "$GITSERVER/$reponame" "$reponame" --bare 2>&1 | tee clone.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected clone to succeed ..." 
exit 1 fi ) end_test begin_test "clone (HTTP server/proxy require cookies)" ( set -e # golang net.http.Cookie ignores cookies with IP instead of domain/hostname GITSERVER=$(echo "$GITSERVER" | sed 's/127\.0\.0\.1/localhost/') cp "$CREDSDIR/127.0.0.1" "$CREDSDIR/localhost" printf "localhost\tTRUE\t/\tFALSE\t2145916800\tCOOKIE_GITLFS\tsecret\n" >> "$REMOTEDIR/cookies.txt" git config --global http.cookieFile "$REMOTEDIR/cookies.txt" reponame="require-cookie-test" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Track *.dat" # generate some test data & commits with random LFS data echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":100}, {\"Filename\":\"file2.dat\",\"Size\":75}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":110}, {\"Filename\":\"file3.dat\",\"Size\":66}, {\"Filename\":\"file4.dat\",\"Size\":23}] }, { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":120}, {\"Filename\":\"file6.dat\",\"Size\":30}] } ]" | lfstest-testutils addcommits git push origin main # Now clone again, test specific clone dir cd "$TRASHDIR" newclonedir="require-cookie-test1" git lfs clone "$GITSERVER/$reponame" "$newclonedir" 2>&1 | tee lfsclone.log grep "Cloning into" lfsclone.log grep "Downloading LFS objects:" lfsclone.log # should be no filter errors grep "filter" lfsclone.log && exit 1 grep "error" lfsclone.log && exit 1 # should be cloned into location as per arg [ -d "$newclonedir" ] # check a few file sizes to make sure pulled pushd "$newclonedir" [ $(wc -c < "file1.dat") -eq 110 ] [ $(wc -c < "file2.dat") -eq 75 ] [ $(wc -c < "file3.dat") -eq 66 ] assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd # Now check clone with implied dir rm -rf "$reponame" git lfs clone "$GITSERVER/$reponame" 2>&1 | tee lfsclone.log grep "Cloning into" lfsclone.log grep "Downloading LFS objects:" lfsclone.log # should be no filter errors grep "filter" lfsclone.log && exit 1 grep "error" lfsclone.log && exit 1 # clone location should be implied [ -d "$reponame" ] pushd "$reponame" [ $(wc -c < "file1.dat") -eq 110 ] [ $(wc -c < "file2.dat") -eq 75 ] [ $(wc -c < "file3.dat") -eq 66 ] assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd ) end_test git-lfs-3.6.1/t/t-commit-delete-push.sh000077500000000000000000000030031472372047300176430ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "commit, delete, then push" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" deleted_oid=$(calc_oid "deleted\n") echo "deleted" > deleted.dat git add deleted.dat .gitattributes git commit -m "add deleted file" git lfs push origin main --dry-run | grep "push ee31ef227442936872744b50d3297385c08b40ffc7baeaf34a39e6d81d6cd9ee => deleted.dat" assert_pointer "main" "deleted.dat" "$deleted_oid" 8 added_oid=$(calc_oid "added\n") echo "added" > added.dat git add added.dat git commit -m "add file" git lfs push origin main --dry-run | tee dryrun.log grep "push ee31ef227442936872744b50d3297385c08b40ffc7baeaf34a39e6d81d6cd9ee => deleted.dat" dryrun.log grep "push 3428719b7688c78a0cc8ba4b9e80b4e464c815fbccfd4b20695a15ffcefc22af => added.dat" dryrun.log git rm deleted.dat git commit -m "did not need deleted.dat after all" git lfs push origin main --dry-run 2>&1 | tee dryrun.log grep "push ee31ef227442936872744b50d3297385c08b40ffc7baeaf34a39e6d81d6cd9ee => deleted.dat" dryrun.log grep "push 3428719b7688c78a0cc8ba4b9e80b4e464c815fbccfd4b20695a15ffcefc22af => added.dat" dryrun.log git log git push origin main 2>&1 > push.log || { cat push.log git lfs logs last exit 1 } grep "(2 of 2 files)" push.log | cat push.log assert_server_object "$reponame" "$deleted_oid" assert_server_object "$reponame" "$added_oid" ) end_test git-lfs-3.6.1/t/t-completion.sh000077500000000000000000000014151472372047300163140ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "completion: bash script" ( set -e git lfs completion bash | cmp - "$COMPLETIONSDIR/git-lfs-completion.bash" ) end_test begin_test "completion: fish script" ( set -e git lfs completion fish | cmp - "$COMPLETIONSDIR/git-lfs-completion.fish" ) end_test begin_test "completion: zsh script" ( set -e git lfs completion zsh | cmp - "$COMPLETIONSDIR/git-lfs-completion.zsh" ) end_test begin_test "completion: missing shell argument" ( set -e git lfs completion 2>&1 | tee completion.log grep "accepts 1 arg" completion.log ) end_test begin_test "completion: invalid shell argument" ( set -e git lfs completion ksh 2>&1 | tee completion.log grep "invalid argument" completion.log ) end_test git-lfs-3.6.1/t/t-config.sh000077500000000000000000000162371472372047300154200ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "default config" ( set -e reponame="default-config" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/$reponame" git lfs env | tee env.log grep "Endpoint=$GITSERVER/$reponame.git/info/lfs (auth=none)" env.log git config --file=.gitconfig lfs.url http://gitconfig-file-ignored git config --file=.lfsconfig lfs.url http://lfsconfig-file git config --file=.lfsconfig lfs.http://lfsconfig-file.access lfsconfig git lfs env | tee env.log grep "Endpoint=http://lfsconfig-file (auth=lfsconfig)" env.log git config --file=.lfsconfig --unset lfs.url git config --file=.lfsconfig --unset lfs.http://lfsconfig-file.access # new endpoint url from local git config # access setting no longer applied git config lfs.url http://local-lfsconfig git lfs env | tee env.log grep "Endpoint=http://local-lfsconfig (auth=none)" env.log # add the access setting to lfsconfig git config --file=.lfsconfig lfs.http://local-lfsconfig.access lfsconfig git lfs env | tee env.log grep "Endpoint=http://local-lfsconfig (auth=lfsconfig)" env.log git config --file=.lfsconfig --unset lfs.http://local-lfsconfig.access # add the access setting to git config git config lfs.http://local-lfsconfig.access gitconfig git lfs env | tee env.log grep "Endpoint=http://local-lfsconfig (auth=gitconfig)" env.log ) end_test begin_test "config reads from repository" ( set -e reponame="repository-config" setup_remote_repo "$reponame" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/$reponame" git lfs env | tee env.log grep "Endpoint=$GITSERVER/$reponame.git/info/lfs (auth=none)" env.log git config --file=.lfsconfig lfs.url http://lfsconfig-file git config --file=.lfsconfig lfs.http://lfsconfig-file.access lfsconfig git add .lfsconfig git commit -m 'Add file' git push origin HEAD git checkout -b side git config --file=.lfsconfig lfs.url http://lfsconfig-file-side git config --file=.lfsconfig lfs.http://lfsconfig-file-side.access lfsconfig git add .lfsconfig git commit -m 'Add file for side' git push origin HEAD mkdir "../$reponame-2" cd "../$reponame-2" git init git remote add origin "$GITSERVER/$reponame" git lfs env | tee env.log grep "Endpoint=$GITSERVER/$reponame.git/info/lfs (auth=none)" env.log git fetch origin git symbolic-ref HEAD refs/remotes/origin/side git show "HEAD:.lfsconfig" git lfs env | tee env.log grep "Endpoint=http://lfsconfig-file-side (auth=lfsconfig)" env.log git read-tree refs/remotes/origin/main git lfs env | tee env.log grep "Endpoint=http://lfsconfig-file (auth=lfsconfig)" env.log ) end_test begin_test "can read LFS file with name before .lfsconfig" ( set -e reponame="early-file-config" setup_remote_repo "$reponame" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/$reponame" git lfs track "*.bin" git config --file=.lfsconfig lfs.url "$GITSERVER/$reponame.git/info/lfs" echo "abc" > .bin echo "def" > a.bin git add . git commit -m "Add files" git push origin HEAD rm -fr .git/lfs/objects cd .. 
git clone "$reponame" "$reponame-2" cd "$reponame-2" grep abc .bin grep def a.bin ) end_test begin_test "extension config" ( set -e git config --global lfs.extension.env-test.clean "env-test-clean" git config --global lfs.extension.env-test.smudge "env-test-smudge" git config --global lfs.extension.env-test.priority 0 reponame="extension-config" mkdir $reponame cd $reponame git init expected0="Extension: env-test clean = env-test-clean smudge = env-test-smudge priority = 0" [ "$expected0" = "$(git lfs ext)" ] # any git config takes precedence over .lfsconfig git config --global --unset lfs.extension.env-test.priority git config --file=.lfsconfig lfs.extension.env-test.clean "file-env-test-clean" git config --file=.lfsconfig lfs.extension.env-test.smudge "file-env-test-smudge" git config --file=.lfsconfig lfs.extension.env-test.priority 1 cat .lfsconfig expected1="Extension: env-test clean = env-test-clean smudge = env-test-smudge priority = 1" [ "$expected1" = "$(GIT_TRACE=5 git lfs ext)" ] git config lfs.extension.env-test.clean "local-env-test-clean" git config lfs.extension.env-test.smudge "local-env-test-smudge" git config lfs.extension.env-test.priority 2 expected2="Extension: env-test clean = local-env-test-clean smudge = local-env-test-smudge priority = 2" [ "$expected2" = "$(git lfs ext)" ] ) end_test begin_test "url alias config" ( set -e mkdir url-alias cd url-alias git init # When more than one insteadOf strings match a given URL, the longest match is used. git config url."http://wrong-url/".insteadOf alias git config url."http://actual-url/".insteadOf alias: git config lfs.url alias:rest git lfs env | tee env.log grep "Endpoint=http://actual-url/rest (auth=none)" env.log ) end_test begin_test "ambiguous url alias" ( set -e mkdir url-alias-ambiguous cd url-alias-ambiguous git init git config url."http://actual-url/".insteadOf alias: git config url."http://dupe-url".insteadOf alias: git config lfs.url alias:rest git config -l | grep url git lfs env 2>&1 | tee env2.log grep "warning: Multiple 'url.*.insteadof'" env2.log ) end_test begin_test "multiple config" ( set -e mkdir url-alias-multiple cd url-alias-multiple git init # When more than one insteadOf strings match a given URL, the longest match is used. git config url."http://wrong-url/".insteadOf alias git config url."http://actual-url/".insteadOf alias: git config --add url."http://actual-url/".insteadOf alias2: git config lfs.url alias:rest git lfs env | tee env.log grep "Endpoint=http://actual-url/rest (auth=none)" env.log git config lfs.url alias2:rest git lfs env | tee env.log grep "Endpoint=http://actual-url/rest (auth=none)" env.log ) end_test begin_test "url alias must be prefix" ( set -e mkdir url-alias-bad cd url-alias-bad git init git config url."http://actual-url/".insteadOf alias: git config lfs.url badalias:rest git lfs env | tee env.log grep "SSH=badalias:rest" env.log ) end_test begin_test "config: ignoring unsafe lfsconfig keys" ( set -e reponame="config-unsafe-lfsconfig-keys" git init "$reponame" cd "$reponame" # Insert an 'unsafe' key into this repository's '.lfsconfig'. 
git config --file=.lfsconfig core.askpass unsafe git lfs env 2>&1 | tee status.log grep "warning: These unsafe '.lfsconfig' keys were ignored:" status.log grep " core.askpass" status.log ) end_test begin_test "config respects include.* directives when GIT_CONFIG is set" ( set -e mkdir include-directives cd include-directives git init git config lfs.url "http://some-url/rest" GIT_CONFIG="$(pwd)/.git/config" git lfs env | tee env.log grep "Endpoint=http://some-url/rest (auth=none)" env.log git config --file ./.git/b.config url."http://other-url/".insteadOf "http://some-url/" git config include.path "$(pwd)/.git/b.config" GIT_CONFIG="$(pwd)/.git/config" git lfs env | tee env.log grep "Endpoint=http://other-url/rest (auth=none)" env.log ) end_test git-lfs-3.6.1/t/t-content-type.sh000077500000000000000000000033741472372047300166020ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "content-type: is enabled by default" ( set -e reponame="content-type-enabled-default" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.tar.gz" printf "aaaaaaaaaa" > a.txt tar -czf a.tar.gz a.txt rm a.txt git add .gitattributes a.tar.gz git commit -m "initial commit" GIT_CURL_VERBOSE=1 git push origin main 2>&1 | tee push.log [ 1 -eq "$(grep -c "Content-Type: application/x-gzip" push.log)" ] [ 0 -eq "$(grep -c "Content-Type: application/octet-stream" push.log)" ] ) end_test begin_test "content-type: is disabled by configuration" ( set -e reponame="content-type-disabled-by-configuration" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.tar.gz" printf "aaaaaaaaaa" > a.txt tar -czf a.tar.gz a.txt rm a.txt git add .gitattributes a.tar.gz git commit -m "initial commit" git config "lfs.$GITSERVER.contenttype" 0 GIT_CURL_VERBOSE=1 git push origin main 2>&1 | tee push.log [ 0 -eq "$(grep -c "Content-Type: application/x-gzip" push.log)" ] [ 1 -eq "$(grep -c "Content-Type: application/octet-stream" push.log)" ] ) end_test begin_test "content-type: warning message" ( set -e reponame="content-type-warning-message" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.txt" printf "status-storage-422" > a.txt git add .gitattributes a.txt git commit -m "initial commit" git push origin main 2>&1 | tee push.log grep "info: Uploading failed due to unsupported Content-Type header(s)." push.log grep "info: Consider disabling Content-Type detection with:" push.log grep "info: $ git config lfs.contenttype false" push.log ) end_test git-lfs-3.6.1/t/t-credentials-no-prompt.sh000077500000000000000000000025731472372047300203770ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" # these tests rely on GIT_TERMINAL_PROMPT to test properly ensure_git_version_isnt $VERSION_LOWER "2.3.0" begin_test "attempt private access without credential helper" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" without-creds git lfs track "*.dat" echo "hi" > hi.dat git add hi.dat git add .gitattributes git commit -m "initial commit" git config --global credential.helper lfsnoop git config credential.helper lfsnoop git config -l GIT_TERMINAL_PROMPT=0 git push origin main 2>&1 | tee push.log grep "Authorization error: $GITSERVER/$reponame" push.log || grep "Git credentials for $GITSERVER/$reponame not found" push.log ) end_test begin_test "askpass: push with bad askpass" ( set -e reponame="askpass-with-bad-askpass" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "hello" > a.dat git add .gitattributes a.dat git commit -m "initial commit" git config "credential.helper" "" GIT_TERMINAL_PROMPT=0 GIT_ASKPASS="lfs-askpass-2" SSH_ASKPASS="dont-call-me" GIT_TRACE=1 git push origin main 2>&1 | tee push.log grep "failed to find GIT_ASKPASS command" push.log # attempt askpass grep "creds: git credential fill" push.log # attempt git credential ) end_test git-lfs-3.6.1/t/t-credentials-protect.sh000077500000000000000000000113701472372047300201170ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" ensure_git_version_isnt $VERSION_LOWER "2.3.0" export CREDSDIR="$REMOTEDIR/creds-credentials-protect" setup_creds # Copy the default record file for the test credential helper to match the # hostname used in the Git LFS configurations of the tests. cp "$CREDSDIR/127.0.0.1" "$CREDSDIR/localhost" begin_test "credentials rejected with line feed" ( set -e reponame="protect-linefeed" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="a" contents_oid=$(calc_oid "$contents") git lfs track "*.dat" printf "%s" "$contents" >a.dat git add .gitattributes a.dat git commit -m "add a.dat" # Using localhost instead of 127.0.0.1 in the LFS API URL ensures this URL # is used when filling credentials rather than the Git remote URL, which # would otherwise be used since it would have the same scheme and hostname. gitserver="$(echo "$GITSERVER" | sed 's/127\.0\.0\.1/localhost/')" testreponame="test%0a$reponame" git config lfs.url "$gitserver/$testreponame.git/info/lfs" GIT_TRACE=1 git lfs push origin main 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected 'git lfs push' to fail ..." exit 1 fi grep "batch response: Git credentials for $gitserver.* not found" push.log grep "credential value for path contains newline" push.log refute_server_object "$testreponame" "$contents_oid" git config credential.protectProtocol false GIT_TRACE=1 git lfs push origin main 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected 'git lfs push' to fail ..." 
exit 1 fi grep "batch response: Git credentials for $gitserver.* not found" push.log grep "credential value for path contains newline" push.log refute_server_object "$testreponame" "$contents_oid" ) end_test begin_test "credentials rejected with carriage return" ( set -e reponame="protect-return" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="a" contents_oid=$(calc_oid "$contents") git lfs track "*.dat" printf "%s" "$contents" >a.dat git add .gitattributes a.dat git commit -m "add a.dat" # Using localhost instead of 127.0.0.1 in the LFS API URL ensures this URL # is used when filling credentials rather than the Git remote URL, which # would otherwise be used since it would have the same scheme and hostname. gitserver="$(echo "$GITSERVER" | sed 's/127\.0\.0\.1/localhost/')" testreponame="test%0d$reponame" git config lfs.url "$gitserver/$testreponame.git/info/lfs" GIT_TRACE=1 git lfs push origin main 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected 'git lfs push' to fail ..." exit 1 fi grep "batch response: Git credentials for $gitserver.* not found" push.log grep "credential value for path contains carriage return" push.log refute_server_object "$testreponame" "$contents_oid" git config credential.protectProtocol false git lfs push origin main 2>&1 | tee push.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected 'git lfs push' to succeed ..." exit 1 fi [ $(grep -c "Uploading LFS objects: 100% (1/1)" push.log) -eq 1 ] assert_server_object "$testreponame" "$contents_oid" ) end_test begin_test "credentials rejected with null byte" ( set -e reponame="protect-null" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="a" contents_oid=$(calc_oid "$contents") git lfs track "*.dat" printf "%s" "$contents" >a.dat git add .gitattributes a.dat git commit -m "add a.dat" # Using localhost instead of 127.0.0.1 in the LFS API URL ensures this URL # is used when filling credentials rather than the Git remote URL, which # would otherwise be used since it would have the same scheme and hostname. gitserver="$(echo "$GITSERVER" | sed 's/127\.0\.0\.1/localhost/')" testreponame="test%00$reponame" git config lfs.url "$gitserver/$testreponame.git/info/lfs" GIT_TRACE=1 git lfs push origin main 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected 'git lfs push' to fail ..." exit 1 fi grep "batch response: Git credentials for $gitserver.* not found" push.log grep "credential value for path contains null byte" push.log refute_server_object "$testreponame" "$contents_oid" git config credential.protectProtocol false GIT_TRACE=1 git lfs push origin main 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected 'git lfs push' to fail ..." exit 1 fi grep "batch response: Git credentials for $gitserver.* not found" push.log grep "credential value for path contains null byte" push.log refute_server_object "$testreponame" "$contents_oid" ) end_test git-lfs-3.6.1/t/t-credentials.sh000077500000000000000000000531621472372047300164460ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" ensure_git_version_isnt $VERSION_LOWER "2.3.0" export CREDSDIR="$REMOTEDIR/creds-credentials" setup_creds begin_test "credentials with url-specific helper skips askpass" ( set -e reponame="url-specific-helper" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git config credential.useHttpPath false git config credential.helper "" git config credential.$GITSERVER.helper "lfstest" git lfs track "*.dat" echo "hello" > a.dat git add .gitattributes a.dat git commit -m "initial commit" # askpass is skipped GIT_ASKPASS="lfs-bad-cmd" GIT_TRACE=1 git push origin main 2>&1 | tee push.log [ "0" -eq "$(grep "filling with GIT_ASKPASS" push.log | wc -l)" ] ) end_test begin_test "credentials without useHttpPath, with bad path password" ( set -e reponame="no-httppath-bad-password" setup_remote_repo "$reponame" printf ":path:wrong" > "$CREDSDIR/127.0.0.1--$reponame" clone_repo "$reponame" without-path git config credential.useHttpPath false git checkout -b without-path git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log printf "a" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" GIT_TRACE=1 git push origin without-path 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log echo "approvals:" [ "1" -eq "$(cat push.log | grep "creds: git credential approve" | wc -l)" ] echo "fills:" [ "1" -eq "$(cat push.log | grep "creds: git credential fill" | wc -l)" ] echo "credential calls have no path:" credcalls="$(grep "creds: git credential" push.log)" [ "0" -eq "$(echo "$credcalls" | grep "no-httppath-bad-password" | wc -l)" ] expected="$(echo "$credcalls" | wc -l)" [ "$expected" -eq "$(printf "%s" "$credcalls" | grep '", "")' | wc -l)" ] ) end_test begin_test "credentials with url-specific useHttpPath, with bad path password" ( set -e reponame="url-specific-httppath-bad-password" setup_remote_repo "$reponame" printf ":path:wrong" > "$CREDSDIR/127.0.0.1--$reponame" clone_repo "$reponame" with-url-specific-path git config credential.$GITSERVER.useHttpPath false git config lfs.locksverify false git checkout -b without-path git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log printf "a" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" GIT_TRACE=1 git push origin without-path 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log echo "approvals:" [ "1" -eq "$(cat push.log | grep "creds: git credential approve" | wc -l)" ] echo "fills:" [ "1" -eq "$(cat push.log | grep "creds: git credential fill" | wc -l)" ] ) end_test begin_test "credentials with useHttpPath, with wrong password" ( set -e reponame="httppath-bad-password" setup_remote_repo "$reponame" printf ":path:wrong" > "$CREDSDIR/127.0.0.1--$reponame" clone_repo "$reponame" with-path-wrong-pass git checkout -b with-path-wrong-pass git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" GIT_TRACE=1 git push origin with-path-wrong-pass 2>&1 | tee push.log [ "0" = "$(grep -c "Uploading LFS objects: 100% (1/1), 0 B" push.log)" ] echo "approvals:" [ "0" -eq "$(cat push.log | grep "creds: git credential approve" | wc -l)" ] echo "fills:" [ "2" -eq "$(cat push.log | grep "creds: git credential fill" | wc -l)" ] ) end_test begin_test "credentials with useHttpPath, with correct password" ( set -e reponame="$(basename "$0" 
".sh")" setup_remote_repo "$reponame" printf ":path:$reponame" > "$CREDSDIR/127.0.0.1--$reponame" clone_repo "$reponame" with-path-correct-pass git checkout -b with-path-correct-pass git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # creating new branch does not re-send any objects existing on other # remote branches anymore, generate new object, different from prev tests contents="b" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > b.dat git add b.dat git add .gitattributes git commit -m "add b.dat" GIT_TRACE=1 git push origin with-path-correct-pass 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log echo "approvals:" [ "1" -eq "$(cat push.log | grep "creds: git credential approve" | wc -l)" ] echo "fills:" [ "1" -eq "$(cat push.log | grep "creds: git credential fill" | wc -l)" ] echo "credential calls have path:" credcalls="$(grep "creds: git credential" push.log)" [ "0" -eq "$(echo "$credcalls" | grep '", "")' | wc -l)" ] expected="$(echo "$credcalls" | wc -l)" [ "$expected" -eq "$(printf "%s" "$credcalls" | grep "t-credentials" | wc -l)" ] ) end_test begin_test "credentials send wwwauth[] by default" ( set -e ensure_git_version_isnt $VERSION_LOWER "2.41.0" export LFS_TEST_CREDS_WWWAUTH=required reponame="$(basename "$0" ".sh")-wwwauth-required" setup_remote_repo "$reponame" printf ":path:$reponame" > "$CREDSDIR/127.0.0.1--$reponame" clone_repo "$reponame" "$reponame" git checkout -b new-branch git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # creating new branch does not re-send any objects existing on other # remote branches anymore, generate new object, different from prev tests contents="b" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > b.dat git add b.dat git add .gitattributes git commit -m "add b.dat" GIT_TERMINAL_PROMPT=0 GIT_TRACE=1 git push origin new-branch 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log echo "approvals:" [ "1" -eq "$(cat push.log | grep "creds: git credential approve" | wc -l)" ] echo "fills:" [ "1" -eq "$(cat push.log | grep "creds: git credential fill" | wc -l)" ] echo "credential calls have path:" credcalls="$(grep "creds: git credential" push.log)" [ "0" -eq "$(echo "$credcalls" | grep '", "")' | wc -l)" ] expected="$(echo "$credcalls" | wc -l)" [ "$expected" -eq "$(printf "%s" "$credcalls" | grep "t-credentials" | wc -l)" ] ) end_test begin_test "credentials sends wwwauth[] and fails with finicky helper" ( set -e ensure_git_version_isnt $VERSION_LOWER "2.41.0" export LFS_TEST_CREDS_WWWAUTH=forbidden reponame="$(basename "$0" ".sh")-wwwauth-forbidden-finicky" setup_remote_repo "$reponame" printf ":path:$reponame" > "$CREDSDIR/127.0.0.1--$reponame" clone_repo "$reponame" "$reponame" git checkout -b new-branch git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # creating new branch does not re-send any objects existing on other # remote branches anymore, generate new object, different from prev tests contents="b" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > b.dat git add b.dat git add .gitattributes git commit -m "add b.dat" GIT_TERMINAL_PROMPT=0 GIT_TRACE=1 git push origin new-branch 2>&1 | tee push.log echo "approvals:" [ "0" -eq "$(cat push.log | grep "creds: git credential approve" | wc -l)" ] echo "fills:" [ "2" -eq "$(cat push.log | grep "creds: git credential fill" | wc -l)" ] ) end_test begin_test "credentials skips wwwauth[] with option" ( set -e 
ensure_git_version_isnt $VERSION_LOWER "2.41.0" export LFS_TEST_CREDS_WWWAUTH=forbidden reponame="$(basename "$0" ".sh")-wwwauth-skip" setup_remote_repo "$reponame" git config --global credential.$GITSERVER.skipwwwauth true printf ":path:$reponame" > "$CREDSDIR/127.0.0.1--$reponame" clone_repo "$reponame" "$reponame" git checkout -b new-branch git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # creating new branch does not re-send any objects existing on other # remote branches anymore, generate new object, different from prev tests contents="b" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > b.dat git add b.dat git add .gitattributes git commit -m "add b.dat" GIT_TERMINAL_PROMPT=0 GIT_TRACE=1 git push origin new-branch 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log echo "approvals:" [ "1" -eq "$(cat push.log | grep "creds: git credential approve" | wc -l)" ] echo "fills:" [ "1" -eq "$(cat push.log | grep "creds: git credential fill" | wc -l)" ] echo "credential calls have path:" credcalls="$(grep "creds: git credential" push.log)" [ "0" -eq "$(echo "$credcalls" | grep '", "")' | wc -l)" ] expected="$(echo "$credcalls" | wc -l)" [ "$expected" -eq "$(printf "%s" "$credcalls" | grep "t-credentials" | wc -l)" ] ) end_test begin_test "credentials can authenticate with Bearer auth" ( set -e
# Skip unless Git supports credential helper capabilities.
[ $(git credential capability </dev/null 2>/dev/null | grep -c "capability\[\]=authtype") -gt 0 ] || exit 0 reponame="$(basename "$0" ".sh")-bearer" setup_remote_repo "$reponame"
# Seed a Bearer-authtype record for the test credential helper; the record
# format follows the "git credential" test below, and the token value here
# is illustrative.
printf 'Bearer::sometoken' > "$CREDSDIR/127.0.0.1--$reponame" clone_repo "$reponame" "$reponame" git checkout -b new-branch git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="b" printf "%s" "$contents" > b.dat git add b.dat git add .gitattributes git commit -m "add b.dat" GIT_TERMINAL_PROMPT=0 GIT_TRACE=1 GIT_TRANSFER_TRACE=1 GIT_CURL_VERBOSE=1 git push origin new-branch 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log [ "1" -eq "$(cat push.log | grep "creds: git credential approve" | wc -l)" ] [ "1" -eq "$(cat push.log | grep "creds: git credential fill" | wc -l)" ] ) end_test begin_test "credentials can authenticate with multistage auth" ( set -e
# Skip unless Git supports credential helper capabilities.
[ $(git credential capability </dev/null 2>/dev/null | grep -c "capability\[\]=authtype") -gt 0 ] || exit 0 reponame="$(basename "$0" ".sh")-multistage" setup_remote_repo "$reponame"
# Seed a two-stage record for the test credential helper, mirroring the
# example.com record used in the "git credential" test below.
printf 'Multistage::bazquux:state1:state2:\nMultistage::foobar::state1:true' > "$CREDSDIR/127.0.0.1--$reponame" clone_repo "$reponame" "$reponame" git checkout -b new-branch git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="b" printf "%s" "$contents" > b.dat git add b.dat git add .gitattributes git commit -m "add b.dat" GIT_TERMINAL_PROMPT=0 GIT_TRACE=1 GIT_TRANSFER_TRACE=1 GIT_CURL_VERBOSE=1 git push origin new-branch 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log [ "1" -eq "$(cat push.log | grep "creds: git credential approve" | wc -l)" ] [ "2" -eq "$(cat push.log | grep "creds: git credential fill" | wc -l)" ] ) end_test begin_test "git credential" ( set -e printf ":git:server" > "$CREDSDIR/credential-test.com" printf ":git:path" > "$CREDSDIR/credential-test.com--some-path" printf 'Multistage::bazquux:state1:state2:\nMultistage::foobar::state1:true' > "$CREDSDIR/example.com" mkdir empty cd empty git init echo "protocol=http host=credential-test.com path=some/path" | GIT_TERMINAL_PROMPT=0 git credential fill > cred.log cat cred.log expected="protocol=http host=credential-test.com path=some/path username=git password=path" [ "$expected" = "$(cat cred.log)" ] git config credential.useHttpPath false echo "protocol=http host=credential-test.com" | GIT_TERMINAL_PROMPT=0 git credential fill > cred.log cat cred.log expected="protocol=http host=credential-test.com username=git password=server" [ "$expected" = "$(cat
cred.log)" ] echo "protocol=http host=credential-test.com path=some/path" | GIT_TERMINAL_PROMPT=0 git credential fill > cred.log cat cred.log expected="protocol=http host=credential-test.com username=git password=server" [ "$expected" = "$(cat cred.log)" ] [ $(git credential capability cred.log cat cred.log expected="capability[]=authtype capability[]=state authtype=Multistage credential=foobar protocol=http host=example.com continue=1 state[]=lfstest:state1" [ "$expected" = "$(cat cred.log)" ] echo "capability[]=authtype capability[]=state protocol=http host=example.com state[]=lfstest:state1" | GIT_TERMINAL_PROMPT=0 git credential fill > cred.log cat cred.log expected="capability[]=authtype capability[]=state authtype=Multistage credential=bazquux protocol=http host=example.com state[]=lfstest:state2" [ "$expected" = "$(cat cred.log)" ] ) end_test if [[ $(uname) == *"MINGW"* ]]; then NETRCFILE="$HOME/_netrc" else NETRCFILE="$HOME/.netrc" fi begin_test "credentials from netrc" ( set -e printf "machine localhost\nlogin netrcuser\npassword netrcpass\n" >> "$NETRCFILE" echo $HOME echo "GITSERVER $GITSERVER" cat $NETRCFILE # prevent prompts on Windows particularly export SSH_ASKPASS= reponame="netrctest" setup_remote_repo "$reponame" clone_repo "$reponame" repo # Need a remote named "localhost" or 127.0.0.1 in netrc will interfere with the other auth git remote add "netrc" "$(echo $GITSERVER | sed s/127.0.0.1/localhost/)/netrctest" git lfs env git lfs track "*.dat" echo "push a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" GIT_TRACE=1 git lfs push netrc main 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 7 B" push.log echo "any netrc credential calls:" [ "4" -eq "$(cat push.log | grep "netrc: git credential" | wc -l)" ] echo "any netrc credential fills:" [ "2" -eq "$(cat push.log | grep "netrc: git credential fill" | wc -l)" ] echo "any netrc credential approvals:" [ "2" -eq "$(cat push.log | grep "netrc: git credential approve" | wc -l)" ] ) end_test begin_test "credentials from netrc with unknown keyword" ( set -e printf "machine localhost\nlogin netrcuser\nnot-a-key something\npassword netrcpass\n" >> "$NETRCFILE" echo $HOME echo "GITSERVER $GITSERVER" cat $NETRCFILE # prevent prompts on Windows particularly export SSH_ASKPASS= reponame="netrctest" setup_remote_repo "$reponame" clone_repo "$reponame" repo2 # Need a remote named "localhost" or 127.0.0.1 in netrc will interfere with the other auth git remote add "netrc" "$(echo $GITSERVER | sed s/127.0.0.1/localhost/)/netrctest" git lfs env git lfs track "*.dat" echo "push a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" GIT_TRACE=1 git lfs push netrc main 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 7 B" push.log echo "any netrc credential calls:" [ "4" -eq "$(cat push.log | grep "netrc: git credential" | wc -l)" ] echo "any netrc credential fills:" [ "2" -eq "$(cat push.log | grep "netrc: git credential fill" | wc -l)" ] echo "any netrc credential approvals:" [ "2" -eq "$(cat push.log | grep "netrc: git credential approve" | wc -l)" ] ) end_test begin_test "credentials from netrc with bad password" ( set -e printf "machine localhost\nlogin netrcuser\npassword badpass\n" >> "$NETRCFILE" echo $HOME echo "GITSERVER $GITSERVER" cat $NETRCFILE # prevent prompts on Windows particularly export SSH_ASKPASS= reponame="netrctest" setup_remote_repo "$reponame" clone_repo "$reponame" repo3 # Need a remote named "localhost" or 127.0.0.1 in netrc will interfere with the other auth git 
remote add "netrc" "$(echo $GITSERVER | sed s/127.0.0.1/localhost/)/netrctest" git lfs env git lfs track "*.dat" echo "push a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git push netrc main 2>&1 | tee push.log [ "0" = "$(grep -c "Uploading LFS objects: 100% (1/1), 7 B" push.log)" ] ) end_test begin_test "credentials with bad netrc creds will retry" ( set -e printf "machine localhost\nlogin netrcuser\npassword badpassretry\n" >> "$NETRCFILE" echo $HOME echo "GITSERVER $GITSERVER" cat $NETRCFILE # prevent prompts on Windows particularly export SSH_ASKPASS= # ensure we provide the correct creds through ASKPASS so we can fall back # when .netrc fails export LFS_ASKPASS_USERNAME="netrcuser" export LFS_ASKPASS_PASSWORD="netrcpass" reponame="netrctest" setup_remote_repo "$reponame" clone_repo "$reponame" repo4 # Need a remote named "localhost" or 127.0.0.1 in netrc will interfere with the other auth git remote add "netrc" "$(echo $GITSERVER | sed s/127.0.0.1/localhost/)/netrctest" git lfs env git lfs track "*.dat" echo "push a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" GIT_TRACE=1 GIT_ASKPASS="lfs-askpass" git push netrc main 2>&1 | tee push.log grep -c "Uploading LFS objects: 100% (1/1), 7 B" push.log # netrc credentials should be attempted then rejected for the lock request echo "netrc credentials attempted:" [ "1" -eq "$(cat push.log | grep "netrc: git credential fill" | wc -l)" ] echo "netrc credentials rejected:" [ "1" -eq "$(cat push.log | grep "netrc: git credential reject" | wc -l)" ] # credhelper should then use askpass to find the proper credentials, which # should be successful echo "askpass credentials attempted:" [ "1" -eq "$(cat push.log | grep "creds: git credential fill" | wc -l)" ] echo "askpass credentials approved:" [ "1" -eq "$(cat push.log | grep "creds: git credential approve" | wc -l)" ] # askpass creds should be cached and used for the batch request echo "cached credentials used:" [ "1" -eq "$(cat push.log | grep "creds: git credential cache" | wc -l)" ] ) end_test begin_test "credentials from lfs.url" ( set -e reponame="requirecreds-lfsurl" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "push a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" echo "bad push" git lfs env git lfs push origin main 2>&1 | tee push.log grep "Uploading LFS objects: 0% (0/1), 0 B" push.log echo "good push" gitserverhost=$(echo "$GITSERVER" | cut -d'/' -f3) git config lfs.url http://requirecreds:pass@$gitserverhost/$reponame.git/info/lfs git lfs env GIT_TRACE=1 git lfs push origin main 2>&1 | tee push.log # A 401 indicates URL access mode for the /storage endpoint # was used instead of for the lfsapi endpoint grep "HTTP: 401" push.log # Ensure we didn't make a second batch request, which means the request # was successfully retried internally grep "tq: retrying object" push.log && exit 1 grep "Uploading LFS objects: 0% (0/1), 0 B" push.log echo "bad fetch" rm -rf .git/lfs/objects git config lfs.url http://$gitserverhost/$reponame.git/info/lfs git lfs env git lfs fetch --all 2>&1 | tee fetch.log grep "Downloading LFS objects: 0% (0/1), 0 B" fetch.log echo "good fetch" rm -rf .git/lfs/objects git config lfs.url http://requirecreds:pass@$gitserverhost/$reponame.git/info/lfs git lfs env GIT_TRACE=1 git lfs fetch --all 2>&1 | tee fetch.log # No 401 should occur as we've already set an access mode for the # storage endpoint during the push grep "HTTP: 401" fetch.log && exit 1 git lfs fsck echo "good 
fetch, setting access mode" rm -rf .git/lfs/objects git config lfs.url http://requirecreds:pass@$gitserverhost/$reponame.git/info/lfs git config lfs.http://$gitserverhost/storage/.access "None" git lfs env GIT_TRACE=1 git lfs fetch --all 2>&1 | tee fetch.log # A 401 indicates URL access mode for the /storage endpoint # was used instead of for the lfsapi endpoint grep "HTTP: 401" fetch.log # Ensure we didn't make a second batch request, which means the request # was successfully retried internally grep "tq: retrying object" fetch.log && exit 1 git lfs fsck ) end_test begin_test "credentials from remote.origin.url" ( set -e reponame="requirecreds-remoteurl" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "push b" > b.dat git add .gitattributes b.dat git commit -m "add b.dat" echo "bad push" git lfs env git lfs push origin main 2>&1 | tee push.log grep "Uploading LFS objects: 0% (0/1), 0 B" push.log echo "good push" gitserverhost=$(echo "$GITSERVER" | cut -d'/' -f3) git config remote.origin.url http://requirecreds:pass@$gitserverhost/$reponame.git git lfs env GIT_TRACE=1 git lfs push origin main 2>&1 | tee push.log # A 401 indicates URL access mode for the /storage endpoint # was used instead of for the lfsapi endpoint grep "HTTP: 401" push.log # Ensure we didn't make a second batch request, which means the request # was successfully retried internally grep "tq: retrying object" push.log && exit 1 grep "Uploading LFS objects: 100% (1/1), 7 B" push.log echo "bad fetch" rm -rf .git/lfs/objects git config remote.origin.url http://$gitserverhost/$reponame.git git lfs env git lfs fetch --all 2>&1 | tee fetch.log # Missing authentication causes `git lfs fetch` to fail before the progress # meter is printed to the TTY. echo "good fetch" rm -rf .git/lfs/objects git config remote.origin.url http://requirecreds:pass@$gitserverhost/$reponame.git git lfs env GIT_TRACE=1 git lfs fetch --all 2>&1 | tee fetch.log # No 401 should occur as we've already set an access mode for the # storage endpoint during the push grep "HTTP: 401" fetch.log && exit 1 git lfs fsck echo "good fetch, setting access mode" rm -rf .git/lfs/objects git config remote.origin.url http://requirecreds:pass@$gitserverhost/$reponame.git git config lfs.http://$gitserverhost/storage/.access "None" git lfs env GIT_TRACE=1 git lfs fetch --all 2>&1 | tee fetch.log # A 401 indicates URL access mode for the /storage endpoint # was used instead of for the lfsapi endpoint grep "HTTP: 401" fetch.log # Ensure we didn't make a second batch request, which means the request # was successfully retried internally grep "tq: retrying object" fetch.log && exit 1 git lfs fsck ) end_test git-lfs-3.6.1/t/t-custom-transfers.sh000077500000000000000000000224771472372047300174750ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "custom-transfer-wrong-path" ( set -e # this repo name is the indicator to the server to support custom transfer reponame="test-custom-transfer-fail" setup_remote_repo "$reponame" clone_repo "$reponame" $reponame # deliberately incorrect path git config lfs.customtransfer.testcustom.path path-to-nothing git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="jksgdfljkgsdlkjafg lsjdgf alkjgsd lkfjag sldjkgf alkjsgdflkjagsd kljfg asdjgf kalsd" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin main 2>&1 | tee pushcustom.log # use PIPESTATUS otherwise we get exit code from tee res=${PIPESTATUS[0]} grep "xfer: adapter \"testcustom\" Begin()" pushcustom.log grep "xfer: Aborting worker process" pushcustom.log if [ "$res" = "0" ]; then echo "Push should have failed because of an incorrect custom transfer path." exit 1 fi ) end_test begin_test "custom-transfer-upload-download" ( set -e # this repo name is the indicator to the server to support custom transfer reponame="test-custom-transfer-1" setup_remote_repo "$reponame" clone_repo "$reponame" $reponame # set up custom transfer adapter git config lfs.customtransfer.testcustom.path lfstest-customadapter git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Tracking" # set up a decent amount of data so that there's work for multiple concurrent adapters echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"verify.dat\",\"Size\":18,\"Data\":\"send-verify-action\"}, {\"Filename\":\"file1.dat\",\"Size\":1024}, {\"Filename\":\"file2.dat\",\"Size\":750}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":1050}, {\"Filename\":\"file3.dat\",\"Size\":660}, {\"Filename\":\"file4.dat\",\"Size\":230}] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":1200}, {\"Filename\":\"file6.dat\",\"Size\":300}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":120}, {\"Filename\":\"file5.dat\",\"Size\":450}, {\"Filename\":\"file7.dat\",\"Size\":520}, {\"Filename\":\"file8.dat\",\"Size\":2048}] } ]" | lfstest-testutils addcommits GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin main 2>&1 | tee pushcustom.log # use PIPESTATUS otherwise we get exit code from tee [ ${PIPESTATUS[0]} = "0" ] grep "xfer: started custom adapter process" pushcustom.log grep "xfer\[lfstest-customadapter\]:" pushcustom.log grep "Uploading LFS objects: 100% (12/12)" pushcustom.log rm -rf .git/lfs/objects GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git lfs fetch --all 2>&1 | tee fetchcustom.log [ ${PIPESTATUS[0]} = "0" ] grep "xfer: started custom adapter process" fetchcustom.log grep "xfer\[lfstest-customadapter\]:" fetchcustom.log grep "Terminating test custom adapter gracefully" fetchcustom.log objectlist=`find .git/lfs/objects -type f` [ "$(echo "$objectlist" | wc -l)" -eq 12 ] ) end_test begin_test "custom-transfer-standalone" ( set -e # setup a git repo to be used as a local repo, not remote reponame="test-custom-transfer-standalone" setup_remote_repo "$reponame" # clone directly, not through lfstest-gitserver clone_repo_url "$REMOTEDIR/$reponame.git" $reponame # set up custom transfer adapter to use a specific transfer agent git config lfs.customtransfer.testcustom.path 
lfstest-standalonecustomadapter git config lfs.customtransfer.testcustom.args "--arg1 '--arg2 --arg3' --arg4" git config lfs.customtransfer.testcustom.concurrent false git config lfs.standalonetransferagent testcustom export TEST_STANDALONE_BACKUP_PATH="$(pwd)/test-custom-transfer-standalone-backup" mkdir -p $TEST_STANDALONE_BACKUP_PATH rm -rf $TEST_STANDALONE_BACKUP_PATH/* git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Tracking" # set up a decent amount of data so that there's work for multiple concurrent adapters echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"verify.dat\",\"Size\":18,\"Data\":\"send-verify-action\"}, {\"Filename\":\"file1.dat\",\"Size\":1024}, {\"Filename\":\"file2.dat\",\"Size\":750}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":1050}, {\"Filename\":\"file3.dat\",\"Size\":660}, {\"Filename\":\"file4.dat\",\"Size\":230}] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":1200}, {\"Filename\":\"file6.dat\",\"Size\":300}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":120}, {\"Filename\":\"file5.dat\",\"Size\":450}, {\"Filename\":\"file7.dat\",\"Size\":520}, {\"Filename\":\"file8.dat\",\"Size\":2048}] } ]" | lfstest-testutils addcommits GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin main 2>&1 | tee pushcustom.log # use PIPESTATUS otherwise we get exit code from tee [ ${PIPESTATUS[0]} = "0" ] # Make sure the lock verification is not attempted. grep "locks/verify$" pushcustom.log && false grep "xfer: started custom adapter process" pushcustom.log grep "xfer\[lfstest-standalonecustomadapter\]:" pushcustom.log grep "Uploading LFS objects: 100% (12/12)" pushcustom.log rm -rf .git/lfs/objects GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git lfs fetch --all 2>&1 | tee fetchcustom.log [ ${PIPESTATUS[0]} = "0" ] grep "xfer: started custom adapter process" fetchcustom.log grep "xfer\[lfstest-standalonecustomadapter\]:" fetchcustom.log grep "Terminating test custom adapter gracefully" fetchcustom.log # Test argument parsing. 
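# lfs.customtransfer.testcustom.args was set above to
# "--arg1 '--arg2 --arg3' --arg4"; Git LFS splits that value using
# shell-style quoting rules, so the adapter should see exactly three
# arguments:
#
#   --arg1
#   --arg2 --arg3
#   --arg4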
grep 'Saw argument "--arg1"' fetchcustom.log grep 'Saw argument "--arg2 --arg3"' fetchcustom.log grep 'Saw argument "--arg4"' fetchcustom.log objectlist=`find .git/lfs/objects -type f` [ "$(echo "$objectlist" | wc -l)" -eq 12 ] git lfs fsck ) end_test begin_test "custom-transfer-standalone-urlmatch" ( set -e # setup a git repo to be used as a local repo, not remote reponame="test-custom-transfer-standalone-urlmatch" setup_remote_repo "$reponame" # clone directly, not through lfstest-gitserver clone_repo_url "$REMOTEDIR/$reponame.git" $reponame # set up custom transfer adapter to use a specific transfer agent, using a URL prefix match git config lfs.customtransfer.testcustom.path lfstest-standalonecustomadapter git config lfs.customtransfer.testcustom.concurrent false git config remote.origin.lfsurl https://git.example.com/example/path/to/repo git config lfs.https://git.example.com/example/path/.standalonetransferagent testcustom git config lfs.standalonetransferagent invalid-agent # git config lfs.standalonetransferagent testcustom export TEST_STANDALONE_BACKUP_PATH="$(pwd)/test-custom-transfer-standalone-urlmatch-backup" mkdir -p $TEST_STANDALONE_BACKUP_PATH rm -rf $TEST_STANDALONE_BACKUP_PATH/* git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Tracking" # set up a decent amount of data so that there's work for multiple concurrent adapters echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"verify.dat\",\"Size\":18,\"Data\":\"send-verify-action\"}, {\"Filename\":\"file1.dat\",\"Size\":1024}, {\"Filename\":\"file2.dat\",\"Size\":750}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":1050}, {\"Filename\":\"file3.dat\",\"Size\":660}, {\"Filename\":\"file4.dat\",\"Size\":230}] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":1200}, {\"Filename\":\"file6.dat\",\"Size\":300}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":120}, {\"Filename\":\"file5.dat\",\"Size\":450}, {\"Filename\":\"file7.dat\",\"Size\":520}, {\"Filename\":\"file8.dat\",\"Size\":2048}] } ]" | lfstest-testutils addcommits GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin main 2>&1 | tee pushcustom.log # use PIPESTATUS otherwise we get exit code from tee [ ${PIPESTATUS[0]} = "0" ] # Make sure the lock verification is not attempted. grep "locks/verify$" pushcustom.log && false grep "xfer: started custom adapter process" pushcustom.log grep "xfer\[lfstest-standalonecustomadapter\]:" pushcustom.log grep "Uploading LFS objects: 100% (12/12)" pushcustom.log rm -rf .git/lfs/objects GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git lfs fetch --all 2>&1 | tee fetchcustom.log [ ${PIPESTATUS[0]} = "0" ] grep "xfer: started custom adapter process" fetchcustom.log grep "xfer\[lfstest-standalonecustomadapter\]:" fetchcustom.log grep "Terminating test custom adapter gracefully" fetchcustom.log objectlist=`find .git/lfs/objects -type f` [ "$(echo "$objectlist" | wc -l)" -eq 12 ] git lfs fsck ) end_test git-lfs-3.6.1/t/t-dedup.sh000077500000000000000000000056651472372047300152570ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "dedup" ( set -e reponame="dedup" git init $reponame cd $reponame # Confirm Git LFS extensions prevent de-duplication git config lfs.extension.foo.clean "foo-clean %f" git config lfs.extension.foo.smudge "foo-smudge %f" git config lfs.extension.foo.priority 0 result=$(git lfs dedup 2>&1) && true if ( echo $result | grep "This system does not support de-duplication." ); then exit fi echo "$result" | grep 'This platform supports file de-duplication, however, Git LFS extensions are configured and therefore de-duplication can not be used.' git config --unset lfs.extension.foo.clean git config --unset lfs.extension.foo.smudge git config --unset lfs.extension.foo.priority # Create a commit with some files tracked by git-lfs git lfs track *.dat echo "test data" > a.dat echo "test data 2" > b.dat git add .gitattributes *.dat git commit -m "first commit" # Delete file b and lock directory bOid=$(git log --patch b.dat | grep "^+oid" | cut -d ":" -f 2) bOid12=$(echo $bOid | cut -b 1-2) bOid34=$(echo $bOid | cut -b 3-4) rm ".git/lfs/objects/$bOid12/$bOid34/$bOid" # DO result=$(git lfs dedup 2>&1) && true # VERIFY: Expected # Success: a.dat # Success: b.dat echo "$result" | grep 'Success: a.dat' echo "$result" | grep -E 'Success:\s+b.dat|Skipped:\s+b.dat' # Sometimes mediafile of b.bat is restored by timing issue? ) end_test begin_test "dedup test" ( set -e reponame="dedup_test" git init $reponame cd $reponame # Confirm Git LFS extensions prevent de-duplication git config lfs.extension.foo.clean "foo-clean %f" git config lfs.extension.foo.smudge "foo-smudge %f" git config lfs.extension.foo.priority 0 result=$(git lfs dedup --test 2>&1) && true if ( echo $result | grep "This system does not support de-duplication." ); then exit fi echo "$result" | grep 'This platform supports file de-duplication, however, Git LFS extensions are configured and therefore de-duplication can not be used.' git config --unset lfs.extension.foo.clean git config --unset lfs.extension.foo.smudge git config --unset lfs.extension.foo.priority # DO result=$(git lfs dedup --test 2>&1) && true # Verify: This platform and repository support file de-duplication. echo "$result" | grep 'This platform and repository support file de-duplication.' ) end_test begin_test "dedup dirty workdir" ( set -e reponame="dedup_dirty_workdir" git init $reponame cd $reponame # Make working tree dirty. echo "test data" > a.dat git add a.dat git commit -m "first commit" echo "modify" >> a.dat # DO result=$(git lfs dedup 2>&1) && true if ( echo $result | grep "This system does not support de-duplication." ); then exit fi # Verify: Working tree is dirty. Please commit or reset your change. echo "$result" | grep 'Working tree is dirty. Please commit or reset your change.' ) end_test git-lfs-3.6.1/t/t-duplicate-oids.sh000077500000000000000000000027511472372047300170550ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "multiple revs with same OID get pushed once" ( set -e reponame="multiple-revs-one-oid" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="contents" contents_oid="$(calc_oid "$contents")" # Stash the contents of the file that we want to commit in .git/lfs/objects. 
object_dir="$(echo $contents_oid \ | awk '{ print substr($0, 1, 2) "/" substr($0, 3, 2) }')" mkdir -p ".git/lfs/objects/$object_dir" printf "%s" "$contents" > ".git/lfs/objects/$object_dir/$contents_oid" # Create a pointer with the old "http://git-media.io" spec legacy_pointer="$(pointer $contents_oid 8 http://git-media.io/v/2)" # Create a pointer with the latest spec to create a modification, but leave # the OID untouched. latest_pointer="$(pointer $contents_oid 8)" # Commit the legacy pointer printf "%s" "$legacy_pointer" > a.dat git add a.dat git commit -m "commit legacy" # Commit the new pointer, causing a diff on a.dat, but leaving the OID # unchanged. printf "%s" "$latest_pointer" > a.dat git add a.dat git commit -m "commit latest" # Delay the push until here, so the server doesn't have a copy of the OID that # we're trying to push. git push origin main 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 8 B" push.log assert_server_object "$reponame" "$contents_oid" ) end_test git-lfs-3.6.1/t/t-env.sh000077500000000000000000001010421472372047300147300ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" envInitConfig='git config filter.lfs.process = "git-lfs filter-process" git config filter.lfs.smudge = "git-lfs smudge -- %f" git config filter.lfs.clean = "git-lfs clean -- %f"' if [ "$IS_WINDOWS" -eq 1 ]; then export MSYS2_ENV_CONV_EXCL="GIT_LFS_TEST_DIR" fi # The "git lfs env" command should ignore this environment variable # despite the "GIT_" strings in its name and value. export TEST_GIT_EXAMPLE="GIT_EXAMPLE" begin_test "env with no remote" ( set -e reponame="env-no-remote" mkdir $reponame cd $reponame git init localwd=$(canonical_path "$TRASHDIR/$reponame") localgit=$(canonical_path "$TRASHDIR/$reponame/.git") localgitstore=$(canonical_path "$TRASHDIR/$reponame/.git") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT_")") expected=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual" ) end_test begin_test "env with origin remote" ( set -e reponame="env-origin-remote" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/env-origin-remote" endpoint="$GITSERVER/$reponame.git/info/lfs (auth=none)" localwd=$(canonical_path "$TRASHDIR/$reponame") localgit=$(canonical_path "$TRASHDIR/$reponame/.git") localgitstore=$(canonical_path "$TRASHDIR/$reponame/.git") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT_")") 
expected=$(printf '%s %s Endpoint=%s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$endpoint" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual" cd .git expected2=$(echo "$expected" | sed -e 's/LocalWorkingDir=.*/LocalWorkingDir=/') actual2=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected2" "$actual2" ) end_test begin_test "env with multiple remotes" ( set -e reponame="env-multiple-remotes" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/env-origin-remote" git remote add other "$GITSERVER/env-other-remote" endpoint="$GITSERVER/env-origin-remote.git/info/lfs (auth=none)" endpoint2="$GITSERVER/env-other-remote.git/info/lfs (auth=none)" localwd=$(canonical_path "$TRASHDIR/$reponame") localgit=$(canonical_path "$TRASHDIR/$reponame/.git") localgitstore=$(canonical_path "$TRASHDIR/$reponame/.git") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT_")") expected=$(printf '%s %s Endpoint=%s Endpoint (other)=%s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$endpoint" "$endpoint2" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual" cd .git expected2=$(echo "$expected" | sed -e 's/LocalWorkingDir=.*/LocalWorkingDir=/') actual2=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected2" "$actual2" ) end_test begin_test "env with other remote" ( set -e reponame="env-other-remote" mkdir $reponame cd $reponame git init git remote add other "$GITSERVER/env-other-remote" endpoint="$GITSERVER/env-other-remote.git/info/lfs (auth=none)" localwd=$(canonical_path "$TRASHDIR/$reponame") localgit=$(canonical_path "$TRASHDIR/$reponame/.git") localgitstore=$(canonical_path "$TRASHDIR/$reponame/.git") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT_")") expected=$(printf '%s %s Endpoint (other)=%s 
LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$endpoint" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual" cd .git expected2=$(echo "$expected" | sed -e 's/LocalWorkingDir=.*/LocalWorkingDir=/') actual2=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected2" "$actual2" ) end_test begin_test "env with multiple remotes and lfs.url config" ( set -e reponame="env-multiple-remotes-with-lfs-url" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/env-origin-remote" git remote add other "$GITSERVER/env-other-remote" git config lfs.url "http://foo/bar" localwd=$(canonical_path "$TRASHDIR/$reponame") localgit=$(canonical_path "$TRASHDIR/$reponame/.git") localgitstore=$(canonical_path "$TRASHDIR/$reponame/.git") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT_")") expected=$(printf '%s %s Endpoint=http://foo/bar (auth=none) Endpoint (other)=http://foo/bar (auth=none) LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual" cd .git expected2=$(echo "$expected" | sed -e 's/LocalWorkingDir=.*/LocalWorkingDir=/') actual2=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected2" "$actual2" ) end_test begin_test "env with multiple remotes and lfs configs" ( set -e reponame="env-multiple-remotes-lfs-configs" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/env-origin-remote" git remote add other "$GITSERVER/env-other-remote" git config lfs.url "http://foo/bar" git config remote.origin.lfsurl "http://custom/origin" git config remote.other.lfsurl "http://custom/other" localwd=$(canonical_path "$TRASHDIR/$reponame") localgit=$(canonical_path "$TRASHDIR/$reponame/.git") localgitstore=$(canonical_path "$TRASHDIR/$reponame/.git") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/tmp") 
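# Endpoint precedence, as exercised here: a global lfs.url overrides any
# remote.<name>.lfsurl, which in turn overrides the endpoint derived from
# the remote URL itself; hence both endpoints in the expected output below
# resolve to http://foo/bar despite the per-remote lfsurl settings.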
envVars=$(printf "%s" "$(env | grep "^GIT_")") expected=$(printf '%s %s Endpoint=http://foo/bar (auth=none) Endpoint (other)=http://foo/bar (auth=none) LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual" cd .git expected2=$(echo "$expected" | sed -e 's/LocalWorkingDir=.*/LocalWorkingDir=/') actual2=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected2" "$actual2" ) end_test begin_test "env with multiple remotes and batch configs" ( set -e reponame="env-multiple-remotes-lfs-batch-configs" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/env-origin-remote" git remote add other "$GITSERVER/env-other-remote" git config lfs.concurrenttransfers 5 git config remote.origin.lfsurl "http://foo/bar" git config remote.other.lfsurl "http://custom/other" localwd=$(canonical_path "$TRASHDIR/$reponame") localgit=$(canonical_path "$TRASHDIR/$reponame/.git") localgitstore=$(canonical_path "$TRASHDIR/$reponame/.git") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT_")") expected=$(printf '%s %s Endpoint=http://foo/bar (auth=none) Endpoint (other)=http://custom/other (auth=none) LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=5 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual" cd .git expected2=$(echo "$expected" | sed -e 's/LocalWorkingDir=.*/LocalWorkingDir=/') actual2=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected2" "$actual2" ) end_test begin_test "env with .lfsconfig" ( set -e reponame="env-with-lfsconfig" git init $reponame cd $reponame git remote add origin "$GITSERVER/env-origin-remote" echo '[remote "origin"] lfsurl = http://foobar:8080/ [lfs] batch = false concurrenttransfers = 5 ' > .lfsconfig echo '[remote "origin"] lfsurl = http://foobar:5050/ [lfs] batch = true concurrenttransfers = 50 ' > .gitconfig localwd=$(canonical_path "$TRASHDIR/$reponame") localgit=$(canonical_path "$TRASHDIR/$reponame/.git") 
localgitstore=$(canonical_path "$TRASHDIR/$reponame/.git") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT_")") expected=$(printf '%s %s Endpoint=http://foobar:8080/ (auth=none) LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual" mkdir a cd a actual2=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual2" ) end_test begin_test "env with environment variables" ( set -e reponame="env-with-envvars" git init $reponame mkdir -p $reponame/a/b/c gitDir=$(canonical_path "$TRASHDIR/$reponame/.git") workTree=$(canonical_path "$TRASHDIR/$reponame/a/b") localwd=$(canonical_path "$TRASHDIR/$reponame/a/b") localgit=$(canonical_path "$TRASHDIR/$reponame/.git") localgitstore=$(canonical_path "$TRASHDIR/$reponame/.git") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars="$(GIT_DIR=$gitDir GIT_WORK_TREE=$workTree env | grep "^GIT_" | sort)" expected=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(GIT_DIR=$gitDir GIT_WORK_TREE=$workTree git lfs env \ | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual" cd $TRASHDIR/$reponame actual2=$(GIT_DIR=$gitDir GIT_WORK_TREE=$workTree git lfs env \ | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual2" cd $TRASHDIR/$reponame/.git actual3=$(GIT_DIR=$gitDir GIT_WORK_TREE=$workTree git lfs env \ | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual3" cd $TRASHDIR/$reponame/a/b/c actual4=$(GIT_DIR=$gitDir GIT_WORK_TREE=$workTree git lfs env \ | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual4" envVars="$(GIT_DIR=$gitDir GIT_WORK_TREE=a/b env | grep "^GIT_" | sort)" # `a/b` is an invalid relative path from where we are now and results in an # error, so resulting output will have many fields blank or invalid 
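# (A relative GIT_WORK_TREE is resolved against the current directory, which
# at this point is $TRASHDIR/$reponame/a/b/c, where no "a/b" subdirectory
# exists.)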
mediaDir5=$(native_path "lfs/objects") tempDir5=$(native_path "lfs/tmp") expected5=$(printf '%s %s LocalWorkingDir= LocalGitDir= LocalGitStorageDir= LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=lfs AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s git config filter.lfs.process = "" git config filter.lfs.smudge = "" git config filter.lfs.clean = "" ' "$(git lfs version)" "$(git version)" "$mediaDir5" "$tempDir5" "$envVars") actual5=$(GIT_DIR=$gitDir GIT_WORK_TREE=a/b git lfs env \ | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected5" "$actual5" cd $TRASHDIR/$reponame/a/b envVars="$(GIT_DIR=$gitDir env | grep "^GIT_" | sort)" expected7=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual7=$(GIT_DIR=$gitDir git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected7" "$actual7" cd $TRASHDIR/$reponame/a envVars="$(GIT_WORK_TREE=$workTree env | grep "^GIT_" | sort)" expected8=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual8=$(GIT_WORK_TREE=$workTree git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected8" "$actual8" ) end_test begin_test "env with bare repo" ( set -e reponame="env-with-bare-repo" git init --bare $reponame cd $reponame localgit=$(canonical_path "$TRASHDIR/$reponame") localgitstore=$(canonical_path "$TRASHDIR/$reponame") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT_")") expected=$(printf "%s\n%s\n LocalWorkingDir= LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false 
FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s " "$(git lfs version)" "$(git version)" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual" ) end_test begin_test "env with multiple ssh remotes" ( set -e reponame="env-with-ssh" mkdir $reponame cd $reponame git init git remote add origin git@git-server.com:user/repo.git git remote add other git@other-git-server.com:user/repo.git expected='Endpoint=https://git-server.com/user/repo.git/info/lfs (auth=none) SSH=git@git-server.com:user/repo.git Endpoint (other)=https://other-git-server.com/user/repo.git/info/lfs (auth=none) SSH=git@other-git-server.com:user/repo.git GIT_SSH=lfs-ssh-echo' contains_same_elements "$expected" "$(git lfs env \ | grep -v "^GIT_EXEC_PATH=" | grep -e "Endpoint" -e "SSH=")" ) end_test begin_test "env with skip download errors" ( set -e reponame="env-with-skip-dl" git init $reponame cd $reponame git config lfs.skipdownloaderrors 1 localgit=$(canonical_path "$TRASHDIR/$reponame") localgitstore=$(canonical_path "$TRASHDIR/$reponame") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT_")") localwd=$(canonical_path "$TRASHDIR/$reponame") localgit=$(canonical_path "$TRASHDIR/$reponame/.git") localgitstore=$(canonical_path "$TRASHDIR/$reponame/.git") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT_")") expectedenabled=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=true FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expectedenabled" "$actual" git config --unset lfs.skipdownloaderrors # prove it's usually off expecteddisabled=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh 
UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expecteddisabled" "$actual" # now enable via env var envVarsEnabled=$(printf "%s" "$(GIT_LFS_SKIP_DOWNLOAD_ERRORS=1 env | grep "^GIT_")") expectedenabled2=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=true FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVarsEnabled" "$envInitConfig") actual=$(GIT_LFS_SKIP_DOWNLOAD_ERRORS=1 git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expectedenabled2" "$actual" ) end_test begin_test "env with extra transfer methods" ( set -e reponame="env-with-transfers" git init $reponame cd $reponame git config lfs.tustransfers true git config lfs.customtransfer.supertransfer.path /path/to/something localgit=$(canonical_path "$TRASHDIR/$reponame") localgitstore=$(canonical_path "$TRASHDIR/$reponame") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT_")") localwd=$(canonical_path "$TRASHDIR/$reponame") localgit=$(canonical_path "$TRASHDIR/$reponame/.git") localgitstore=$(canonical_path "$TRASHDIR/$reponame/.git") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT_")") expectedenabled=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=true BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh,supertransfer UploadTransfers=basic,lfs-standalone-file,ssh,supertransfer,tus %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expectedenabled" "$actual" ) end_test begin_test "env with multiple remotes and ref" ( set -e reponame="env-multiple-remotes-ref" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/env-origin-remote" git remote add other "$GITSERVER/env-other-remote" touch a.txt git add a.txt git commit -m "initial commit" endpoint="$GITSERVER/env-origin-remote.git/info/lfs (auth=none)" endpoint2="$GITSERVER/env-other-remote.git/info/lfs (auth=none)" 
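# (Descriptive note: the expected endpoint values above follow Git LFS's
# default endpoint guessing, which appends "/info/lfs" to the remote URL
# after ensuring a ".git" suffix, e.g. "https://host/user/repo" becomes
# "https://host/user/repo.git/info/lfs", unless lfs.url or
# remote.<name>.lfsurl overrides it.)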
localwd=$(canonical_path "$TRASHDIR/$reponame") localgit=$(canonical_path "$TRASHDIR/$reponame/.git") localgitstore=$(canonical_path "$TRASHDIR/$reponame/.git") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT_")") expected=$(printf '%s %s Endpoint=%s Endpoint (other)=%s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$endpoint" "$endpoint2" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual" ) end_test begin_test "env with unicode" ( set -e # This contains a Unicode apostrophe, an E with grave accent, and a Euro sign. # Only the middle one is representable in ISO-8859-1. reponame="env-d’autre-nom-très-bizarr€" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/env-origin-remote" git remote add other "$GITSERVER/env-other-remote" touch a.txt git add a.txt git commit -m "initial commit" # Set by the testsuite. unset LC_ALL endpoint="$GITSERVER/env-origin-remote.git/info/lfs (auth=none)" endpoint2="$GITSERVER/env-other-remote.git/info/lfs (auth=none)" localwd=$(canonical_path "$TRASHDIR/$reponame") localgit=$(canonical_path "$TRASHDIR/$reponame/.git") localgitstore=$(canonical_path "$TRASHDIR/$reponame/.git") lfsstorage=$(canonical_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT_")") expected=$(printf '%s %s Endpoint=%s Endpoint (other)=%s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$endpoint" "$endpoint2" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual" ) end_test begin_test "env outside a repository" ( set -e # This may or may not work, depending on the system, but it should at least # potentially cause Git to print non-English messages. 
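# (Debugging hint, assuming a POSIX system: "locale -a | grep -i fr_FR" shows
# whether the French locale exported below is actually installed; when it is
# not, Git typically keeps printing English messages and this test still
# passes.)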
export LC_ALL=fr_FR.UTF-8 localmedia="$(native_path "lfs/objects")" lfsstorage=lfs tempdir="$(native_path "lfs/tmp")" envVars=$(printf "%s" "$(env | grep "^GIT_")") expected=$(printf '%s %s LocalWorkingDir= LocalGitDir= LocalGitStorageDir= LocalMediaDir=%s LocalReferenceDirs= TempDir=%s ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh %s %s ' "$(git lfs version)" "$(git version)" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") # We redirect the standard error here because we should not get any error # messages, and if we do, we want to fail. actual=$(git lfs env 2>&1 | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual" ) end_test begin_test "env with duplicate endpoints" ( set -e reponame="env-duplicate-endpoints" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/env-origin-remote" git remote add other "$GITSERVER/env-other-remote" touch a.txt git add a.txt git commit -m "initial commit" cat <<EOF >>.git/config [url "https://host.example/domain/"] insteadOf = git@host.example:domain/ [url "https://host.example/domain/"] insteadOf = git@host.example:domain/ EOF git lfs env 2>&1 | tee test.log if grep 'warning.*same alias' test.log then exit 1 fi cat <<EOF >>.git/config [url "https://somewhere-else.example/domain/"] insteadOf = git@host.example:domain/ EOF git lfs env 2>&1 | tee test.log grep 'warning.*same alias' test.log ) end_test git-lfs-3.6.1/t/t-expired.sh000077500000000000000000000027061472372047300156070ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" declare -a expiration_types=("absolute" "relative" "both") for typ in "${expiration_types[@]}"; do begin_test "expired action ($typ time)" ( set -e reponame="expired-$typ" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="contents" contents_oid="$(calc_oid "$contents")" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a.dat" GIT_TRACE=1 git push origin main 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected push to fail, didn't" exit 1 fi refute_server_object "$reponame" "$contents_oid" ) end_test done for typ in "${expiration_types[@]}"; do begin_test "ssh expired ($typ time; git-lfs-authenticate)" ( set -e reponame="ssh-expired-$typ" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" sshurl="${GITSERVER/http:\/\//ssh://git@}/$reponame" git config lfs.url "$sshurl" contents="contents" contents_oid="$(calc_oid "$contents")" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a.dat" GIT_TRACE=1 git push origin main 2>&1 | tee push.log grep "ssh cache expired" push.log ) end_test done git-lfs-3.6.1/t/t-ext.sh000077500000000000000000000027111472372047300147430ustar00rootroot00000000000000#!/usr/bin/env bash .
"$(dirname "$0")/testlib.sh" begin_test "ext" ( set -e # no need to setup a remote repo, since this test does not need to push or pull mkdir ext cd ext git init git config lfs.extension.foo.clean "foo-clean %f" git config lfs.extension.foo.smudge "foo-smudge %f" git config lfs.extension.foo.priority 0 git config lfs.extension.bar.clean "bar-clean %f" git config lfs.extension.bar.smudge "bar-smudge %f" git config lfs.extension.bar.priority 1 git config lfs.extension.baz.clean "baz-clean %f" git config lfs.extension.baz.smudge "baz-smudge %f" git config lfs.extension.baz.priority 2 fooExpected="Extension: foo clean = foo-clean %f smudge = foo-smudge %f priority = 0" barExpected="Extension: bar clean = bar-clean %f smudge = bar-smudge %f priority = 1" bazExpected="Extension: baz clean = baz-clean %f smudge = baz-smudge %f priority = 2" actual=$(git lfs ext list foo) [ "$actual" = "$fooExpected" ] actual=$(git lfs ext list bar) [ "$actual" = "$barExpected" ] actual=$(git lfs ext list baz) [ "$actual" = "$bazExpected" ] actual=$(git lfs ext list foo bar) expected=$(printf "%s\n%s" "$fooExpected" "$barExpected") [ "$actual" = "$expected" ] actual=$(git lfs ext list) expected=$(printf "%s\n%s\n%s" "$fooExpected" "$barExpected" "$bazExpected") [ "$actual" = "$expected" ] actual=$(git lfs ext) [ "$actual" = "$expected" ] ) end_test git-lfs-3.6.1/t/t-extra-header.sh000077500000000000000000000075311472372047300165210ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "http..extraHeader" ( set -e reponame="copy-headers" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" url="$(git config remote.origin.url).git/info/lfs" git config --add "http.$url.extraHeader" "X-Foo: bar" git config --add "http.$url.extraHeader" "X-Foo: baz" git lfs track "*.dat" printf "contents" > a.dat git add .gitattributes a.dat git commit -m "initial commit" GIT_CURL_VERBOSE=1 GIT_TRACE=1 git push origin main 2>&1 | tee curl.log grep "> X-Foo: bar" curl.log grep "> X-Foo: baz" curl.log ) end_test begin_test "http..extraHeader with authorization" ( set -e reponame="requirecreds-extraHeader" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" # See: test/cmd/lfstest-gitserver.go:missingRequiredCreds(). user="requirecreds" pass="pass" auth="Basic $(echo -n $user:$pass | base64)" git config --add "http.extraHeader" "Authorization: $auth" git lfs track "*.dat" printf "contents" > a.dat git add .gitattributes a.dat git commit -m "initial commit" git push origin main 2>&1 | tee curl.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "expected \`git push origin main\` to succeed, didn't" exit 1 fi [ "0" -eq "$(grep -c "creds: filling with GIT_ASKPASS" curl.log)" ] [ "0" -eq "$(grep -c "creds: git credential approve" curl.log)" ] [ "0" -eq "$(grep -c "creds: git credential cache" curl.log)" ] [ "0" -eq "$(grep -c "creds: git credential fill" curl.log)" ] [ "0" -eq "$(grep -c "creds: git credential reject" curl.log)" ] ) end_test begin_test "http..extraHeader with authorization (casing)" ( set -e reponame="requirecreds-extraHeaderCasing" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" # See: test/cmd/lfstest-gitserver.go:missingRequiredCreds(). user="requirecreds" pass="pass" auth="Basic $(echo -n $user:$pass | base64)" git config --local --add lfs.access basic # N.B.: "AUTHORIZATION" is not the correct casing, and is therefore the # subject of this test. See lfsapi.Client.extraHeaders() for more. 
git config --local --add "http.extraHeader" "AUTHORIZATION: $auth" git lfs track "*.dat" printf "contents" > a.dat git add .gitattributes a.dat git commit -m "initial commit" git push origin main 2>&1 | tee curl.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "expected \`git push origin main\` to succeed, didn't" exit 1 fi [ "0" -eq "$(grep -c "creds: filling with GIT_ASKPASS" curl.log)" ] [ "0" -eq "$(grep -c "creds: git credential approve" curl.log)" ] [ "0" -eq "$(grep -c "creds: git credential cache" curl.log)" ] [ "0" -eq "$(grep -c "creds: git credential fill" curl.log)" ] [ "0" -eq "$(grep -c "creds: git credential reject" curl.log)" ] ) end_test begin_test "http..extraHeader with mixed-case URLs" ( set -e reponame="Mixed-Case-Headers" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" # These config options check for several things. # # First, we check for mixed-case URLs being read properly and not forced to # lowercase. Second, we check that the user can specify a config option for # the Git URL and have that apply to the LFS URL, which exercises the # URLConfig lookup code. Finally, we also write "ExtraHeader" in mixed-case as # well to test that we lower-case the rightmost portion of the config key # during lookup. url="$(git config remote.origin.url).git" git config --add "http.$url.ExtraHeader" "X-Foo: bar" git config --add "http.$url.ExtraHeader" "X-Foo: baz" git lfs track "*.dat" printf "contents" > a.dat git add .gitattributes a.dat git commit -m "initial commit" GIT_CURL_VERBOSE=1 GIT_TRACE=1 git push origin main 2>&1 | tee curl.log grep "> X-Foo: bar" curl.log grep "> X-Foo: baz" curl.log ) end_test git-lfs-3.6.1/t/t-fetch-include.sh000077500000000000000000000034711472372047300166610ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" reponame="$(basename "$0" ".sh")" contents="big file" contents_oid=$(calc_oid "$contents") begin_test "fetch: setup for include test" ( set -e setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.big" mkdir -p big/a mkdir -p big/b printf "%s" "$contents" > big/a/a1.big printf "%s" "$contents" > big/b/b1.big contents2="big file 2" printf "%s" "$contents2" > big/big1.big printf "%s" "$contents2" > big/big2.big printf "%s" "$contents2" > big/big3.big git add .gitattributes big git commit -m "commit" | tee commit.log grep "6 files changed" commit.log grep "create mode 100644 .gitattributes" commit.log grep "create mode 100644 big/a/a1.big" commit.log grep "create mode 100644 big/b/b1.big" commit.log grep "create mode 100644 big/big1.big" commit.log grep "create mode 100644 big/big2.big" commit.log grep "create mode 100644 big/big3.big" commit.log git push origin main | tee push.log grep "Uploading LFS objects: 100% (2/2), 18 B" push.log assert_server_object "$reponame" "$contents_oid" ) end_test begin_test "fetch: include first matching file" ( set -e mkdir clone-1 cd clone-1 git init git lfs install --local --skip-smudge git remote add origin $GITSERVER/$reponame git pull origin main refute_local_object "$contents_oid" git lfs ls-files git lfs fetch --include=big/a assert_local_object "$contents_oid" "8" ) end_test begin_test "fetch: include second matching file" ( set -e mkdir clone-2 cd clone-2 git init git lfs install --local --skip-smudge git remote add origin $GITSERVER/$reponame git pull origin main refute_local_object "$contents_oid" git lfs ls-files git lfs fetch --include=big/b assert_local_object "$contents_oid" "8" ) end_test git-lfs-3.6.1/t/t-fetch-paths.sh000077500000000000000000000036541472372047300163600ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" reponame="$(basename "$0" ".sh")" contents="a" contents_oid=$(calc_oid "$contents") begin_test "init fetch unclean paths" ( set -e setup_remote_repo $reponame clone_repo $reponame repo git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log mkdir dir printf "%s" "$contents" > dir/a.dat git add dir/a.dat git add .gitattributes git commit -m "add dir/a.dat" 2>&1 | tee commit.log grep "main (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 dir/a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat dir/a.dat)" ] assert_local_object "$contents_oid" 1 refute_server_object "$contents_oid" git push origin main 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log grep "main -> main" push.log assert_server_object "$reponame" "$contents_oid" # This clone is used for subsequent tests clone_repo "$reponame" clone ) end_test begin_test "fetch unclean paths with include filter in gitconfig" ( set -e cd clone rm -rf .git/lfs/objects git config "lfs.fetchinclude" "dir/" git lfs fetch assert_local_object "$contents_oid" 1 ) end_test begin_test "fetch unclean paths with exclude filter in gitconfig" ( set -e cd clone rm -rf .git/lfs/objects git config --unset "lfs.fetchinclude" git config "lfs.fetchexclude" "dir/" git lfs fetch refute_local_object "$contents_oid" ) end_test begin_test "fetch unclean paths with include filter in cli" ( set -e cd clone rm -rf .git/lfs/objects git config --unset "lfs.fetchexclude" rm -rf .git/lfs/objects git lfs fetch -I="dir/" assert_local_object "$contents_oid" 1 ) end_test begin_test "fetch unclean paths with exclude filter in cli" ( set -e cd clone rm -rf .git/lfs/objects git lfs fetch -X="dir/" refute_local_object "$contents_oid" ) end_test git-lfs-3.6.1/t/t-fetch-recent.sh000077500000000000000000000123761472372047300165220ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" reponame="fetch-recent" # generate content we'll use content0="filecontent0" content1="filecontent1" content2="filecontent2" content3="filecontent3" content4="filecontent4" content5="filecontent5" oid0=$(calc_oid "$content0") oid1=$(calc_oid "$content1") oid2=$(calc_oid "$content2") oid3=$(calc_oid "$content3") oid4=$(calc_oid "$content4") oid5=$(calc_oid "$content5") begin_test "init fetch-recent" ( set -e setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log echo "[ { \"CommitDate\":\"$(get_date -18d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content0}, \"Data\":\"$content0\"}, {\"Filename\":\"file3.dat\",\"Size\":${#content5}, \"Data\":\"$content5\"}] }, { \"CommitDate\":\"$(get_date -14d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content1}, \"Data\":\"$content1\"}] }, { \"CommitDate\":\"$(get_date -5d)\", \"NewBranch\":\"other_branch\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content4}, \"Data\":\"$content4\"}] }, { \"CommitDate\":\"$(get_date -1d)\", \"ParentBranches\":[\"main\"], \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content2}, \"Data\":\"$content2\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content3}, \"Data\":\"$content3\"}] } ]" | lfstest-testutils addcommits git push origin main git push origin other_branch assert_server_object "$reponame" "$oid0" assert_server_object "$reponame" "$oid1" assert_server_object "$reponame" "$oid2" assert_server_object "$reponame" "$oid3" assert_server_object "$reponame" "$oid4" # This clone is used for subsequent tests clone_repo "$reponame" clone git checkout other_branch git checkout main ) end_test begin_test "fetch-recent normal" ( set -e cd clone rm -rf .git/lfs/objects git config lfs.fetchrecentalways false git config lfs.fetchrecentrefsdays 0 git config lfs.fetchrecentremoterefs false git config lfs.fetchrecentcommitsdays 7 # fetch normally, should just get the last state for file1/2 git lfs fetch origin main assert_local_object "$oid2" "${#content2}" assert_local_object "$oid3" "${#content3}" assert_local_object "$oid5" "${#content5}" refute_local_object "$oid0" refute_local_object "$oid1" refute_local_object "$oid4" ) end_test begin_test "fetch-recent commits" ( set -e cd clone rm -rf .git/lfs/objects # now fetch recent - just commits for now git config lfs.fetchrecentrefsdays 0 git config lfs.fetchrecentremoterefs false git config lfs.fetchrecentcommitsdays 7 # force color codes in git diff meta-information git config color.diff always git lfs fetch --recent origin # that should have fetched main plus previous state needed within 7 days # current state assert_local_object "$oid2" "${#content2}" assert_local_object "$oid3" "${#content3}" # previous state is the 'before' state of any commits made in last 7 days # ie you can check out anything in last 7 days (may have non-LFS commits in between) assert_local_object "$oid1" "${#content1}" refute_local_object "$oid0" refute_local_object "$oid4" ) end_test begin_test "fetch-recent days" ( set -e cd clone rm -rf .git/lfs/objects # now fetch other_branch as well git config lfs.fetchrecentrefsdays 6 git config lfs.fetchrecentremoterefs false git config lfs.fetchrecentcommitsdays 7 git lfs fetch --recent origin # that should have fetched main plus previous state needed within 7 days # current state PLUS refs within 6 days (& their commits within 7) assert_local_object "$oid2" "${#content2}" assert_local_object "$oid3" 
"${#content3}" assert_local_object "$oid1" "${#content1}" assert_local_object "$oid4" "${#content4}" # still omits oid0 since that's at best 13 days prior to other_branch tip refute_local_object "$oid0" ) end_test begin_test "fetch-recent older commits" ( set -e cd clone # now test that a 14 day limit picks oid0 up from other_branch # because other_branch was itself 5 days ago, 5+14=19 day search limit git config lfs.fetchrecentcommitsdays 14 rm -rf .git/lfs/objects git lfs fetch --recent origin assert_local_object "$oid0" "${#content0}" ) end_test begin_test "fetch-recent remote branch" ( set -e cd "$reponame" # push branch & test remote branch recent git push origin other_branch cd ../clone git branch -D other_branch rm -rf .git/lfs/objects git config lfs.fetchrecentcommitsdays 0 git config lfs.fetchrecentremoterefs false git config lfs.fetchrecentrefsdays 6 git lfs fetch --recent origin # should miss #4 until we include remote branches (#1 will always be missing commitdays=0) assert_local_object "$oid2" "${#content2}" assert_local_object "$oid3" "${#content3}" refute_local_object "$oid1" refute_local_object "$oid0" refute_local_object "$oid4" ) end_test begin_test "fetch-recent remote refs" ( set -e cd clone rm -rf .git/lfs/objects # pick up just snapshot at remote ref, ie #4 git config lfs.fetchrecentremoterefs true git lfs fetch --recent origin assert_local_object "$oid4" "${#content4}" refute_local_object "$oid0" refute_local_object "$oid1" ) end_test git-lfs-3.6.1/t/t-fetch-refspec.sh000077500000000000000000000040211472372047300166550ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "fetch with good ref" ( set -e reponame="fetch-main-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git push origin main # $ echo "a" | shasum -a 256 oid="87428fc522803d31065e7bce3cf03fe475096631e5e07bbd7a0fde60c4cf25c7" assert_local_object "$oid" 2 assert_server_object "$reponame" "$oid" "refs/heads/main" rm -rf .git/lfs/objects git lfs fetch --all assert_local_object "$oid" 2 ) end_test begin_test "fetch with tracked ref" ( set -e reponame="fetch-tracked-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git push origin main:tracked # $ echo "a" | shasum -a 256 oid="87428fc522803d31065e7bce3cf03fe475096631e5e07bbd7a0fde60c4cf25c7" assert_local_object "$oid" 2 assert_server_object "$reponame" "$oid" "refs/heads/tracked" rm -rf .git/lfs/objects git config push.default upstream git config branch.main.merge refs/heads/tracked git lfs fetch --all assert_local_object "$oid" 2 ) end_test begin_test "fetch with bad ref" ( set -e reponame="fetch-other-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git push origin main:other # $ echo "a" | shasum -a 256 oid="87428fc522803d31065e7bce3cf03fe475096631e5e07bbd7a0fde60c4cf25c7" assert_local_object "$oid" 2 assert_server_object "$reponame" "$oid" "refs/heads/other" rm -rf .git/lfs/objects GIT_CURL_VERBOSE=1 git lfs fetch --all 2>&1 | tee fetch.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected 'git lfs fetch' to fail" exit 1 fi grep 'Expected ref "refs/heads/other", got "refs/heads/main"' fetch.log ) end_test 
git-lfs-3.6.1/t/t-fetch.sh000077500000000000000000000367361472372047300152520ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" contents="a" contents_oid=$(calc_oid "$contents") b="b" b_oid=$(calc_oid "$b") reponame="$(basename "$0" ".sh")" begin_test "init for fetch tests" ( set -e setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log printf "%s" "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log grep "main (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat a.dat)" ] assert_local_object "$contents_oid" 1 refute_server_object "$reponame" "$contents_oid" git push origin main 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log grep "main -> main" push.log assert_server_object "$reponame" "$contents_oid" # Add a file in a different branch git checkout -b newbranch printf "%s" "$b" > b.dat git add b.dat git commit -m "add b.dat" assert_local_object "$b_oid" 1 git push origin newbranch assert_server_object "$reponame" "$b_oid" # These clones are used for subsequent tests clone_repo "$reponame" clone git clone --shared "$TRASHDIR/clone" "$TRASHDIR/shared" ) end_test begin_test "fetch" ( set -e cd clone rm -rf .git/lfs/objects git lfs fetch assert_local_object "$contents_oid" 1 git lfs fsck 2>&1 | tee fsck.log grep "Git LFS fsck OK" fsck.log ) end_test begin_test "fetch (empty file)" ( set -e cd clone rm -rf .git/lfs/objects touch empty.dat git add empty.dat git commit -m 'empty' git lfs fetch git lfs fsck 2>&1 | tee fsck.log grep "Git LFS fsck OK" fsck.log ) end_test begin_test "fetch (shared repository)" ( set -e cd shared rm -rf .git/lfs/objects git lfs fetch 2>&1 | tee fetch.log grep "Could not scan" fetch.log && exit 1 assert_local_object "$contents_oid" 1 git lfs fsck 2>&1 | tee fsck.log grep "Git LFS fsck OK" fsck.log ) end_test begin_test "fetch with remote" ( set -e cd clone rm -rf .git/lfs/objects git lfs fetch origin assert_local_object "$contents_oid" 1 refute_local_object "$b_oid" 1 git lfs fsck 2>&1 | tee fsck.log grep "Git LFS fsck OK" fsck.log ) end_test begin_test "fetch with remote and branches" ( set -e cd clone git checkout newbranch git checkout main rm -rf .git/lfs/objects git lfs fetch origin main newbranch assert_local_object "$contents_oid" 1 assert_local_object "$b_oid" 1 git lfs fsck 2>&1 | tee fsck.log grep "Git LFS fsck OK" fsck.log ) end_test begin_test "fetch with main commit sha1" ( set -e cd clone rm -rf .git/lfs/objects main_sha1=$(git rev-parse main) git lfs fetch origin "$main_sha1" assert_local_object "$contents_oid" 1 refute_local_object "$b_oid" 1 git lfs fsck 2>&1 | tee fsck.log grep "Git LFS fsck OK" fsck.log ) end_test begin_test "fetch with newbranch commit sha1" ( set -e cd clone rm -rf .git/lfs/objects newbranch_sha1=$(git rev-parse newbranch) git lfs fetch origin "$newbranch_sha1" assert_local_object "$contents_oid" 1 assert_local_object "$b_oid" 1 git lfs fsck 2>&1 | tee fsck.log grep "Git LFS fsck OK" fsck.log ) end_test begin_test "fetch with include filters in gitconfig" ( set -e cd clone rm -rf .git/lfs/objects git config "lfs.fetchinclude" "a*" git lfs fetch origin main newbranch assert_local_object "$contents_oid" 1 refute_local_object "$b_oid" git lfs fsck 2>&1 | tee fsck.log grep "Git LFS fsck OK" fsck.log ) end_test begin_test "fetch with 
exclude filters in gitconfig" ( set -e cd clone git config --unset "lfs.fetchinclude" rm -rf .git/lfs/objects git config "lfs.fetchexclude" "a*" git lfs fetch origin main newbranch refute_local_object "$contents_oid" assert_local_object "$b_oid" 1 git lfs fsck 2>&1 | tee fsck.log grep "Git LFS fsck OK" fsck.log ) end_test begin_test "fetch with include/exclude filters in gitconfig" ( set -e cd clone rm -rf .git/lfs/objects git config --unset "lfs.fetchexclude" git config "lfs.fetchinclude" "a*,b*" git config "lfs.fetchexclude" "c*,d*" git lfs fetch origin main newbranch assert_local_object "$contents_oid" 1 assert_local_object "$b_oid" 1 rm -rf .git/lfs/objects git config "lfs.fetchinclude" "c*,d*" git config "lfs.fetchexclude" "a*,b*" git lfs fetch origin main newbranch refute_local_object "$contents_oid" refute_local_object "$b_oid" ) end_test begin_test "fetch with include filter in cli" ( set -e cd clone git config --unset "lfs.fetchinclude" git config --unset "lfs.fetchexclude" rm -rf .git/lfs/objects git lfs fetch --include="a*" origin main newbranch assert_local_object "$contents_oid" 1 refute_local_object "$b_oid" ) end_test begin_test "fetch with exclude filter in cli" ( set -e cd clone rm -rf .git/lfs/objects git lfs fetch --exclude="a*" origin main newbranch refute_local_object "$contents_oid" assert_local_object "$b_oid" 1 ) end_test begin_test "fetch with include/exclude filters in cli" ( set -e cd clone rm -rf .git/lfs/objects git lfs fetch -I "a*,b*" -X "c*,d*" origin main newbranch assert_local_object "$contents_oid" 1 assert_local_object "$b_oid" 1 rm -rf .git/lfs/objects git lfs fetch --include="c*,d*" --exclude="a*,b*" origin main newbranch refute_local_object "$contents_oid" refute_local_object "$b_oid" ) end_test begin_test "fetch with include filter overriding exclude filter" ( set -e cd clone rm -rf .git/lfs/objects git config lfs.fetchexclude "b*" git lfs fetch -I "b.dat" -X "" origin main newbranch assert_local_object "$b_oid" "1" ) end_test begin_test "fetch with missing object" ( set -e cd clone git config --unset lfs.fetchexclude rm -rf .git/lfs/objects delete_server_object "$reponame" "$b_oid" refute_server_object "$reponame" "$b_oid" # should return non-zero, but should also download all the other valid files too set +e git lfs fetch origin main newbranch fetch_exit=$? 
set -e [ "$fetch_exit" != "0" ] assert_local_object "$contents_oid" 1 refute_local_object "$b_oid" ) end_test begin_test "fetch does not crash on empty key files" ( set -e cd clone rm -rf .git/lfs/objects git config --local http.sslKey /dev/null git config --local http.sslCert /dev/null git lfs fetch origin main 2>&1 | tee fetch.log grep "Error decoding PEM block" fetch.log ) end_test begin_test "fetch-all" ( set -e reponame="fetch-all" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log NUMFILES=12 # generate content we'll use for ((a=0; a < NUMFILES ; a++)) do content[$a]="filecontent$a" oid[$a]=$(calc_oid "${content[$a]}") done echo "[ { \"CommitDate\":\"$(get_date -180d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[0]}, \"Data\":\"${content[0]}\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content[1]}, \"Data\":\"${content[1]}\"}] }, { \"NewBranch\":\"branch1\", \"CommitDate\":\"$(get_date -140d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":${#content[2]}, \"Data\":\"${content[2]}\"}] }, { \"ParentBranches\":[\"main\"], \"CommitDate\":\"$(get_date -100d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[3]}, \"Data\":\"${content[3]}\"}] }, { \"NewBranch\":\"remote_branch_only\", \"CommitDate\":\"$(get_date -80d)\", \"Files\":[ {\"Filename\":\"file2.dat\",\"Size\":${#content[4]}, \"Data\":\"${content[4]}\"}] }, { \"ParentBranches\":[\"main\"], \"CommitDate\":\"$(get_date -75d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[5]}, \"Data\":\"${content[5]}\"}] }, { \"NewBranch\":\"tag_only\", \"Tags\":[\"tag1\"], \"CommitDate\":\"$(get_date -70d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[6]}, \"Data\":\"${content[6]}\"}] }, { \"ParentBranches\":[\"main\"], \"CommitDate\":\"$(get_date -60d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[7]}, \"Data\":\"${content[7]}\"}] }, { \"NewBranch\":\"branch3\", \"CommitDate\":\"$(get_date -50d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[8]}, \"Data\":\"${content[8]}\"}] }, { \"CommitDate\":\"$(get_date -40d)\", \"ParentBranches\":[\"main\"], \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[9]}, \"Data\":\"${content[9]}\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content[10]}, \"Data\":\"${content[10]}\"}] }, { \"ParentBranches\":[\"main\"], \"CommitDate\":\"$(get_date -30d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[11]}, \"Data\":\"${content[11]}\"}] } ]" | lfstest-testutils addcommits git push origin main git push origin branch1 git push origin branch3 git push origin remote_branch_only git push origin tag_only git push origin tag1 for ((a=0; a < NUMFILES ; a++)) do assert_server_object "$reponame" "${oid[$a]}" done # delete remote_branch_only and make sure that objects are downloaded even # though not checked out to a local branch (full backup always) git branch -D remote_branch_only # delete tag_only to make sure objects are downloaded when only reachable from tag git branch -D tag_only rm -rf .git/lfs/objects git lfs fetch --all origin for ((a=0; a < NUMFILES ; a++)) do assert_local_object "${oid[$a]}" "${#content[$a]}" done rm -rf .git/lfs/objects # fetch all objects reachable from the main branch only git lfs fetch --all origin main for a in 0 1 3 5 7 9 10 11 do assert_local_object "${oid[$a]}" "${#content[$a]}" done for a in 2 4 6 8 do refute_local_object "${oid[$a]}" done rm -rf .git/lfs/objects # fetch all 
objects reachable from branch1 and tag1 only git lfs fetch --all origin branch1 tag1 for a in 0 1 2 3 5 6 do assert_local_object "${oid[$a]}" "${#content[$a]}" done for a in 4 7 8 9 10 11 do refute_local_object "${oid[$a]}" done # Make a bare clone of the repository cd .. git clone --bare "$GITSERVER/$reponame" "$reponame-bare" cd "$reponame-bare" # Perform the same assertion as above, on the same data git lfs fetch --all origin for ((a=0; a < NUMFILES ; a++)) do assert_local_object "${oid[$a]}" "${#content[$a]}" done rm -rf lfs/objects # fetch all objects reachable from the main branch only git lfs fetch --all origin main for a in 0 1 3 5 7 9 10 11 do assert_local_object "${oid[$a]}" "${#content[$a]}" done for a in 2 4 6 8 do refute_local_object "${oid[$a]}" done rm -rf lfs/objects # fetch all objects reachable from branch1 and tag1 only git lfs fetch --all origin branch1 tag1 for a in 0 1 2 3 5 6 do assert_local_object "${oid[$a]}" "${#content[$a]}" done for a in 4 7 8 9 10 11 do refute_local_object "${oid[$a]}" done ) end_test begin_test "fetch: outside git repository" ( set +e git lfs fetch 2>&1 > fetch.log res=$? set -e if [ "$res" = "0" ]; then echo "Passes because $GIT_LFS_TEST_DIR is unset." exit 0 fi [ "$res" = "128" ] grep "Not in a Git repository" fetch.log ) end_test begin_test "fetch with no origin remote" ( set -e reponame="fetch-no-remote" setup_remote_repo "$reponame" clone_repo "$reponame" no-remote-clone clone_repo "$reponame" no-remote-repo git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log grep "main (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat a.dat)" ] assert_local_object "$contents_oid" 1 refute_server_object "$reponame" "$contents_oid" git push origin main 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log grep "main -> main" push.log # change to the clone's working directory cd ../no-remote-clone # pull commits & lfs git pull origin main 2>&1 assert_local_object "$contents_oid" 1 # now checkout detached HEAD so we're not tracking anything on remote git checkout --detach # delete lfs rm -rf .git/lfs # rename remote from 'origin' to 'something' git remote rename origin something # fetch should still pick this remote as in the case of no tracked remote, # and no origin, but only 1 remote, should pick the only one as default git lfs fetch assert_local_object "$contents_oid" 1 ) end_test begin_test "fetch --prune" ( set -e reponame="fetch_prune" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log content_head="HEAD content" content_commit2="Content for commit 2 (prune)" content_commit1="Content for commit 1 (prune)" oid_head=$(calc_oid "$content_head") oid_commit2=$(calc_oid "$content_commit2") oid_commit1=$(calc_oid "$content_commit1") echo "[ { \"CommitDate\":\"$(get_date -50d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_commit1}, \"Data\":\"$content_commit1\"}] }, { \"CommitDate\":\"$(get_date -35d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_commit2}, \"Data\":\"$content_commit2\"}] }, { \"CommitDate\":\"$(get_date -25d)\", \"Files\":[ 
{\"Filename\":\"file.dat\",\"Size\":${#content_head}, \"Data\":\"$content_head\"}] } ]" | lfstest-testutils addcommits # push all so no unpushed reason to not prune git push origin main # set no recents so max ability to prune git config lfs.fetchrecentrefsdays 0 git config lfs.fetchrecentcommitsdays 0 # delete HEAD object to prove that we still download something # also prune at the same time which will remove anything other than HEAD delete_local_object "$oid_head" git lfs fetch --prune assert_local_object "$oid_head" "${#content_head}" refute_local_object "$oid_commit1" refute_local_object "$oid_commit2" ) end_test begin_test "fetch raw remote url" ( set -e mkdir raw cd raw git init git lfs install --local --skip-smudge git remote add origin "$GITSERVER/$reponame" git pull origin main # LFS object not downloaded, pointer in working directory refute_local_object "$contents_oid" grep "$content_oid" a.dat git lfs fetch "$GITSERVER/$reponame" # LFS object downloaded, pointer still in working directory assert_local_object "$contents_oid" 1 grep "$content_oid" a.dat ) end_test begin_test "fetch with invalid ref" ( set -e cd repo git lfs fetch origin jibberish >fetch.log 2>&1 && exit 1 grep "Invalid ref argument" fetch.log ) end_test begin_test "fetch with invalid remote" ( set -e cd repo git lfs fetch not-a-remote 2>&1 | tee fetch.log grep "Invalid remote name" fetch.log ) end_test begin_test "fetch fails when LFS directory has wrong permissions" ( set -e # Windows lacks POSIX permissions. [ "$IS_WINDOWS" -eq 1 ] && exit 0 # Root is exempt from permissions. [ "$(id -u)" -eq 0 ] && exit 0 cd shared rm -rf .git/lfs/objects mkdir .git/lfs/objects chmod 400 .git/lfs/objects git lfs fetch 2>&1 | tee fetch.log grep "error trying to create local storage directory" fetch.log ) end_test git-lfs-3.6.1/t/t-filter-branch.sh000077500000000000000000000016441472372047300166670ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "filter-branch (git-lfs/git-lfs#1773)" ( set -e reponame="filter-branch" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents_a="contents (a)" printf "%s" "$contents_a" > a.dat git add a.dat git commit -m "add a.dat" contents_b="contents (b)" printf "%s" "$contents_b" > b.dat git add b.dat git commit -m "add b.dat" contents_c="contents (c)" printf "%s" "$contents_c" > c.dat git add c.dat git commit -m "add c.dat" git filter-branch -f --prune-empty \ --tree-filter ' echo >&2 "---" git rm --cached -r -q . git lfs track "*.dat" git add . ' --tag-name-filter cat -- --all assert_pointer "main" "a.dat" "$(calc_oid "$contents_a")" 12 assert_pointer "main" "b.dat" "$(calc_oid "$contents_b")" 12 assert_pointer "main" "c.dat" "$(calc_oid "$contents_c")" 12 ) end_test git-lfs-3.6.1/t/t-filter-process.sh000077500000000000000000000153551472372047300171140ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" # Only test with Git version 2.11.0 and above as that version introduced # support for the "filter" attribute and protocol. 
ensure_git_version_isnt $VERSION_LOWER "2.11.0" begin_test "filter process: checking out a branch" ( set -e reponame="filter_process_checkout" setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents_a="contents_a" contents_a_oid="$(calc_oid $contents_a)" printf "%s" "$contents_a" > a.dat git add a.dat git commit -m "add a.dat" git checkout -b b contents_b="contents_b" contents_b_oid="$(calc_oid $contents_b)" printf "%s" "$contents_b" > b.dat git add b.dat git commit -m "add b.dat" git push origin --all pushd .. # Git will choose filter.lfs.process over `filter.lfs.clean` and # `filter.lfs.smudge` GIT_TRACE_PACKET=1 git \ -c "filter.lfs.process=git-lfs filter-process" \ -c "filter.lfs.clean=false"\ -c "filter.lfs.smudge=false" \ -c "filter.lfs.required=true" \ clone "$GITSERVER/$reponame" "$reponame-assert" cd "$reponame-assert" # Assert that we are on the "main" branch, and have a.dat [ "main" = "$(git rev-parse --abbrev-ref HEAD)" ] [ "$contents_a" = "$(cat a.dat)" ] assert_pointer "main" "a.dat" "$contents_a_oid" 10 git checkout b # Assert that we are on the "b" branch, and have b.dat [ "b" = "$(git rev-parse --abbrev-ref HEAD)" ] [ "$contents_b" = "$(cat b.dat)" ] assert_pointer "b" "b.dat" "$contents_b_oid" 10 popd ) end_test begin_test "filter process: include/exclude" ( set -e reponame="$(basename "$0" ".sh")-includeexclude" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" mkdir -p foo/bar contents_a="contents_a" contents_a_oid="$(calc_oid $contents_a)" printf "%s" "$contents_a" > a.dat cp a.dat foo cp a.dat foo/bar git add .gitattributes a.dat foo git commit -m "initial commit" git push origin main # The Git LFS objects for a.dat and foo/bar/a.dat would both download except # we're going to prevent them from doing so with include/exclude. # We also need to prevent MSYS from rewriting /foo into a Windows path. MSYS_NO_PATHCONV=1 git config --global "lfs.fetchinclude" "/foo" MSYS_NO_PATHCONV=1 git config --global "lfs.fetchexclude" "/foo/bar" pushd .. # Git will choose filter.lfs.process over `filter.lfs.clean` and # `filter.lfs.smudge` GIT_TRACE_PACKET=1 git \ -c "filter.lfs.process=git-lfs filter-process" \ -c "filter.lfs.clean=false"\ -c "filter.lfs.smudge=false" \ -c "filter.lfs.required=true" \ clone "$GITSERVER/$reponame" "$reponame-assert" cd "$reponame-assert" pointer="$(pointer "$contents_a_oid" 10)" [ "$pointer" = "$(cat a.dat)" ] assert_pointer "main" "a.dat" "$contents_a_oid" 10 [ "$contents_a" = "$(cat foo/a.dat)" ] assert_pointer "main" "foo/a.dat" "$contents_a_oid" 10 [ "$pointer" = "$(cat foo/bar/a.dat)" ] assert_pointer "main" "foo/bar/a.dat" "$contents_a_oid" 10 popd ) end_test begin_test "filter process: adding a file" ( set -e reponame="filter_process_add" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="contents" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git add a.dat expected="$(pointer "$contents_oid" "${#contents}")" got="$(git cat-file -p :a.dat)" diff -u <(echo "$expected") <(echo "$got") ) end_test # https://github.com/git-lfs/git-lfs/issues/1697 begin_test "filter process: add a file with 1024 bytes" ( set -e mkdir repo-issue-1697 cd repo-issue-1697 git init git lfs track "*.dat" dd if=/dev/zero of=first.dat bs=1024 count=1 printf "any contents" > second.dat git add . 
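# (The 1024-byte file above targets the buffer-boundary regression tracked in
# git-lfs/git-lfs#1697, referenced above; adding both files with one
# "git add ." runs them through a single long-running filter-process
# session, which is where the original hang was reported.)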
) end_test begin_test "filter process: hash-object --stdin --path does not hang" ( set -e mkdir repo-hash-object cd repo-hash-object git init git lfs track "*.dat" contents="test" contents_oid="$(calc_oid "$contents")" expected=$(pointer "$contents_oid" 4 | git hash-object --stdin) dd if=/dev/zero of=first.dat bs=1000 count=1 echo a > second.dat # Works for existing file longer than this one. output=$(printf test | git hash-object --path first.dat --stdin) [ "$expected" = "$output" ] # Works for existing file shorter than this one. output=$(printf test | git hash-object --path second.dat --stdin) [ "$expected" = "$output" ] # Works for absent file. output=$(printf test | git hash-object --path third.dat --stdin) [ "$expected" = "$output" ] dd if=/dev/zero of=large.dat bs=65537 count=1 oid=$(calc_oid_file large.dat) expected=$(pointer "$oid" 65537 | git hash-object --stdin) output=$(git hash-object --path third.dat --stdin a.dat git add a.dat git commit -m "add a.dat" git checkout -b b contents_b="contents_b" contents_b_oid="$(calc_oid $contents_b)" printf "%s" "$contents_b" > b.dat git add b.dat git commit -m "add b.dat" git lfs install --local --skip-smudge git checkout main rm a.dat git checkout-index -af git lfs pointer --check --file a.dat assert_pointer "main" "a.dat" "$contents_a_oid" 10 git checkout b rm *.dat git checkout-index -af git lfs pointer --check --file a.dat git lfs pointer --check --file b.dat # Assert that we are on the "b" branch, and have b.dat assert_pointer "b" "b.dat" "$contents_b_oid" 10 ) end_test begin_test "filter process: git archive does not invoke SSH" ( set -e setup_pure_ssh reponame="filter-process-archive" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" sshurl=$(ssh_remote "$reponame") git config lfs.url "$sshurl" contents="test" git lfs track "*.dat" printf "%s" "$contents" > test.dat git add .gitattributes test.dat git commit -m "initial commit" git push origin main 2>&1 cd .. GIT_TRACE=1 git clone "$sshurl" "$reponame-2" 2>&1 | tee trace.log grep "lfs-ssh-echo.*git-lfs-transfer .*$reponame.git download" trace.log cd "$reponame-2" GIT_TRACE=1 GIT_TRACE_PACKET=1 git archive -o foo.tar HEAD 2>&1 | tee archive.log grep 'pure SSH' archive.log && exit 1 true ) end_test git-lfs-3.6.1/t/t-fsck.sh000077500000000000000000000333571472372047300151030ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "fsck default" ( set -e reponame="fsck-default" git init $reponame cd $reponame # Create a commit with some files tracked by git-lfs git lfs track *.dat echo "test data" > a.dat echo "test data 2" > b.dat git add .gitattributes *.dat git commit -m "first commit" [ "Git LFS fsck OK" = "$(git lfs fsck)" ] aOid=$(git log --patch a.dat | grep "^+oid" | cut -d ":" -f 2) aOid12=$(echo $aOid | cut -b 1-2) aOid34=$(echo $aOid | cut -b 3-4) if [ "$aOid" != "$(calc_oid_file .git/lfs/objects/$aOid12/$aOid34/$aOid)" ]; then echo "oid for a.dat does not match" exit 1 fi bOid=$(git log --patch b.dat | grep "^+oid" | cut -d ":" -f 2) bOid12=$(echo $bOid | cut -b 1-2) bOid34=$(echo $bOid | cut -b 3-4) if [ "$bOid" != "$(calc_oid_file .git/lfs/objects/$bOid12/$bOid34/$bOid)" ]; then echo "oid for b.dat does not match" exit 1 fi echo "CORRUPTION" >> .git/lfs/objects/$aOid12/$aOid34/$aOid moved=$(canonical_path "$TRASHDIR/$reponame/.git/lfs/bad") expected="$(printf 'objects: corruptObject: a.dat (%s) is corrupt objects: repair: moving corrupt objects to %s' "$aOid" "$moved")" [ "$expected" = "$(git lfs fsck)" ] [ -e ".git/lfs/bad/$aOid" ] [ ! -e ".git/lfs/objects/$aOid12/$aOid34/$aOid" ] [ "$bOid" = "$(calc_oid_file .git/lfs/objects/$bOid12/$bOid34/$bOid)" ] ) end_test begin_test "fsck dry run" ( set -e reponame="fsck-dry-run" git init $reponame cd $reponame # Create a commit with some files tracked by git-lfs git lfs track *.dat echo "test data" > a.dat echo "test data 2" > b.dat git add .gitattributes *.dat git commit -m "first commit" [ "Git LFS fsck OK" = "$(git lfs fsck --dry-run)" ] aOid=$(git log --patch a.dat | grep "^+oid" | cut -d ":" -f 2) aOid12=$(echo $aOid | cut -b 1-2) aOid34=$(echo $aOid | cut -b 3-4) if [ "$aOid" != "$(calc_oid_file .git/lfs/objects/$aOid12/$aOid34/$aOid)" ]; then echo "oid for a.dat does not match" exit 1 fi bOid=$(git log --patch b.dat | grep "^+oid" | cut -d ":" -f 2) bOid12=$(echo $bOid | cut -b 1-2) bOid34=$(echo $bOid | cut -b 3-4) if [ "$bOid" != "$(calc_oid_file .git/lfs/objects/$bOid12/$bOid34/$bOid)" ]; then echo "oid for b.dat does not match" exit 1 fi echo "CORRUPTION" >> .git/lfs/objects/$aOid12/$aOid34/$aOid [ "objects: corruptObject: a.dat ($aOid) is corrupt" = "$(git lfs fsck --dry-run)" ] if [ "$aOid" = "$(calc_oid_file .git/lfs/objects/$aOid12/$aOid34/$aOid)" ]; then echo "oid for a.dat still matches match" exit 1 fi if [ "$bOid" != "$(calc_oid_file .git/lfs/objects/$bOid12/$bOid34/$bOid)" ]; then echo "oid for b.dat does not match" exit 1 fi ) end_test begin_test "fsck does not fail with shell characters in paths" ( set -e mkdir '[[path]]' cd '[[path]]' reponame="fsck-shell-paths" git init $reponame cd $reponame # Create a commit with some files tracked by git-lfs git lfs track *.dat echo "test data" > a.dat echo "test data 2" > b.dat git add .gitattributes *.dat git commit -m "first commit" # Verify that the pack code handles glob patterns properly. git gc --aggressive --prune=now [ "Git LFS fsck OK" = "$(git lfs fsck)" ] ) end_test begin_test "fsck: outside git repository" ( set +e git lfs fsck 2>&1 > fsck.log res=$? set -e if [ "$res" = "0" ]; then echo "Passes because $GIT_LFS_TEST_DIR is unset." 
exit 0 fi [ "$res" = "128" ] grep "Not in a Git repository" fsck.log ) end_test create_invalid_pointers() { valid="$1" ext="${2:-dat}" git cat-file blob ":$valid" | awk '{ sub(/$/, "\r"); print }' >"crlf.$ext" lfstest-genrandom --base64 1025 >"large.$ext" git \ -c "filter.lfs.process=" \ -c "filter.lfs.clean=cat" \ -c "filter.lfs.required=false" \ add "crlf.$ext" "large.$ext" git commit -m "invalid pointers" } setup_invalid_pointers () { git init $reponame cd $reponame # Create a commit with some files tracked by git-lfs git lfs track *.dat echo "test data" > a.dat echo "test data 2" > b.dat git add .gitattributes *.dat git commit -m "first commit" create_invalid_pointers "a.dat" } begin_test "fsck detects invalid pointers" ( set -e reponame="fsck-pointers" setup_invalid_pointers set +e git lfs fsck >test.log 2>&1 RET=$? git lfs fsck --pointers >>test.log 2>&1 RET2=$? set -e [ "$RET" -eq 1 ] [ "$RET2" -eq 1 ] [ $(grep -c 'pointer: nonCanonicalPointer: Pointer.*was not canonical' test.log) -eq 2 ] [ $(grep -c 'pointer: unexpectedGitObject: "large.dat".*should have been a pointer but was not' test.log) -eq 2 ] ) end_test begin_test "fsck detects invalid pointers with macro patterns" ( set -e reponame="fsck-pointers-macros" git init $reponame cd $reponame printf '[attr]lfs filter=lfs diff=lfs merge=lfs -text\n*.dat lfs\n' \ >.gitattributes echo "test data" >a.dat mkdir dir printf '*.bin lfs\n' >dir/.gitattributes git add .gitattributes a.dat dir git commit -m "first commit" create_invalid_pointers "a.dat" cd dir create_invalid_pointers "a.dat" "bin" cd .. # NOTE: We should also create a .dir directory with the same files as # as in the dir/ directory, and confirm those .dir/*.bin files are # reported by "git lfs fsck" as well. However, at the moment # "git lfs fsck" will not resolve a macro attribute reference # in .dir/.gitattributes because it sorts that file before # .gitattributes and then processes them in that order. set +e git lfs fsck >test.log 2>&1 RET=$? git lfs fsck --pointers >>test.log 2>&1 RET2=$? set -e [ "$RET" -eq 1 ] [ "$RET2" -eq 1 ] [ $(grep -c 'pointer: nonCanonicalPointer: Pointer.*was not canonical' test.log) -eq 4 ] [ $(grep -c 'pointer: unexpectedGitObject: "large.dat".*should have been a pointer but was not' test.log) -eq 2 ] [ $(grep -c 'pointer: unexpectedGitObject: "dir/large.bin".*should have been a pointer but was not' test.log) -eq 2 ] ) end_test begin_test "fsck detects invalid pointers with GIT_OBJECT_DIRECTORY" ( set -e reponame="fsck-pointers-object-directory" setup_invalid_pointers head=$(git rev-parse HEAD) objdir="$(lfstest-realpath .git/objects)" cd .. git init "$reponame-2" gitdir="$(lfstest-realpath "$reponame-2/.git")" GIT_WORK_TREE="$reponame-2" GIT_DIR="$gitdir" GIT_OBJECT_DIRECTORY="$objdir" git update-ref refs/heads/main "$head" set +e GIT_WORK_TREE="$reponame-2" GIT_DIR="$gitdir" GIT_OBJECT_DIRECTORY="$objdir" git lfs fsck --pointers >test.log 2>&1 RET=$? 
set -e [ "$RET" -eq 1 ] grep 'pointer: nonCanonicalPointer: Pointer.*was not canonical' test.log grep 'pointer: unexpectedGitObject: "large.dat".*should have been a pointer but was not' test.log ) end_test begin_test "fsck does not detect invalid pointers with no LFS objects" ( set -e reponame="fsck-pointers-none" git init "$reponame" cd "$reponame" echo "# README" > README.md git add README.md git commit -m "Add README" git lfs fsck git lfs fsck --pointers ) end_test begin_test "fsck does not detect invalid pointers with symlinks" ( set -e reponame="fsck-pointers-symlinks" git init "$reponame" cd "$reponame" git lfs track '*.dat' echo "# Test" > a.dat ln -s a.dat b.dat git add .gitattributes *.dat git commit -m "Add files" git lfs fsck git lfs fsck --pointers ) end_test begin_test "fsck does not detect invalid pointers with negated patterns" ( set -e reponame="fsck-pointers-none" git init "$reponame" cd "$reponame" cat > .gitattributes < a.dat cp a.dat b.dat git add .gitattributes *.dat git commit -m "Add files" git lfs fsck git lfs fsck --pointers ) end_test begin_test "fsck does not detect invalid pointers with negated macro patterns" ( set -e reponame="fsck-pointers-macros-none" git init "$reponame" cd "$reponame" printf '[attr]lfs filter=lfs diff=lfs merge=lfs -text\n*.dat lfs\nb.dat !lfs\n' \ >.gitattributes echo "test data" >a.dat cp a.dat b.dat mkdir dir .dir printf '*.dat !lfs\n' >dir/.gitattributes cp b.dat dir printf '*.dat !lfs\n' >.dir/.gitattributes cp b.dat .dir git add .gitattributes *.dat dir .dir git commit -m "first commit" # NOTE: The "git lfs fsck" command exempts the .dir/b.dat file from the # *.dat pattern from the top-level .gitattributes and so permits # it as a valid non-pointer file; however, it permits it for a # different reason than the dir/b.dat file, because it processes # the .dir/.gitattributes file before the .gitattributes one # and does not recognize the "!lfs" macro attribute reference until # after it has processed .gitattributes. Ideally both the dir/ # and .dir/ directories should be processed identically. git lfs fsck git lfs fsck --pointers ) end_test setup_invalid_objects () { git init $reponame cd $reponame # Create a commit with some files tracked by git-lfs git lfs track *.dat echo "test data" > a.dat echo "test data 2" > b.dat mkdir foo echo "test test 3" > foo/a.dat echo "test data 4" > foo/b.dat git add .gitattributes *.dat foo git commit -m "first commit" oid1=$(calc_oid_file a.dat) oid2=$(calc_oid_file b.dat) oid3=$(calc_oid_file foo/a.dat) oid4=$(calc_oid_file foo/b.dat) echo "CORRUPTION" >>".git/lfs/objects/${oid1:0:2}/${oid1:2:2}/$oid1" rm ".git/lfs/objects/${oid2:0:2}/${oid2:2:2}/$oid2" echo "CORRUPTION" >>".git/lfs/objects/${oid3:0:2}/${oid3:2:2}/$oid3" rm ".git/lfs/objects/${oid4:0:2}/${oid4:2:2}/$oid4" } begin_test "fsck detects invalid objects" ( set -e reponame="fsck-objects" setup_invalid_objects set +e git lfs fsck >test.log 2>&1 RET=$? set -e [ "$RET" -eq 1 ] [ $(grep -c 'objects: corruptObject: a.dat (.*) is corrupt' test.log) -eq 1 ] [ $(grep -c 'objects: openError: b.dat (.*) could not be checked: .*' test.log) -eq 1 ] [ $(grep -c 'objects: corruptObject: foo/a.dat (.*) is corrupt' test.log) -eq 1 ] [ $(grep -c 'objects: openError: foo/b.dat (.*) could not be checked: .*' test.log) -eq 1 ] [ $(grep -c 'objects: repair: moving corrupt objects to .*' test.log) -eq 1 ] cd .. rm -rf $reponame setup_invalid_objects set +e git lfs fsck --objects >test.log 2>&1 RET=$? 
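  # A note on the cache paths touched by setup_invalid_objects above: the
  # bash substring expansions ${oid:0:2} and ${oid:2:2} take the first and
  # second byte pairs of the SHA-256 OID, which form the two directory
  # levels under which Git LFS shards its local object cache.  For a
  # hypothetical OID beginning "5aa03f96", for example:
  #
  #   $ echo ".git/lfs/objects/${oid:0:2}/${oid:2:2}/$oid"
  #   .git/lfs/objects/5a/a0/5aa03f96...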
set -e [ "$RET" -eq 1 ] [ $(grep -c 'objects: corruptObject: a.dat (.*) is corrupt' test.log) -eq 1 ] [ $(grep -c 'objects: openError: b.dat (.*) could not be checked: .*' test.log) -eq 1 ] [ $(grep -c 'objects: corruptObject: foo/a.dat (.*) is corrupt' test.log) -eq 1 ] [ $(grep -c 'objects: openError: foo/b.dat (.*) could not be checked: .*' test.log) -eq 1 ] [ $(grep -c 'objects: repair: moving corrupt objects to .*' test.log) -eq 1 ] ) end_test begin_test "fsck detects invalid objects except in excluded paths" ( set -e reponame="fsck-objects-exclude" setup_invalid_objects # We need to prevent MSYS from rewriting /foo into a Windows path. MSYS_NO_PATHCONV=1 git config "lfs.fetchexclude" "/foo" set +e git lfs fsck >test.log 2>&1 RET=$? set -e [ "$RET" -eq 1 ] [ $(grep -c 'objects: corruptObject: a.dat (.*) is corrupt' test.log) -eq 1 ] [ $(grep -c 'objects: openError: b.dat (.*) could not be checked: .*' test.log) -eq 1 ] [ $(grep -c 'objects: corruptObject: foo/a.dat (.*) is corrupt' test.log) -eq 0 ] [ $(grep -c 'objects: openError: foo/b.dat (.*) could not be checked: .*' test.log) -eq 0 ] [ $(grep -c 'objects: repair: moving corrupt objects to .*' test.log) -eq 1 ] cd .. rm -rf $reponame setup_invalid_objects # We need to prevent MSYS from rewriting /foo into a Windows path. MSYS_NO_PATHCONV=1 git config "lfs.fetchexclude" "/foo" set +e git lfs fsck --objects >test.log 2>&1 RET=$? set -e [ "$RET" -eq 1 ] [ $(grep -c 'objects: corruptObject: a.dat (.*) is corrupt' test.log) -eq 1 ] [ $(grep -c 'objects: openError: b.dat (.*) could not be checked: .*' test.log) -eq 1 ] [ $(grep -c 'objects: corruptObject: foo/a.dat (.*) is corrupt' test.log) -eq 0 ] [ $(grep -c 'objects: openError: foo/b.dat (.*) could not be checked: .*' test.log) -eq 0 ] [ $(grep -c 'objects: repair: moving corrupt objects to .*' test.log) -eq 1 ] ) end_test begin_test "fsck does not detect invalid objects with no LFS objects" ( set -e reponame="fsck-objects-none" git init "$reponame" cd "$reponame" echo "# README" > README.md git add README.md git commit -m "Add README" git lfs fsck git lfs fsck --objects ) end_test begin_test "fsck operates on specified refs" ( set -e reponame="fsck-refs" setup_invalid_pointers git rm -f crlf.dat large.dat echo "# Test" > new.dat git add new.dat git commit -m 'third commit' git commit --allow-empty -m 'fourth commit' # Should succeed. (HEAD and index). git lfs fsck git lfs fsck HEAD git lfs fsck HEAD^^ && exit 1 git lfs fsck HEAD^ git lfs fsck HEAD^..HEAD git lfs fsck HEAD^^^..HEAD && exit 1 git lfs fsck HEAD^^^..HEAD^ && exit 1 git lfs fsck --pointers HEAD^^^..HEAD^^ >test.log 2>&1 && exit 1 grep 'pointer: nonCanonicalPointer: Pointer.*was not canonical' test.log grep 'pointer: unexpectedGitObject: "large.dat".*should have been a pointer but was not' test.log oid=$(calc_oid_file new.dat) echo "CORRUPTION" >>".git/lfs/objects/${oid:0:2}/${oid:2:2}/$oid" git lfs fsck --objects HEAD^^..HEAD^ >test.log 2>&1 && exit 1 grep 'objects: corruptObject: new.dat (.*) is corrupt' test.log grep 'objects: repair: moving corrupt objects to .*' test.log # Make the result of the subshell a success. true ) end_test begin_test "fsck detects invalid ref" ( set -e reponame="fsck-default" git init $reponame cd $reponame git lfs fsck jibberish >fsck.log 2>&1 && exit 1 grep "can't resolve ref" fsck.log ) end_test git-lfs-3.6.1/t/t-happy-path.sh000077500000000000000000000120741472372047300162210ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" # This is a sample Git LFS test. 
See test/README.md and testhelpers.sh for # more documentation. begin_test "happy path" ( set -e # This initializes a new bare git repository in test/remote. # These remote repositories are global to every test, so keep the names # unique. reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" # Clone the repository from the test Git server. This is empty, and will be # used to test a "git pull" below. The repo is cloned to $TRASHDIR/clone clone_repo "$reponame" clone # Clone the repository again to $TRASHDIR/repo. This will be used to commit # and push objects. clone_repo "$reponame" repo # This executes Git LFS from the local repo that was just cloned. git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") # Regular Git commands can be used. printf "%s" "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log grep "main (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat a.dat)" ] # This is a small shell function that runs several git commands together. assert_pointer "main" "a.dat" "$contents_oid" 1 refute_server_object "$reponame" "$contents_oid" # This pushes to the remote repository set up at the top of the test. git push origin main 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log grep "main -> main" push.log assert_server_object "$reponame" "$contents_oid" # change to the clone's working directory cd ../clone git pull origin main [ "a" = "$(cat a.dat)" ] assert_pointer "main" "a.dat" "$contents_oid" 1 ) end_test begin_test "happy path on non-origin remote" ( set -e reponame="happy-without-origin" setup_remote_repo "$reponame" clone_repo "$reponame" repo-without-origin git lfs track "*.dat" git add .gitattributes git commit -m "track" git push origin main clone_repo "$reponame" clone-without-origin git remote rename origin happy-path cd ../repo-without-origin echo "a" > a.dat git add a.dat git commit -m "boom" git push origin main cd ../clone-without-origin echo "remotes:" git remote git pull happy-path main ) end_test begin_test "happy path on good ref" ( set -e reponame="happy-path-main-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git push origin main # $ echo "a" | shasum -a 256 oid="87428fc522803d31065e7bce3cf03fe475096631e5e07bbd7a0fde60c4cf25c7" assert_local_object "$oid" 2 assert_server_object "$reponame" "$oid" "refs/heads/main" clone_repo "$reponame" "$reponame-clone" assert_local_object "$oid" 2 ) end_test begin_test "happy path on tracked ref" ( set -e reponame="happy-path-tracked-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git push origin main:tracked # $ echo "a" | shasum -a 256 oid="87428fc522803d31065e7bce3cf03fe475096631e5e07bbd7a0fde60c4cf25c7" assert_local_object "$oid" 2 assert_server_object "$reponame" "$oid" "refs/heads/tracked" git lfs clone "$GITSERVER/$reponame" --exclude "*" git config credential.helper lfstest git config push.default upstream git config branch.main.merge refs/heads/tracked git checkout assert_local_object "$oid" 2 ) end_test begin_test "clears local temp objects" ( set -e mkdir repo-temp-objects cd 
repo-temp-objects git init # abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz01 mkdir -p .git/lfs/objects/go/od mkdir -p .git/lfs/tmp/objects touch .git/lfs/objects/go/od/goodabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwx touch .git/lfs/tmp/objects/goodabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwx-rand123 touch .git/lfs/tmp/objects/goodabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwx-rand456 touch .git/lfs/tmp/objects/badabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxy-rand123 touch .git/lfs/tmp/objects/badabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxy-rand456 GIT_TRACE=5 git lfs env # object file exists [ -e ".git/lfs/objects/go/od/goodabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwx" ] # newer tmp files exist [ -e ".git/lfs/tmp/objects/badabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxy-rand123" ] [ -e ".git/lfs/tmp/objects/badabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxy-rand456" ] # existing tmp files were cleaned up [ ! -e ".git/lfs/tmp/objects/goodabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwx-rand123" ] [ ! -e ".git/lfs/tmp/objects/goodabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwx-rand456" ] ) end_test git-lfs-3.6.1/t/t-install-custom-hooks-path-unsupported.sh000077500000000000000000000012251472372047300235610ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" # These tests rely on behavior found in Git versions less than 2.9.0 to perform # themselves, specifically: # - lack of core.hooksPath support ensure_git_version_isnt $VERSION_HIGHER "2.9.0" begin_test "install with unsupported core.hooksPath" ( set -e repo_name="unsupported-custom-hooks-path" git init "$repo_name" cd "$repo_name" hooks_dir="custom_hooks_dir" mkdir -p "$hooks_dir" git config --local core.hooksPath "$hooks_dir" git lfs install 2>&1 | tee install.log grep "Updated Git hooks" install.log [ ! -e "$hooks_dir/pre-push" ] [ -e ".git/hooks/pre-push" ] ) end_test git-lfs-3.6.1/t/t-install-custom-hooks-path.sh000077500000000000000000000036071472372047300212010ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" # These tests rely on behavior found in 2.9.0 to perform themselves, # specifically: # - core.hooksPath support ensure_git_version_isnt $VERSION_LOWER "2.9.0" assert_hooks() { local hooks_dir="$1" [ -e "$hooks_dir/pre-push" ] [ ! -e ".git/pre-push" ] [ -e "$hooks_dir/post-checkout" ] [ ! -e ".git/post-checkout" ] [ -e "$hooks_dir/post-commit" ] [ ! -e ".git/post-commit" ] [ -e "$hooks_dir/post-merge" ] [ ! -e ".git/post-merge" ] } refute_hooks() { local hooks_dir="$1" [ ! -e "$hooks_dir/pre-push" ] [ ! -e "$hooks_dir/post-checkout" ] [ ! -e "$hooks_dir/post-commit" ] [ ! 
-e "$hooks_dir/post-merge" ] } begin_test "install with supported core.hooksPath" ( set -e repo_name="supported-custom-hooks-path" git init "$repo_name" cd "$repo_name" hooks_dir="custom_hooks_dir" mkdir -p "$hooks_dir" git config --local core.hooksPath "$hooks_dir" git lfs install 2>&1 | tee install.log grep "Updated Git hooks" install.log assert_hooks "$hooks_dir" ) end_test begin_test "install with supported core.hooksPath in subdirectory" ( set -e repo_name="supported-custom-hooks-path-subdir" git init "$repo_name" cd "$repo_name" hooks_dir="custom_hooks_dir" mkdir subdir git config --local core.hooksPath "$hooks_dir" (cd subdir && git lfs install 2>&1 | tee install.log) grep "Updated Git hooks" subdir/install.log assert_hooks "$hooks_dir" refute_hooks "subdir/$hooks_dir" ) end_test begin_test "install with supported expandable core.hooksPath" ( set -e repo_name="supported-custom-hooks-expandable-path" git init "$repo_name" cd "$repo_name" hooks_dir="~/custom_hooks_dir" mkdir -p "$hooks_dir" git config --local core.hooksPath "$hooks_dir" git lfs install 2>&1 | tee install.log grep "Updated Git hooks" install.log assert_hooks "$HOME/custom_hooks_dir" ) end_test git-lfs-3.6.1/t/t-install-worktree-unsupported.sh000077500000000000000000000011431472372047300220350ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" # These tests rely on behavior found in Git versions less than 2.20.0 to # perform themselves, specifically: # - lack of worktreeConfig extension support ensure_git_version_isnt $VERSION_HIGHER "2.20.0" begin_test "install --worktree with unsupported worktreeConfig extension" ( set -e reponame="$(basename "$0" ".sh")-unsupported" mkdir "$reponame" cd "$reponame" git init set +e git lfs install --worktree 2>err.log res=$? set -e cat err.log grep -i "error" err.log grep -- "--worktree" err.log [ "0" != "$res" ] ) end_test git-lfs-3.6.1/t/t-install-worktree.sh000077500000000000000000000117171472372047300174570ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" # These tests rely on behavior found in Git versions higher than 2.20.0 to # perform themselves, specifically: # - worktreeConfig extension support ensure_git_version_isnt $VERSION_LOWER "2.20.0" begin_test "install --worktree outside repository" ( set -e # If run inside the git-lfs source dir this will update its .git/config & cause issues if [ "$GIT_LFS_TEST_DIR" == "" ]; then echo "Skipping install --worktree because GIT_LFS_TEST_DIR is not set" exit 0 fi has_test_dir || exit 0 set +e git lfs install --worktree >out.log res=$? set -e [ "Not in a Git repository." 
= "$(cat out.log)" ] [ "0" != "$res" ] ) end_test begin_test "install --worktree with single working tree" ( set -e # old values that should be ignored by `install --worktree` git config --global filter.lfs.smudge "global smudge" git config --global filter.lfs.clean "global clean" git config --global filter.lfs.process "global filter" reponame="$(basename "$0" ".sh")-single-tree" mkdir "$reponame" cd "$reponame" git init git lfs install --worktree # local configs are correct [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ] [ "git-lfs smudge -- %f" = "$(git config --local filter.lfs.smudge)" ] [ "git-lfs smudge -- %f" = "$(git config --worktree filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] [ "git-lfs clean -- %f" = "$(git config --local filter.lfs.clean)" ] [ "git-lfs clean -- %f" = "$(git config --worktree filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ] [ "git-lfs filter-process" = "$(git config --local filter.lfs.process)" ] [ "git-lfs filter-process" = "$(git config --worktree filter.lfs.process)" ] # global configs [ "global smudge" = "$(git config --global filter.lfs.smudge)" ] [ "global clean" = "$(git config --global filter.lfs.clean)" ] [ "global filter" = "$(git config --global filter.lfs.process)" ] ) end_test begin_test "install --worktree with multiple working trees" ( set -e reponame="$(basename "$0" ".sh")-multi-tree" mkdir "$reponame" cd "$reponame" git init # old values that should be ignored by `install --worktree` git config --global filter.lfs.smudge "global smudge" git config --global filter.lfs.clean "global clean" git config --global filter.lfs.process "global filter" git config --local filter.lfs.smudge "local smudge" git config --local filter.lfs.clean "local clean" git config --local filter.lfs.process "local filter" touch a.txt git add a.txt git commit -m "initial commit" git config core.repositoryformatversion 1 git config extensions.worktreeConfig true treename="../$reponame-wt" git worktree add "$treename" cd "$treename" git lfs install --worktree # worktree configs are correct [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ] [ "git-lfs smudge -- %f" = "$(git config --worktree filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] [ "git-lfs clean -- %f" = "$(git config --worktree filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ] [ "git-lfs filter-process" = "$(git config --worktree filter.lfs.process)" ] # local configs are correct [ "local smudge" = "$(git config --local filter.lfs.smudge)" ] [ "local clean" = "$(git config --local filter.lfs.clean)" ] [ "local filter" = "$(git config --local filter.lfs.process)" ] # global configs [ "global smudge" = "$(git config --global filter.lfs.smudge)" ] [ "global clean" = "$(git config --global filter.lfs.clean)" ] [ "global filter" = "$(git config --global filter.lfs.process)" ] ) end_test begin_test "install --worktree without worktreeConfig extension" ( set -e reponame="$(basename "$0" ".sh")-multi-tree-no-config" mkdir "$reponame" cd "$reponame" git init touch a.txt git add a.txt git commit -m "initial commit" treename="../$reponame-wt" git worktree add "$treename" cd "$treename" set +e git lfs install --worktree >out.log res=$? 
set -e cat out.log grep -E "error running.*git.*config" out.log [ "$res" -eq 2 ] ) end_test begin_test "install --worktree with conflicting scope" ( set -e reponame="$(basename "$0" ".sh")-scope-conflict" mkdir "$reponame" cd "$reponame" git init set +e git lfs install --local --worktree 2>err.log res=$? set -e [ "Only one of the --local, --system, --worktree, and --file options can be specified." = "$(cat err.log)" ] [ "0" != "$res" ] set +e git lfs install --worktree --system 2>err.log res=$? set -e [ "Only one of the --local, --system, --worktree, and --file options can be specified." = "$(cat err.log)" ] [ "0" != "$res" ] set +e git lfs install --worktree --file test-file 2>err.log res=$? set -e [ "Only one of the --local, --system, --worktree, and --file options can be specified." = "$(cat err.log)" ] [ "0" != "$res" ] ) end_test git-lfs-3.6.1/t/t-install.sh000077500000000000000000000263231472372047300156160ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "install again" ( set -eo pipefail smudge="$(git config filter.lfs.smudge)" clean="$(git config filter.lfs.clean)" filter="$(git config filter.lfs.process)" [ "$smudge" = "git-lfs smudge -- %f" ] [ "$clean" = "git-lfs clean -- %f" ] [ "$filter" = "git-lfs filter-process" ] GIT_TRACE=1 git lfs install --skip-repo 2>&1 | tee install.log if grep -q "--replace-all" install.log; then echo >&2 "fatal: unexpected git config --replace-all via 'git lfs install'" exit 1 fi [ "$smudge" = "$(git config filter.lfs.smudge)" ] [ "$clean" = "$(git config filter.lfs.clean)" ] [ "$filter" = "$(git config filter.lfs.process)" ] ) end_test begin_test "install with old (non-upgradeable) settings" ( set -e git config --global filter.lfs.smudge "git-lfs smudge --something %f" git config --global filter.lfs.clean "git-lfs clean --something %f" git lfs install | tee install.log [ "${PIPESTATUS[0]}" = 2 ] grep -E "(clean|smudge)\" attribute should be" install.log [ `grep -c "(MISSING)" install.log` = "0" ] [ "git-lfs smudge --something %f" = "$(git config --global filter.lfs.smudge)" ] [ "git-lfs clean --something %f" = "$(git config --global filter.lfs.clean)" ] git lfs install --force [ "git-lfs smudge -- %f" = "$(git config --global filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config --global filter.lfs.clean)" ] ) end_test begin_test "install with upgradeable settings" ( set -e git config --global filter.lfs.smudge "git-lfs smudge %f" git config --global filter.lfs.clean "git-lfs clean %f" # should not need force, should upgrade this old style git lfs install [ "git-lfs smudge -- %f" = "$(git config --global filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config --global filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config --global filter.lfs.process)" ] ) end_test begin_test "install updates repo hooks" ( set -e mkdir install-repo-hooks cd install-repo-hooks git init pre_push_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\\n%s\\n\\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'pre-push' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs pre-push \"\$@\"" post_checkout_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\\n%s\\n\\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. 
If you no longer wish to use Git LFS, remove this hook by deleting the 'post-checkout' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs post-checkout \"\$@\"" post_commit_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\\n%s\\n\\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-commit' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs post-commit \"\$@\"" post_merge_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\\n%s\\n\\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-merge' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs post-merge \"\$@\"" [ "Updated Git hooks. Git LFS initialized." = "$(git lfs install)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] [ "$post_checkout_hook" = "$(cat .git/hooks/post-checkout)" ] [ "$post_commit_hook" = "$(cat .git/hooks/post-commit)" ] [ "$post_merge_hook" = "$(cat .git/hooks/post-merge)" ] # replace old hook # more-comprehensive hook update tests are in test-update.sh echo "#!/bin/sh git lfs push --stdin \$*" > .git/hooks/pre-push [ "Updated Git hooks. Git LFS initialized." = "$(git lfs install)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] # don't replace unexpected hook expected="Hook already exists: pre-push test To resolve this, either: 1: run \`git lfs update --manual\` for instructions on how to merge hooks. 2: run \`git lfs update --force\` to overwrite your hook." echo "test" > .git/hooks/pre-push echo "test" > .git/hooks/post-checkout echo "test" > .git/hooks/post-commit echo "test" > .git/hooks/post-merge [ "test" = "$(cat .git/hooks/pre-push)" ] [ "$expected" = "$(git lfs install 2>&1)" ] [ "test" = "$(cat .git/hooks/pre-push)" ] [ "test" = "$(cat .git/hooks/post-checkout)" ] [ "test" = "$(cat .git/hooks/post-commit)" ] [ "test" = "$(cat .git/hooks/post-merge)" ] # Make sure returns non-zero set +e git lfs install if [ $? -eq 0 ] then exit 1 fi set -e # force replace unexpected hook [ "Updated Git hooks. Git LFS initialized." = "$(git lfs install --force)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] [ "$post_checkout_hook" = "$(cat .git/hooks/post-checkout)" ] [ "$post_commit_hook" = "$(cat .git/hooks/post-commit)" ] [ "$post_merge_hook" = "$(cat .git/hooks/post-merge)" ] has_test_dir || exit 0 echo "test with bare repository" cd .. 
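  # A bare mirror clone has no working tree, so the hooks land in its
  # top-level hooks/ directory rather than under .git/hooks; the assertion
  # below checks the installed pre-push hook there.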
git clone --mirror install-repo-hooks bare-install-repo-hooks cd bare-install-repo-hooks git lfs env git lfs install ls -al hooks [ "$pre_push_hook" = "$(cat hooks/pre-push)" ] ) end_test begin_test "install outside repository directory" ( set -e if [ -d "hooks" ]; then ls -al echo "hooks dir exists" exit 1 fi git lfs install > check.log 2>&1 if [ -d "hooks" ]; then ls -al echo "hooks dir exists" exit 1 fi cat check.log # doesn't print this because being in a git repo is not necessary for install [ "$(grep -c "Not in a Git repository" check.log)" = "0" ] [ "$(grep -c "Error" check.log)" = "0" ] ) end_test begin_test "install --skip-smudge" ( set -e mkdir install-skip-smudge-test cd install-skip-smudge-test git lfs install [ "git-lfs clean -- %f" = "$(git config --global filter.lfs.clean)" ] [ "git-lfs smudge -- %f" = "$(git config --global filter.lfs.smudge)" ] [ "git-lfs filter-process" = "$(git config --global filter.lfs.process)" ] git lfs install --skip-smudge [ "git-lfs clean -- %f" = "$(git config --global filter.lfs.clean)" ] [ "git-lfs smudge --skip -- %f" = "$(git config --global filter.lfs.smudge)" ] [ "git-lfs filter-process --skip" = "$(git config --global filter.lfs.process)" ] git lfs install [ "git-lfs clean -- %f" = "$(git config --global filter.lfs.clean)" ] [ "git-lfs smudge -- %f" = "$(git config --global filter.lfs.smudge)" ] [ "git-lfs filter-process" = "$(git config --global filter.lfs.process)" ] [ ! -e "lfs" ] ) end_test begin_test "install --local" ( set -e # old values that should be ignored by `install --local` git config --global filter.lfs.smudge "global smudge" git config --global filter.lfs.clean "global clean" git config --global filter.lfs.process "global filter" mkdir install-local-repo cd install-local-repo git init git lfs install --local # local configs are correct [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ] [ "git-lfs smudge -- %f" = "$(git config --local filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] [ "git-lfs clean -- %f" = "$(git config --local filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ] [ "git-lfs filter-process" = "$(git config --local filter.lfs.process)" ] # global configs [ "global smudge" = "$(git config --global filter.lfs.smudge)" ] [ "global clean" = "$(git config --global filter.lfs.clean)" ] [ "global filter" = "$(git config --global filter.lfs.process)" ] ) end_test begin_test "install --file" ( set -e # old values that should be ignored by `install --local` git config --global filter.lfs.smudge "global smudge" git config --global filter.lfs.clean "global clean" git config --global filter.lfs.process "global filter" mkdir install-file-repo cd install-file-repo git init git lfs install --file=test-file # local configs are correct [ "git-lfs smudge -- %f" = "$(git config --file test-file filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config --file test-file filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config --file test-file filter.lfs.process)" ] # global configs [ "global smudge" = "$(git config --global filter.lfs.smudge)" ] [ "global clean" = "$(git config --global filter.lfs.clean)" ] [ "global filter" = "$(git config --global filter.lfs.process)" ] ) end_test begin_test "install --local with failed permissions" ( set -e # Windows lacks POSIX permissions. [ "$IS_WINDOWS" -eq 1 ] && exit 0 # Root is exempt from permissions. 
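  # (When the suite runs as root, e.g., in a container, the chmod below
  # cannot make .git/config unwritable, so the test skips itself.)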
[ "$(id -u)" -eq 0 ] && exit 0 mkdir install-local-repo-perms cd install-local-repo-perms git init # Make it impossible to write a new .git/config file so we can't write config # options. chmod 500 .git res=0 git lfs install --local >out.log || res=$? # Cleanup fails without this. chmod 700 .git cat out.log grep -E "error running.*git.*config" out.log [ "$res" -eq 2 ] ) end_test begin_test "install --local outside repository" ( set -e # If run inside the git-lfs source dir this will update its .git/config & cause issues if [ "$GIT_LFS_TEST_DIR" == "" ]; then echo "Skipping install --local because GIT_LFS_TEST_DIR is not set" exit 0 fi has_test_dir || exit 0 set +e git lfs install --local >out.log res=$? set -e [ "Not in a Git repository." = "$(cat out.log)" ] [ "0" != "$res" ] ) end_test begin_test "install --local with conflicting scope" ( set -e reponame="$(basename "$0" ".sh")-scope-conflict" mkdir "$reponame" cd "$reponame" git init set +e git lfs install --local --system 2>err.log res=$? set -e [ "Only one of the --local, --system, --worktree, and --file options can be specified." = "$(cat err.log)" ] [ "0" != "$res" ] ) end_test begin_test "install in directory without access to .git/lfs" ( set -e mkdir not-a-repo cd not-a-repo mkdir .git touch .git/lfs touch lfs git config --global filter.lfs.clean whatevs [ "whatevs" = "$(git config filter.lfs.clean)" ] git lfs install --force [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] ) end_test begin_test "install in repo without changing hooks" ( set -e git init non-lfs-repo cd non-lfs-repo git lfs install --skip-repo # should not install hooks [ ! -f .git/hooks/pre-push ] [ ! -f .git/hooks/post-checkout ] [ ! -f .git/hooks/post-merge ] [ ! -f .git/hooks/post-commit ] # filters should still be installed [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ] [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ] ) end_test begin_test "can install when multiple global values registered" ( set -e git config --global filter.lfs.smudge "git-lfs smudge --something %f" git config --global --add filter.lfs.smudge "git-lfs smudge --something-else %f" git lfs install --force ) end_test git-lfs-3.6.1/t/t-lock.sh000077500000000000000000000216231472372047300150760ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "lock with good ref" ( set -e reponame="lock-main-branch-required" setup_remote_repo_with_file "$reponame" "a.dat" clone_repo "$reponame" "$reponame" git lfs lock "a.dat" --json 2>&1 | tee lock.json if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \'git lfs lock \'a.dat\'\' to succeed" exit 1 fi id=$(assert_lock lock.json a.dat) assert_server_lock "$reponame" "$id" "refs/heads/main" ) end_test begin_test "lock with good tracked ref" ( set -e reponame="lock-tracked-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git config push.default upstream git config branch.main.merge refs/heads/tracked git config branch.main.remote origin git push origin main git lfs lock "a.dat" --json 2>&1 | tee lock.json if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \'git lfs lock \'a.dat\'\' to succeed" exit 1 fi id=$(assert_lock lock.json a.dat) assert_server_lock "$reponame" "$id" "refs/heads/tracked" ) end_test begin_test "lock with bad ref" ( set -e reponame="lock-other-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git push origin main:other GIT_CURL_VERBOSE=1 git lfs lock "a.dat" 2>&1 | tee lock.json if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \'git lfs lock \'a.dat\'\' to fail" exit 1 fi grep 'Locking a.dat failed: Expected ref "refs/heads/other", got "refs/heads/main"' lock.json ) end_test begin_test "lock multiple files" ( set -e reponame="lock-multiple-files" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "a" > a.dat echo "b" > b.dat git add .gitattributes a.dat b.dat git commit -m "add dat files" git push origin main:other GIT_TRACE=0 git lfs lock *.dat >log 2>errlog [ $(grep -c "Locked [ab].dat" log) -eq 2 ] grep -v CREDS errlog && exit 1 grep "Usage:" errlog && exit 1 true ) end_test begin_test "lock multiple files (JSON)" ( set -e reponame="lock-multiple-files-json" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "a" > a.dat echo "b" > b.dat git add .gitattributes a.dat b.dat git commit -m "add dat files" git push origin main:other git lfs lock --json *.dat | tee lock.json grep -E '\[\{"id":"[^"]+","path":"a\.dat","owner":\{"name":"Git LFS Tests"\},"locked_at":"[^"]+"\},\{"id":"[^"]+","path":"b\.dat","owner":\{"name":"Git LFS Tests"\},"locked_at":"[^"]+"\}\]' lock.json ) end_test begin_test "lock absolute path" ( set -e reponame="lock-absolute-path" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "a" > a.dat echo "b" > b.dat git add .gitattributes a.dat b.dat git commit -m "add dat files" git push origin main:other git lfs lock --json "$(pwd)/a.dat" | tee lock.json id=$(assert_lock lock.json a.dat) assert_server_lock "$reponame" "$id" ) end_test begin_test "create lock with server using client cert" ( set -e git config --global "http.$LFS_CLIENT_CERT_URL/.sslCert" "$LFS_CLIENT_CERT_FILE" git config --global "http.$LFS_CLIENT_CERT_URL/.sslKey" "$LFS_CLIENT_KEY_FILE" reponame="lock_create_client_cert" setup_remote_repo_with_file "$reponame" "cc.dat" git config lfs.url "$CLIENTCERTGITSERVER/$reponame.git/info/lfs" git lfs lock --json "cc.dat" | tee lock.json id=$(assert_lock lock.json cc.dat) assert_server_lock "$reponame" 
"$id" ) end_test begin_test "creating a lock (with output)" ( set -e reponame="lock_create_simple_output" setup_remote_repo_with_file "$reponame" "a_output.dat" git lfs lock "a_output.dat" | tee lock.log grep "Locked a_output.dat" lock.log id=$(grep -oh "\((.*)\)" lock.log | tr -d \(\)) assert_server_lock "$reponame" "$id" ) end_test begin_test "locking a file that doesn't exist" ( set -e reponame="lock_create_nonexistent" setup_remote_repo_with_file "$reponame" "a_output.dat" git lfs lock "b_output.dat" | tee lock.log grep "Locked b_output.dat" lock.log id=$(grep -oh "\((.*)\)" lock.log | tr -d \(\)) assert_server_lock "$reponame" "$id" ) end_test begin_test "locking a previously locked file" ( set -e reponame="lock_create_previously_created" setup_remote_repo_with_file "$reponame" "b.dat" git lfs lock --json "b.dat" | tee lock.json id=$(assert_lock lock.json b.dat) assert_server_lock "$reponame" "$id" grep "lock already created" <(git lfs lock "b.dat" 2>&1) ) end_test begin_test "locking a directory" ( set -e reponame="locking_directories" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" mkdir dir echo "a" > dir/a.dat git add dir/a.dat .gitattributes git commit -m "add dir/a.dat" | tee commit.log grep "main (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 dir/a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log git push origin main 2>&1 | tee push.log grep "main -> main" push.log git lfs lock ./dir/ 2>&1 | tee lock.log grep "cannot lock directory" lock.log ) end_test begin_test "locking a nested file" ( set -e reponame="locking-nested-file" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" --lockable git add .gitattributes git commit -m "initial commit" mkdir -p foo/bar/baz contents="contents" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > foo/bar/baz/a.dat git add foo/bar/baz/a.dat git commit -m "add a.dat" git push origin main assert_server_object "$reponame" "$contents_oid" git lfs lock foo/bar/baz/a.dat 2>&1 | tee lock.log grep "Locked foo/bar/baz/a.dat" lock.log git lfs locks 2>&1 | tee locks.log grep "foo/bar/baz/a.dat" locks.log ) end_test begin_test "creating a lock (within subdirectory)" ( set -e reponame="lock_create_within_subdirectory" setup_remote_repo_with_file "$reponame" "sub/a.dat" cd sub git lfs lock --json "a.dat" | tee lock.json if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \'git lfs lock \'a.dat\'\' to succeed" exit 1 fi id=$(assert_lock lock.json sub/a.dat) assert_server_lock "$reponame" "$id" ) end_test begin_test "creating a lock (symlinked working directory)" ( set -eo pipefail if [[ $(uname) == *"MINGW"* ]]; then echo >&2 "info: skipped on Windows ..." exit 0 fi reponame="lock-in-symlinked-working-directory" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track -l "*.dat" mkdir -p folder1 folder2 printf "hello" > folder2/a.dat add_symlink "../folder2" "folder1/folder2" git add --all . 
git commit -m "initial commit" git push origin main pushd "$TRASHDIR" > /dev/null ln -s "$reponame" "$reponame-symlink" cd "$reponame-symlink" git lfs lock --json folder1/folder2/a.dat 2>&1 | tee lock.json id="$(assert_lock lock.json folder1/folder2/a.dat)" assert_server_lock "$reponame" "$id" main popd > /dev/null ) end_test begin_test "lock with .gitignore" ( set -e reponame="lock-with-gitignore" setup_remote_repo_with_file "$reponame" "a.txt" clone_repo "$reponame" "$reponame" echo "*.txt filter=lfs diff=lfs merge=lfs -text lockable" > .gitattributes git add .gitattributes git commit -m ".gitattributes: mark 'a.txt' as lockable" rm -f a.txt && git checkout a.txt refute_file_writeable a.txt echo "*.txt" > .gitignore git add .gitignore git commit -m ".gitignore: ignore 'a.txt'" rm -f a.txt && git checkout a.txt refute_file_writeable a.txt ) end_test begin_test "lock with .gitignore and lfs.lockignoredfiles" ( set -e reponame="lock-with-gitignore-and-ignoredfiles" setup_remote_repo_with_file "$reponame" "a.txt" clone_repo "$reponame" "$reponame" git config lfs.lockignoredfiles true echo "*.txt filter=lfs diff=lfs merge=lfs -text lockable" > .gitattributes git add .gitattributes git commit -m ".gitattributes: mark 'a.txt' as lockable" rm -f a.txt && git checkout a.txt refute_file_writeable a.txt echo "*.txt" > .gitignore git add .gitignore git commit -m ".gitignore: ignore 'a.txt'" rm -f a.txt && git checkout a.txt refute_file_writeable a.txt ) end_test begin_test "lock with git-lfs-transfer" ( set -e setup_pure_ssh reponame="lock-with-git-lfs-transfer" setup_remote_repo_with_file "$reponame" "f.dat" clone_repo "$reponame" "$reponame" sshurl=$(ssh_remote "$reponame") git config lfs.url "$sshurl" GIT_TRACE_PACKET=1 git lfs lock --json "f.dat" | tee lock.log id=$(assert_lock lock.log f.dat) assert_server_lock_ssh "$reponame" "$id" "refs/heads/main" ) end_test git-lfs-3.6.1/t/t-locks.sh000077500000000000000000000176631472372047300152720ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "list a single lock with bad ref" ( set -e reponame="locks-list-other-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "f" > f.dat git add .gitattributes f.dat git commit -m "add f.dat" git push origin main:other git checkout -b other git lfs lock --json "f.dat" | tee lock.log git checkout main git lfs locks --path "f.dat" 2>&1 | tee locks.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected 'git lfs lock \'a.dat\'' to fail" exit 1 fi grep 'Expected ref "refs/heads/other", got "refs/heads/main"' locks.log ) end_test begin_test "list a single lock" ( set -e reponame="locks-list-main-branch-required" setup_remote_repo_with_file "$reponame" "f.dat" clone_repo "$reponame" "$reponame" git lfs lock --json "f.dat" | tee lock.log id=$(assert_lock lock.log f.dat) assert_server_lock "$reponame" "$id" "refs/heads/main" git lfs locks --path "f.dat" | tee locks.log [ $(wc -l < locks.log) -eq 1 ] grep "f.dat" locks.log grep "Git LFS Tests" locks.log ) end_test begin_test "list a single lock (SSH; git-lfs-authenticate)" ( set -e reponame="locks-list-ssh" setup_remote_repo_with_file "$reponame" "f.dat" clone_repo "$reponame" "$reponame" sshurl="${GITSERVER/http:\/\//ssh://git@}/$reponame" git config lfs.url "$sshurl" git lfs lock --json "f.dat" | tee lock.log id=$(assert_lock lock.log f.dat) assert_server_lock "$reponame" "$id" "refs/heads/main" GIT_TRACE=1 git lfs locks --path "f.dat" 2>trace.log | tee locks.log cat trace.log [ $(wc -l < locks.log) -eq 1 ] grep "f.dat" locks.log grep "Git LFS Tests" locks.log grep "lfs-ssh-echo.*git-lfs-authenticate /$reponame download" trace.log GIT_TRACE=1 git -c lfs."$sshurl".sshtransfer=never lfs locks --path "f.dat" 2>trace.log | tee locks.log [ $(wc -l < locks.log) -eq 1 ] grep "f.dat" locks.log grep "Git LFS Tests" locks.log grep "lfs-ssh-echo.*git-lfs-authenticate /$reponame download" trace.log grep "skipping pure SSH protocol" trace.log GIT_TRACE=1 git -c lfs."$sshurl".sshtransfer=always lfs locks --path "f.dat" 2>trace.log && exit 1 grep "git-lfs-authenticate has been disabled by request" trace.log ) end_test begin_test "list a single lock (SSH; git-lfs-transfer)" ( set -e setup_pure_ssh reponame="locks-list-ssh-pure" setup_remote_repo_with_file "$reponame" "f.dat" clone_repo "$reponame" "$reponame" sshurl=$(ssh_remote "$reponame") git config lfs.url "$sshurl" GIT_TRACE_PACKET=1 git lfs lock --json "f.dat" | tee lock.log id=$(assert_lock lock.log f.dat) assert_server_lock_ssh "$reponame" "$id" "refs/heads/main" GIT_TRACE=1 git lfs locks --path "f.dat" 2>trace.log | tee locks.log cat trace.log [ $(wc -l < locks.log) -eq 1 ] grep "f.dat" locks.log grep "lfs-ssh-echo.*git-lfs-transfer .*$reponame.git download" trace.log GIT_TRACE=1 git -c lfs."$sshurl".sshtransfer=always lfs locks --path "f.dat" 2>trace.log | tee locks.log [ $(wc -l < locks.log) -eq 1 ] grep "f.dat" locks.log grep "lfs-ssh-echo.*git-lfs-transfer .*$reponame.git download" trace.log GIT_TRACE=1 git -c lfs."$sshurl".sshtransfer=negotiate lfs locks --path "f.dat" 2>trace.log | tee locks.log [ $(wc -l < locks.log) -eq 1 ] grep "f.dat" locks.log grep "lfs-ssh-echo.*git-lfs-transfer .*$reponame.git download" trace.log ) end_test begin_test "list a single lock (--json)" ( set -e reponame="locks_list_single_json" setup_remote_repo_with_file "$reponame" "f_json.dat" git lfs lock --json "f_json.dat" | tee lock.log id=$(assert_lock lock.log f_json.dat) assert_server_lock "$reponame" "$id" git 
lfs locks --json --path "f_json.dat" | tee locks.log grep "\"path\":\"f_json.dat\"" locks.log grep "\"owner\":{\"name\":\"Git LFS Tests\"}" locks.log ) end_test begin_test "list locks with a limit" ( set -e reponame="locks_list_limit" setup_remote_repo "$reponame" clone_repo "$reponame" "clone_$reponame" git lfs track "*.dat" echo "foo" > "g_1.dat" echo "bar" > "g_2.dat" git add "g_1.dat" "g_2.dat" ".gitattributes" git commit -m "add files" | tee commit.log grep "3 files changed" commit.log grep "create mode 100644 g_1.dat" commit.log grep "create mode 100644 g_2.dat" commit.log grep "create mode 100644 .gitattributes" commit.log git push origin main 2>&1 | tee push.log grep "main -> main" push.log git lfs lock --json "g_1.dat" | tee lock.log assert_server_lock "$reponame" "$(assert_log "lock.log" g_1.dat)" git lfs lock --json "g_2.dat" | tee lock.log assert_server_lock "$reponame" "$(assert_lock "lock.log" g_2.dat)" git lfs locks --limit 1 | tee locks.log [ $(wc -l < locks.log) -eq 1 ] ) end_test begin_test "list locks with pagination" ( set -e reponame="locks_list_paginate" setup_remote_repo "$reponame" clone_repo "$reponame" "clone_$reponame" git lfs track "*.dat" for i in $(seq 1 5); do echo "$i" > "h_$i.dat" done git add "h_1.dat" "h_2.dat" "h_3.dat" "h_4.dat" "h_5.dat" ".gitattributes" git commit -m "add files" | tee commit.log grep "6 files changed" commit.log for i in $(seq 1 5); do grep "create mode 100644 h_$i.dat" commit.log done grep "create mode 100644 .gitattributes" commit.log git push origin main 2>&1 | tee push.log grep "main -> main" push.log for i in $(seq 1 5); do git lfs lock --json "h_$i.dat" | tee lock.log assert_server_lock "$reponame" "$(assert_lock "lock.log" "h_$i.dat")" done # The server will return, at most, three locks at a time git lfs locks --limit 4 | tee locks.log [ $(wc -l < locks.log) -eq 4 ] ) end_test begin_test "cached locks" ( set -e reponame="cached_locks" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" echo "foo" > "cached1.dat" echo "bar" > "cached2.dat" git add "cached1.dat" "cached2.dat" ".gitattributes" git commit -m "add files" | tee commit.log grep "3 files changed" commit.log grep "create mode 100644 cached1.dat" commit.log grep "create mode 100644 cached2.dat" commit.log grep "create mode 100644 .gitattributes" commit.log git push origin main 2>&1 | tee push.log grep "main -> main" push.log git lfs lock --json "cached1.dat" | tee lock.log assert_server_lock "$(assert_lock "lock.log" cached1.dat)" git lfs lock --json "cached2.dat" | tee lock.log assert_server_lock "$(assert_lock "lock.log" cached2.dat)" git lfs locks --local | tee locks.log [ $(wc -l < locks.log) -eq 2 ] # delete the remote to prove we're using the local records git remote remove origin git lfs locks --local --path "cached1.dat" | tee locks.log [ $(wc -l < locks.log) -eq 1 ] grep "cached1.dat" locks.log git lfs locks --local --limit 1 | tee locks.log [ $(wc -l < locks.log) -eq 1 ] ) end_test begin_test "cached locks with failed lock" ( set -e reponame="cached-locks-failed-lock" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" echo "foo" > "cached1.dat" echo "bar" > "cached2.dat" git add "cached1.dat" "cached2.dat" ".gitattributes" git commit -m "add files" | tee commit.log grep "3 files changed" commit.log grep "create mode 100644 cached1.dat" commit.log grep "create mode 100644 cached2.dat" commit.log grep "create mode 100644 .gitattributes" commit.log git 
push origin main 2>&1 | tee push.log grep "main -> main" push.log git lfs lock --json "cached1.dat" | tee lock.log assert_server_lock "$(assert_lock "lock.log" cached1.dat)" git lfs lock --json "cached1.dat" "cached2.dat" | tee lock.log assert_server_lock "$(assert_lock "lock.log" cached2.dat)" git lfs locks --local | tee locks.log [ $(wc -l < locks.log) -eq 2 ] git lfs unlock --json "cached1.dat" git lfs unlock --json "cached1.dat" "cached2.dat" || true git lfs locks --local | tee locks.log [ $(wc -l < locks.log) -eq 0 ] ) end_test git-lfs-3.6.1/t/t-logs.sh000077500000000000000000000006641472372047300151140ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "logs" ( set -e mkdir logs cd logs git init boomtownExit="" set +e git lfs logs boomtown boomtownExit=$? set -e [ "$boomtownExit" = "2" ] logname=`ls .git/lfs/logs` logfile=".git/lfs/logs/$logname" cat "$logfile" echo "... grep ..." grep "$ git-lfs logs boomtown" "$logfile" [ "$(cat "$logfile")" = "$(git lfs logs last)" ] ) end_test git-lfs-3.6.1/t/t-ls-files.sh000077500000000000000000000526601472372047300156710ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "ls-files" ( set -e mkdir repo cd repo git init git lfs track "*.dat" | grep "Tracking \"\*.dat\"" echo "some data" > some.dat echo "some text" > some.txt echo "missing" > missing.dat git add missing.dat git commit -m "add missing file" [ "6bbd052ab0 * missing.dat" = "$(git lfs ls-files)" ] git rm missing.dat git add some.dat some.txt git commit -m "added some files, removed missing one" git lfs ls-files | tee ls.log grep some.dat ls.log [ `wc -l < ls.log` = 1 ] diff -u <(git lfs ls-files --debug) <(cat <<-EOF filepath: some.dat size: 10 checkout: true download: true oid: sha256 5aa03f96c77536579166fba147929626cc3a97960e994057a9d80271a736d10f version: https://git-lfs.github.com/spec/v1 EOF) ) end_test begin_test "ls-files: files in subdirectory" ( set -e reponame="ls-files-subdir" git init "$reponame" cd "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" mkdir subdir missing="missing" missing_oid="$(calc_oid "$missing")" printf "%s" "$missing" > subdir/missing.dat git add subdir git commit -m "add file in subdirectory" contents="some data" oid="$(calc_oid "$contents")" printf "%s" "$contents" > subdir/some.dat echo "some text" > subdir/some.txt [ "${missing_oid:0:10} * subdir/missing.dat" = "$(git lfs ls-files)" ] git rm subdir/missing.dat git add subdir git commit -m "add and remove files in subdirectory" expected="${oid:0:10} * subdir/some.dat" [ "$expected" = "$(git lfs ls-files)" ] diff -u <(git lfs ls-files --debug) <(cat <<-EOF filepath: subdir/some.dat size: 9 checkout: true download: true oid: sha256 $oid version: https://git-lfs.github.com/spec/v1 EOF) ) end_test begin_test "ls-files: run within subdirectory" ( set -e reponame="ls-files-in-subdir" git init "$reponame" cd "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" mkdir subdir contents1="a" oid1="$(calc_oid "$contents1")" printf "%s" "$contents1" > a.dat contents2="b" oid2="$(calc_oid "$contents2")" printf "%s" "$contents2" > subdir/b.dat cd subdir [ "" = "$(git lfs ls-files)" ] git add ../a.dat b.dat expected="${oid1:0:10} * a.dat ${oid2:0:10} * subdir/b.dat" [ "$expected" = "$(git lfs ls-files)" ] diff -u <(git lfs ls-files --debug) <(cat <<-EOF filepath: a.dat size: 1 checkout: true download: true oid: sha256 $oid1 version: https://git-lfs.github.com/spec/v1 
filepath: subdir/b.dat size: 1 checkout: true download: true oid: sha256 $oid2 version: https://git-lfs.github.com/spec/v1 EOF) ) end_test begin_test "ls-files: checkout and download status" ( set -e reponame="ls-files-status" git init "$reponame" cd "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents1="a" oid1="$(calc_oid "$contents1")" printf "%s" "$contents1" > a.dat contents2="b" oid2="$(calc_oid "$contents2")" printf "%s" "$contents2" > b.dat [ "" = "$(git lfs ls-files)" ] # Note that if we don't remove b.dat from the working tree as well as the # Git LFS object cache, Git calls (as invoked by Git LFS) may restore the # cache copy from the working tree copy by re-invoking Git LFS in # "clean" filter mode. git add a.dat b.dat rm a.dat b.dat rm ".git/lfs/objects/${oid2:0:2}/${oid2:2:2}/$oid2" expected="${oid1:0:10} - a.dat ${oid2:0:10} - b.dat" [ "$expected" = "$(git lfs ls-files)" ] diff -u <(git lfs ls-files --debug) <(cat <<-EOF filepath: a.dat size: 1 checkout: false download: true oid: sha256 $oid1 version: https://git-lfs.github.com/spec/v1 filepath: b.dat size: 1 checkout: false download: false oid: sha256 $oid2 version: https://git-lfs.github.com/spec/v1 EOF) ) end_test begin_test "ls-files: checkout and download status (run within subdirectory)" ( set -e reponame="ls-files-status-in-subdir" git init "$reponame" cd "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents1="a" oid1="$(calc_oid "$contents1")" printf "%s" "$contents1" > a.dat contents2="b" oid2="$(calc_oid "$contents2")" printf "%s" "$contents2" > b.dat mkdir subdir cd subdir contents3="c" oid3="$(calc_oid "$contents3")" printf "%s" "$contents3" > c.dat contents4="d" oid4="$(calc_oid "$contents4")" printf "%s" "$contents4" > d.dat [ "" = "$(git lfs ls-files)" ] # Note that if we don't remove b.dat and d.dat from the working tree as # well as the Git LFS object cache, Git calls (as invoked by Git LFS) may # restore the cache copies from the working tree copies by re-invoking # Git LFS in "clean" filter mode. 
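  # In other words, given only a working-tree copy, a later Git call could
  # regenerate a deleted cache entry by running the equivalent of
  # "git lfs clean" on it, which re-hashes the content and writes it back
  # under .git/lfs/objects, defeating the "download: false" setup below.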
git add ../a.dat ../b.dat c.dat d.dat rm ../a.dat ../b.dat c.dat d.dat rm "../.git/lfs/objects/${oid2:0:2}/${oid2:2:2}/$oid2" rm "../.git/lfs/objects/${oid4:0:2}/${oid4:2:2}/$oid4" expected="${oid1:0:10} - a.dat ${oid2:0:10} - b.dat ${oid3:0:10} - subdir/c.dat ${oid4:0:10} - subdir/d.dat" [ "$expected" = "$(git lfs ls-files)" ] diff -u <(git lfs ls-files --debug) <(cat <<-EOF filepath: a.dat size: 1 checkout: false download: true oid: sha256 $oid1 version: https://git-lfs.github.com/spec/v1 filepath: b.dat size: 1 checkout: false download: false oid: sha256 $oid2 version: https://git-lfs.github.com/spec/v1 filepath: subdir/c.dat size: 1 checkout: false download: true oid: sha256 $oid3 version: https://git-lfs.github.com/spec/v1 filepath: subdir/d.dat size: 1 checkout: false download: false oid: sha256 $oid4 version: https://git-lfs.github.com/spec/v1 EOF) ) end_test begin_test "ls-files: --size" ( set -e reponame="ls-files-size" git init "$reponame" cd "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="contents" size="$(printf "%s" "$contents" | wc -c | awk '{ print $1 }')" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a.dat" git lfs ls-files --size 2>&1 | tee ls.log [ "d1b2a59fbe * a.dat (8 B)" = "$(cat ls.log)" ] ) end_test begin_test "ls-files: indexed files without tree" ( set -e reponame="ls-files-indexed-files-without-tree" git init "$reponame" cd "$reponame" git lfs track '*.dat' git add .gitattributes contents="a" oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat [ "" = "$(git lfs ls-files)" ] git add a.dat [ "${oid:0:10} * a.dat" = "$(git lfs ls-files)" ] ) end_test begin_test "ls-files: indexed file with tree" ( set -e reponame="ls-files-indexed-files-with-tree" git init "$reponame" cd "$reponame" git lfs track '*.dat' git add .gitattributes git commit -m "initial commit" tree_contents="a" tree_oid="$(calc_oid "$tree_contents")" printf "%s" "$tree_contents" > a.dat git add a.dat git commit -m "add a.dat" index_contents="b" index_oid="$(calc_oid "$index_contents")" printf "%s" "$index_contents" > a.dat git add a.dat [ "${index_oid:0:10} * a.dat" = "$(git lfs ls-files)" ] ) end_test begin_test "ls-files: historical reference ignores index" ( set -e reponame="ls-files-historical-reference-ignores-index" git init "$reponame" cd "$reponame" git lfs track "*.txt" echo "a.txt" > a.txt echo "b.txt" > b.txt echo "c.txt" > c.txt git add .gitattributes a.txt git commit -m "a.txt: initial commit" git add b.txt git commit -m "b.txt: initial commit" git add c.txt git lfs ls-files "$(git rev-parse HEAD~1)" 2>&1 | tee ls-files.log [ 1 -eq "$(grep -c "a.txt" ls-files.log)" ] [ 0 -eq "$(grep -c "b.txt" ls-files.log)" ] [ 0 -eq "$(grep -c "c.txt" ls-files.log)" ] ) end_test begin_test "ls-files: non-HEAD reference referring to HEAD ignores index" ( set -e reponame="ls-files-HEAD-ish-ignores-index" git init "$reponame" cd "$reponame" git lfs track "*.txt" echo "a.txt" > a.txt echo "b.txt" > b.txt git add .gitattributes a.txt git commit -m "a.txt: initial commit" tagname="v1.0.0" git tag "$tagname" git add b.txt git lfs ls-files "$tagname" 2>&1 | tee ls-files.log [ 1 -eq "$(grep -c "a.txt" ls-files.log)" ] [ 0 -eq "$(grep -c "b.txt" ls-files.log)" ] ) end_test begin_test "ls-files: outside git repository" ( set +e git lfs ls-files 2>&1 > ls-files.log res=$? set -e if [ "$res" = "0" ]; then echo "Passes because $GIT_LFS_TEST_DIR is unset." 
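    # As in the other "outside git repository" tests, running the suite
    # from within a Git checkout means the command above succeeds, so the
    # remaining assertions are skipped.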
exit 0 fi [ "$res" = "128" ] grep "Not in a Git repository" ls-files.log ) end_test begin_test "ls-files: --include" ( set -e git init ls-files-include cd ls-files-include git lfs track "*.dat" "*.bin" echo "a" > a.dat echo "b" > b.dat echo "c" > c.bin git add *.gitattributes a.dat b.dat c.bin git commit -m "initial commit" git lfs ls-files --include="*.dat" 2>&1 | tee ls-files.log [ "0" -eq "$(grep -c "\.bin" ls-files.log)" ] [ "2" -eq "$(grep -c "\.dat" ls-files.log)" ] ) end_test begin_test "ls-files: --exclude" ( set -e git init ls-files-exclude cd ls-files-exclude mkdir dir git lfs track "*.dat" echo "a" > a.dat echo "b" > b.dat echo "c" > dir/c.dat git add *.gitattributes a.dat b.dat dir/c.dat git commit -m "initial commit" git lfs ls-files --exclude="dir/" 2>&1 | tee ls-files.log [ "0" -eq "$(grep -c "dir" ls-files.log)" ] [ "2" -eq "$(grep -c "\.dat" ls-files.log)" ] ) end_test begin_test "ls-files: before first commit" ( set -e reponame="ls-files-before-first-commit" git init "$reponame" cd "$reponame" if [ 0 -ne $(git lfs ls-files | wc -l) ]; then echo >&2 "Expected \`git lfs ls-files\` to produce no output" exit 1 fi ) end_test begin_test "ls-files: show duplicate files" ( set -e mkdir dupRepoShort cd dupRepoShort git init git lfs track "*.tgz" | grep "Tracking \"\*.tgz\"" echo "test content" > one.tgz echo "test content" > two.tgz git add one.tgz git add two.tgz git commit -m "add duplicate files" expected="$(echo "a1fff0ffef * one.tgz a1fff0ffef * two.tgz")" [ "$expected" = "$(git lfs ls-files)" ] ) end_test begin_test "ls-files: show duplicate files with long OID" ( set -e mkdir dupRepoLong cd dupRepoLong git init git lfs track "*.tgz" | grep "Tracking \"\*.tgz\"" echo "test content" > one.tgz echo "test content" > two.tgz git add one.tgz git add two.tgz git commit -m "add duplicate files with long OID" expected="$(echo "a1fff0ffefb9eace7230c24e50731f0a91c62f9cefdfe77121c2f607125dffae * one.tgz a1fff0ffefb9eace7230c24e50731f0a91c62f9cefdfe77121c2f607125dffae * two.tgz")" [ "$expected" = "$(git lfs ls-files --long)" ] ) end_test begin_test "ls-files: history with --all" ( set -e reponame="ls-files-history-with-all" git init "$reponame" cd "$reponame" git lfs track '*.dat' printf "a" > a.dat printf "b" > b.dat git add .gitattributes a.dat b.dat git commit -m "initial commit" rm b.dat git add b.dat git commit -m "remove b.dat" git lfs ls-files 2>&1 | tee ls-files.log [ 1 -eq $(grep -c "a\.dat" ls-files.log) ] [ 0 -eq $(grep -c "b\.dat" ls-files.log) ] git lfs ls-files --all 2>&1 | tee ls-files-all.log [ 1 -eq $(grep -c "a\.dat" ls-files-all.log) ] [ 1 -eq $(grep -c "b\.dat" ls-files-all.log) ] ) end_test begin_test "ls-files: --all with argument(s)" ( set -e reponame="ls-files-all-with-arguments" git init "$reponame" cd "$reponame" git lfs ls-files --all main 2>&1 | tee ls-files.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: \`git lfs ls-files --all main\` to fail" exit 1 fi [ "Cannot use --all with explicit reference" = "$(cat ls-files.log)" ] ) end_test begin_test "ls-files: reference with --deleted" ( set -e reponame="ls-files-reference-with-deleted" git init "$reponame" cd "$reponame" git lfs track "*.dat" printf "a" > a.dat git add .gitattributes a.dat git commit -m "initial commit" rm a.dat git add a.dat git commit -m "a.dat: remove a.dat" git lfs ls-files 2>&1 | tee ls-files.log git lfs ls-files --deleted 2>&1 | tee ls-files-deleted.log [ 0 -eq $(grep -c "a\.dat" ls-files.log) ] [ 1 -eq $(grep -c "a\.dat" ls-files-deleted.log) ] ) end_test begin_test 
"ls-files: invalid --all ordering" ( set -e reponame="ls-files-invalid---all-ordering" git init "$reponame" cd "$reponame" git lfs track "*.dat" echo "Hello world" > a.dat git add .gitattributes a.dat git commit -m "initial commit" git lfs ls-files -- --all 2>&1 | tee ls-files.out if [ ${PIPESTATUS[0]} = "0" ]; then echo >&2 "Expected \`git lfs ls-files -- --all\' to fail" exit 1 fi grep "Did you mean \`git lfs ls-files --all --\` ?" ls-files.out ) end_test begin_test "ls-files: list/stat files with escaped runes in path before commit" ( set -e reponame=runes-in-path content="zero" checksum="d3eb539a55" pathWithGermanRunes="german/äöü" fileWithGermanRunes="schüüch.bin" mkdir $reponame git init "$reponame" cd $reponame git lfs track "**/*" echo "$content" > regular echo "$content" > "$fileWithGermanRunes" mkdir -p "$pathWithGermanRunes" echo "$content" > "$pathWithGermanRunes/regular" echo "$content" > "$pathWithGermanRunes/$fileWithGermanRunes" git add * # check short form [ 4 -eq "$(git lfs ls-files | grep -c '*')" ] # also check long format [ 4 -eq "$(git lfs ls-files -l | grep -c '*')" ] ) end_test begin_test "ls-files: --name-only" ( set -e reponame="ls-files-name" git init "$reponame" cd "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="test contents" echo "$contents" > a.dat git add a.dat git commit -m "add a.dat" git lfs ls-files --name-only 2>&1 | tee ls.log [ "a.dat" = "$(cat ls.log)" ] ) end_test begin_test "ls-files: history with reference range" ( set -e reponame="ls-files-history-with-range" git init "$reponame" cd "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m 'intial commit' echo "content of a-file" > a.dat git add a.dat git commit -m 'add a.dat' echo "content of b-file" > b.dat git add b.dat git commit -m 'add b.dat' git tag b-commit echo "content of c-file" > c.dat git add c.dat git commit -m 'add c.dat' echo "content of c-file and later modified" > c.dat git add c.dat git commit -m 'modify c.dat' git tag c-commit git rm a.dat git commit -m 'remove a.dat' git lfs ls-files --all 2>&1 | tee ls-files.log [ 1 -eq $(grep -c "a\.dat" ls-files.log) ] [ 1 -eq $(grep -c "b\.dat" ls-files.log) ] [ 2 -eq $(grep -c "c\.dat" ls-files.log) ] git lfs ls-files b-commit c-commit 2>&1 | tee ls-files.log [ 0 -eq $(grep -c "a\.dat" ls-files.log) ] [ 0 -eq $(grep -c "b\.dat" ls-files.log) ] [ 2 -eq $(grep -c "c\.dat" ls-files.log) ] git lfs ls-files c-commit~ c-commit 2>&1 | tee ls-files.log [ 0 -eq $(grep -c "a\.dat" ls-files.log) ] [ 0 -eq $(grep -c "b\.dat" ls-files.log) ] [ 1 -eq $(grep -c "c\.dat" ls-files.log) ] git lfs ls-files HEAD~ HEAD 2>&1 | tee ls-files.log [ 0 -eq $(grep -c "a\.dat" ls-files.log) ] [ 0 -eq $(grep -c "b\.dat" ls-files.log) ] [ 0 -eq $(grep -c "c\.dat" ls-files.log) ] ) end_test begin_test "ls-files: not affected by lfs.fetchexclude" ( set -e mkdir repo-fetchexclude cd repo-fetchexclude git init git lfs track "*.dat" | grep "Tracking \"\*.dat\"" echo "some data" > some.dat echo "some text" > some.txt echo "missing" > missing.dat git add missing.dat git commit -m "add missing file" git config lfs.fetchexclude '*' [ "6bbd052ab0 * missing.dat" = "$(git lfs ls-files)" ] ) end_test begin_test "ls-files --json" ( set -e reponame="ls-files-json" git init "$reponame" cd "$reponame" git lfs track "*.dat" | grep "Tracking \"\*.dat\"" echo "some data" > some.dat echo "some text" > some.txt echo "missing" > missing.dat git add missing.dat git commit -m "add missing file" git lfs ls-files --json > actual cat 
> expected <<-EOF { "files": [ { "name": "missing.dat", "size": 8, "checkout": true, "downloaded": true, "oid_type": "sha256", "oid": "6bbd052ab054ef222c1c87be60cd191addedd24cc882d1f5f7f7be61dc61bb3a", "version": "https://git-lfs.github.com/spec/v1" } ] } EOF diff -u actual expected git rm missing.dat git add some.dat some.txt git commit -m "added some files, removed missing one" git lfs ls-files --json > actual cat > expected <<-EOF { "files": [ { "name": "some.dat", "size": 10, "checkout": true, "downloaded": true, "oid_type": "sha256", "oid": "5aa03f96c77536579166fba147929626cc3a97960e994057a9d80271a736d10f", "version": "https://git-lfs.github.com/spec/v1" } ] } EOF diff -u actual expected ) end_test begin_test "ls-files: files in subdirectory (--json)" ( set -e reponame="ls-files-subdir-json" git init "$reponame" cd "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" mkdir subdir missing="missing" missing_oid="$(calc_oid "$missing")" printf "%s" "$missing" > subdir/missing.dat git add subdir git commit -m "add file in subdirectory" contents="some data" oid="$(calc_oid "$contents")" printf "%s" "$contents" > subdir/some.dat echo "some text" > subdir/some.txt git rm subdir/missing.dat git add subdir git commit -m "add and remove files in subdirectory" diff -u <(git lfs ls-files --json) <(cat <<-EOF { "files": [ { "name": "subdir/some.dat", "size": 9, "checkout": true, "downloaded": true, "oid_type": "sha256", "oid": "$oid", "version": "https://git-lfs.github.com/spec/v1" } ] } EOF) ) end_test begin_test "ls-files: run within subdirectory (--json)" ( set -e reponame="ls-files-in-subdir-json" git init "$reponame" cd "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" mkdir subdir contents1="a" oid1="$(calc_oid "$contents1")" printf "%s" "$contents1" > a.dat contents2="b" oid2="$(calc_oid "$contents2")" printf "%s" "$contents2" > subdir/b.dat cd subdir git add ../a.dat b.dat diff -u <(git lfs ls-files --json) <(cat <<-EOF { "files": [ { "name": "a.dat", "size": 1, "checkout": true, "downloaded": true, "oid_type": "sha256", "oid": "$oid1", "version": "https://git-lfs.github.com/spec/v1" }, { "name": "subdir/b.dat", "size": 1, "checkout": true, "downloaded": true, "oid_type": "sha256", "oid": "$oid2", "version": "https://git-lfs.github.com/spec/v1" } ] } EOF) ) end_test begin_test "ls-files: checkout and download status (--json)" ( set -e reponame="ls-files-status-json" git init "$reponame" cd "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents1="a" oid1="$(calc_oid "$contents1")" printf "%s" "$contents1" > a.dat contents2="b" oid2="$(calc_oid "$contents2")" printf "%s" "$contents2" > b.dat # Note that if we don't remove b.dat from the working tree as well as the # Git LFS object cache, Git calls (as invoked by Git LFS) may restore the # cache copy from the working tree copy by re-invoking Git LFS in # "clean" filter mode. 
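# As an illustrative sketch (a hypothetical helper, not used by these
# tests), the cache paths removed below can be derived from the OID alone,
# since Git LFS shards objects under .git/lfs/objects by the first two
# hex-digit pairs of the SHA-256 OID:
#
#   lfs_object_path () {
#     printf '.git/lfs/objects/%s/%s/%s' "${1:0:2}" "${1:2:2}" "$1"
#   }
#
# With it, a removal like the one below could be spelled
# 'rm "$(lfs_object_path "$oid2")"'.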
git add a.dat b.dat rm a.dat b.dat rm ".git/lfs/objects/${oid2:0:2}/${oid2:2:2}/$oid2" diff -u <(git lfs ls-files --json) <(cat <<-EOF { "files": [ { "name": "a.dat", "size": 1, "checkout": false, "downloaded": true, "oid_type": "sha256", "oid": "$oid1", "version": "https://git-lfs.github.com/spec/v1" }, { "name": "b.dat", "size": 1, "checkout": false, "downloaded": false, "oid_type": "sha256", "oid": "$oid2", "version": "https://git-lfs.github.com/spec/v1" } ] } EOF) ) end_test begin_test "ls-files: checkout and download status (run within subdirectory) (--json)" ( set -e reponame="ls-files-status-in-subdir-json" git init "$reponame" cd "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents1="a" oid1="$(calc_oid "$contents1")" printf "%s" "$contents1" > a.dat contents2="b" oid2="$(calc_oid "$contents2")" printf "%s" "$contents2" > b.dat mkdir subdir cd subdir contents3="c" oid3="$(calc_oid "$contents3")" printf "%s" "$contents3" > c.dat contents4="d" oid4="$(calc_oid "$contents4")" printf "%s" "$contents4" > d.dat # Note that if we don't remove b.dat and d.dat from the working tree as # well as the Git LFS object cache, Git calls (as invoked by Git LFS) may # restore the cache copies from the working tree copies by re-invoking # Git LFS in "clean" filter mode. git add ../a.dat ../b.dat c.dat d.dat rm ../a.dat ../b.dat c.dat d.dat rm "../.git/lfs/objects/${oid2:0:2}/${oid2:2:2}/$oid2" rm "../.git/lfs/objects/${oid4:0:2}/${oid4:2:2}/$oid4" diff -u <(git lfs ls-files --json) <(cat <<-EOF { "files": [ { "name": "a.dat", "size": 1, "checkout": false, "downloaded": true, "oid_type": "sha256", "oid": "$oid1", "version": "https://git-lfs.github.com/spec/v1" }, { "name": "b.dat", "size": 1, "checkout": false, "downloaded": false, "oid_type": "sha256", "oid": "$oid2", "version": "https://git-lfs.github.com/spec/v1" }, { "name": "subdir/c.dat", "size": 1, "checkout": false, "downloaded": true, "oid_type": "sha256", "oid": "$oid3", "version": "https://git-lfs.github.com/spec/v1" }, { "name": "subdir/d.dat", "size": 1, "checkout": false, "downloaded": false, "oid_type": "sha256", "oid": "$oid4", "version": "https://git-lfs.github.com/spec/v1" } ] } EOF) ) end_test git-lfs-3.6.1/t/t-malformed-pointers.sh000077500000000000000000000044071472372047300177560ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "malformed pointers" ( set -e reponame="malformed-pointers" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" lfstest-genrandom --base64 1023 >malformed_small.dat lfstest-genrandom --base64 1024 >malformed_exact.dat lfstest-genrandom --base64 1025 >malformed_large.dat lfstest-genrandom --base64 1048576 >malformed_xxl.dat git \ -c "filter.lfs.process=" \ -c "filter.lfs.clean=cat" \ -c "filter.lfs.required=false" \ add *.dat git commit -m "add malformed pointer" git push origin main pushd .. 
>/dev/null clone_repo "$reponame" "$reponame-assert" grep "malformed_small.dat" clone.log grep "malformed_exact.dat" clone.log grep "malformed_large.dat" clone.log grep "malformed_xxl.dat" clone.log expected_small="$(cat ../$reponame/malformed_small.dat)" expected_exact="$(cat ../$reponame/malformed_exact.dat)" expected_large="$(cat ../$reponame/malformed_large.dat)" expected_xxl="$(cat ../$reponame/malformed_xxl.dat)" actual_small="$(cat malformed_small.dat)" actual_exact="$(cat malformed_exact.dat)" actual_large="$(cat malformed_large.dat)" actual_xxl="$(cat malformed_xxl.dat)" [ "$expected_small" = "$actual_small" ] [ "$expected_exact" = "$actual_exact" ] [ "$expected_large" = "$actual_large" ] [ "$expected_xxl" = "$actual_xxl" ] popd >/dev/null ) end_test begin_test "empty pointers" ( set -e reponame="empty-pointers" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" touch empty.dat git \ -c "filter.lfs.process=" \ -c "filter.lfs.clean=cat" \ -c "filter.lfs.required=false" \ add empty.dat git commit -m "add empty pointer" [ "0" -eq "$(git cat-file -p :empty.dat | wc -c)" ] [ "0" -eq "$(wc -c < empty.dat)" ] git push origin main pushd .. >/dev/null clone_repo "$reponame" "$reponame-assert" [ "0" -eq "$(grep -c "empty.dat" clone.log)" ] [ "0" -eq "$(git cat-file -p :empty.dat | wc -c)" ] [ "0" -eq "$(wc -c < empty.dat)" ] popd >/dev/null ) end_test git-lfs-3.6.1/t/t-merge-driver.sh000077500000000000000000000132501472372047300165330ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" setup_successful_repo () { if [ "$1" != "--track-later" ] then git lfs track '*.dat' else touch .gitattributes fi seq 1 10 > a.dat git add .gitattributes *.dat git commit -m 'Initial import' git checkout -b other # sed -i isn't portable. sed -e 's/9/B/' a.dat > b.dat mv b.dat a.dat git add -u git commit -m 'B' if [ "$1" = "--track-later" ] then git lfs track '*.dat' git add .gitattributes git commit -m 'B2' fi git checkout --force main sed -e 's/2/A/' a.dat > b.dat mv b.dat a.dat git add -u git commit -m 'A' if [ "$1" = "--track-later" ] then git lfs track '*.dat' git add -u git commit -m 'A2' fi } setup_custom_repo () { git lfs track '*.dat' seq 1 10 > a.dat git add .gitattributes *.dat git commit -m 'Initial import' git checkout -b other # sed -i isn't portable. sed -e 's/2/B/' -e 's/9/B/' a.dat > b.dat mv b.dat a.dat git add -u git commit -m 'B' git checkout main sed -e 's/2/A/' -e 's/9/A/' a.dat > b.dat mv b.dat a.dat git add -u git commit -m 'A' } setup_conflicting_repo () { if [ "$1" != "--track-later" ] then git lfs track '*.dat' else touch .gitattributes fi seq 1 10 > a.dat git add .gitattributes *.dat git commit -m 'Initial import' git checkout -b other # sed -i isn't portable. 
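# (GNU sed accepts "sed -i" while BSD/macOS sed requires "sed -i '' ", so
# these helpers use the portable write-then-rename idiom instead, e.g.:
#
#   sed -e 's/9/B/' a.dat > b.dat && mv b.dat a.dat
#
# which behaves identically with either implementation.)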
sed -e 's/3/B/' a.dat > b.dat mv b.dat a.dat git add -u git commit -m 'B' if [ "$1" = "--track-later" ] then git lfs track '*.dat' git add .gitattributes git commit -m 'B2' fi git checkout --force main sed -e 's/2/A/' a.dat > b.dat mv b.dat a.dat git add -u git commit -m 'A' if [ "$1" = "--track-later" ] then git lfs track '*.dat' git add -u git commit -m 'A2' fi } begin_test "merge-driver uses Git merge by default" ( set -e reponame="merge-driver-basic" git init "$reponame" cd "$reponame" result="07b26d7b3123467282635a68fdf9b59e81269cf9faf12282cedf30f393a55e5b" git config merge.lfs.driver 'git lfs merge-driver --ancestor %O --current %A --other %B --marker-size %L --output %A' setup_successful_repo git merge other ( set -e echo 1 echo A seq 3 8 echo B echo 10 ) > expected.dat diff -u a.dat expected.dat assert_pointer "main" "a.dat" "$result" 21 assert_local_object "$result" 21 ) end_test begin_test "merge-driver uses Git merge when explicit" ( set -e reponame="merge-driver-explicit" git init "$reponame" cd "$reponame" result="07b26d7b3123467282635a68fdf9b59e81269cf9faf12282cedf30f393a55e5b" git config merge.lfs.driver 'git lfs merge-driver --ancestor %O --current %A --other %B --marker-size %L --output %A --program '\''git merge-file --stdout --marker-size=%%L %%A %%O %%B >%%D'\''' git lfs track '*.dat' setup_successful_repo git merge other ( set -e echo 1 echo A seq 3 8 echo B echo 10 ) > expected.dat diff -u a.dat expected.dat assert_pointer "main" "a.dat" "$result" 21 assert_local_object "$result" 21 ) end_test begin_test "merge-driver uses custom driver when explicit" ( set -e reponame="merge-driver-custom" git init "$reponame" cd "$reponame" result="07b26d7b3123467282635a68fdf9b59e81269cf9faf12282cedf30f393a55e5b" git config merge.lfs.driver 'git lfs merge-driver --ancestor %O --current %A --other %B --marker-size %L --output %A --program '\''(sed -n 1,5p %%A; sed -n 6,10p %%B) >%%D'\''' git lfs track '*.dat' setup_custom_repo git merge other ( set -e echo 1 echo A seq 3 8 echo B echo 10 ) > expected.dat diff -u a.dat expected.dat assert_pointer "main" "a.dat" "$result" 21 assert_local_object "$result" 21 ) end_test begin_test "merge-driver reports conflicts" ( set -e reponame="merge-driver-conflicts" git init "$reponame" cd "$reponame" git config merge.lfs.driver 'git lfs merge-driver --ancestor %O --current %A --other %B --marker-size %L --output %A --program '\''git merge-file --stdout --marker-size=%%L %%A %%O %%B >%%D'\''' git lfs track '*.dat' setup_conflicting_repo git merge other && exit 1 sed -e 's/<<<<<<<.*/<<<<<<</' -e 's/>>>>>>>.*/>>>>>>>/' a.dat > actual.dat ( set -e echo 1 echo "<<<<<<<" echo A echo 3 echo "=======" echo 2 echo B echo ">>>>>>>" seq 4 10 ) > expected.dat diff -u actual.dat expected.dat ) end_test begin_test "merge-driver gracefully handles non-pointer" ( set -e reponame="merge-driver-non-pointer" git init "$reponame" cd "$reponame" result="07b26d7b3123467282635a68fdf9b59e81269cf9faf12282cedf30f393a55e5b" git config merge.lfs.driver 'git lfs merge-driver --ancestor %O --current %A --other %B --marker-size %L --output %A' setup_successful_repo --track-later git merge other ( set -e echo 1 echo A seq 3 8 echo B echo 10 ) > expected.dat diff -u a.dat expected.dat assert_pointer "main" "a.dat" "$result" 21 assert_local_object "$result" 21 ) end_test begin_test "merge-driver reports conflicts with non-pointer" ( set -e reponame="conflicts-non-pointer" git init "$reponame" cd "$reponame" git config merge.lfs.driver 'git lfs merge-driver --ancestor %O --current %A --other %B
--marker-size %L --output %A --program '\''git merge-file --stdout --marker-size=%%L %%A %%O %%B >%%D'\''' setup_conflicting_repo --track-later git merge other && exit 1 sed -e 's/<<<<<<<.*/<<<<<<</' -e 's/>>>>>>>.*/>>>>>>>/' a.dat > actual.dat ( set -e echo 1 echo "<<<<<<<" echo A echo 3 echo "=======" echo 2 echo B echo ">>>>>>>" seq 4 10 ) > expected.dat diff -u actual.dat expected.dat ) end_test git-lfs-3.6.1/t/t-mergetool.sh000077500000000000000000000017611472372047300161440ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "mergetool works with large files" ( set -e reponame="mergetool-works-with-large-files" git init "$reponame" cd "$reponame" git lfs track "*.dat" printf "base" > conflict.dat git add .gitattributes conflict.dat git commit -m "initial commit" git checkout -b conflict printf "b" > conflict.dat git add conflict.dat git commit -m "conflict.dat: b" git checkout main printf "a" > conflict.dat git add conflict.dat git commit -m "conflict.dat: a" set +e git merge conflict set -e git config mergetool.inspect.cmd ' for i in BASE LOCAL REMOTE; do echo "\$$i=$(eval "cat \"\$$i\"")"; done; exit 1 ' git config mergetool.inspect.trustExitCode true yes | git mergetool \ --no-prompt \ --tool=inspect \ -- conflict.dat 2>&1 \ | tee mergetool.log grep "\$BASE=base" mergetool.log grep "\$LOCAL=a" mergetool.log grep "\$REMOTE=b" mergetool.log ) end_test git-lfs-3.6.1/t/t-migrate-export.sh000077500000000000000000000375151472372047300171240ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/fixtures/migrate.sh" . "$(dirname "$0")/testlib.sh" begin_test "migrate export (default branch)" ( set -e setup_multiple_local_branches_tracked # Add b.md, a pointer existing only on main lfstest-genrandom --base64 160 >b.md git add b.md git commit -m "add b.md" md_oid="$(calc_oid "$(cat a.md)")" txt_oid="$(calc_oid "$(cat a.txt)")" b_md_oid="$(calc_oid "$(cat b.md)")" git checkout my-feature md_feature_oid="$(calc_oid "$(cat a.md)")" git checkout main assert_pointer "refs/heads/main" "a.md" "$md_oid" "140" assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "120" assert_pointer "refs/heads/main" "b.md" "$b_md_oid" "160" assert_pointer "refs/heads/my-feature" "a.md" "$md_feature_oid" "30" git lfs migrate export --include="*.md, *.txt" refute_pointer "refs/heads/main" "a.md" refute_pointer "refs/heads/main" "a.txt" refute_pointer "refs/heads/main" "b.md" assert_pointer "refs/heads/my-feature" "a.md" "$md_feature_oid" "30" # b.md should be pruned as no pointer exists to reference it refute_local_object "$b_md_oid" "160" # Other objects should not be pruned as they're still referenced in `my-feature` # by pointers assert_local_object "$md_oid" "140" assert_local_object "$txt_oid" "120" assert_local_object "$md_feature_oid" "30" main="$(git rev-parse refs/heads/main)" feature="$(git rev-parse refs/heads/my-feature)" main_attrs="$(git cat-file -p "$main:.gitattributes")" feature_attrs="$(git cat-file -p "$feature:.gitattributes")" echo "$main_attrs" | grep -q "*.md !text !filter !merge !diff" echo "$main_attrs" | grep -q "*.txt !text !filter !merge !diff" echo "$feature_attrs" | grep -q "*.md !text !filter !merge !diff" && exit 1 echo "$feature_attrs" | grep -q "*.txt !text !filter !merge !diff" && exit 1 true ) end_test begin_test "migrate export (with remote)" ( set -e setup_single_remote_branch_tracked git push origin main md_oid="$(calc_oid "$(cat a.md)")" txt_oid="$(calc_oid "$(cat a.txt)")" assert_pointer "refs/heads/main" "a.md" "$md_oid" "50" assert_pointer
"refs/heads/main" "a.txt" "$txt_oid" "30" assert_pointer "refs/remotes/origin/main" "a.md" "$md_oid" "50" assert_pointer "refs/remotes/origin/main" "a.txt" "$txt_oid" "30" # Flush the cache to ensure all objects have to be downloaded rm -rf .git/lfs/objects git lfs migrate export --everything --include="*.md, *.txt" refute_pointer "refs/heads/main" "a.md" refute_pointer "refs/heads/main" "a.txt" # All pointers have been exported, so all objects should be pruned refute_local_object "$md_oid" "50" refute_local_object "$txt_oid" "30" main="$(git rev-parse refs/heads/main)" main_attrs="$(git cat-file -p "$main:.gitattributes")" echo "$main_attrs" | grep -q "*.md !text !filter !merge !diff" echo "$main_attrs" | grep -q "*.txt !text !filter !merge !diff" ) end_test begin_test "migrate export (include/exclude args)" ( set -e setup_single_local_branch_tracked md_oid="$(calc_oid "$(cat a.md)")" txt_oid="$(calc_oid "$(cat a.txt)")" assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "120" assert_pointer "refs/heads/main" "a.md" "$md_oid" "140" git lfs migrate export --include="*" --exclude="a.md" refute_pointer "refs/heads/main" "a.txt" assert_pointer "refs/heads/main" "a.md" "$md_oid" "140" refute_local_object "$txt_oid" "120" assert_local_object "$md_oid" "140" main="$(git rev-parse refs/heads/main)" main_attrs="$(git cat-file -p "$main:.gitattributes")" echo "$main_attrs" | grep -q "* !text !filter !merge !diff" echo "$main_attrs" | grep -q "a.md filter=lfs diff=lfs merge=lfs" ) end_test begin_test "migrate export (bare repository)" ( set -e setup_single_remote_branch_tracked git push origin main md_oid="$(calc_oid "$(cat a.md)")" txt_oid="$(calc_oid "$(cat a.txt)")" make_bare assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "30" assert_pointer "refs/heads/main" "a.md" "$md_oid" "50" git lfs migrate export --everything --include="*" refute_pointer "refs/heads/main" "a.md" refute_pointer "refs/heads/main" "a.txt" # All pointers have been exported, so all objects should be pruned refute_local_object "$md_oid" "50" refute_local_object "$txt_oid" "30" ) end_test begin_test "migrate export (given branch)" ( set -e setup_multiple_local_branches_tracked md_oid="$(calc_oid "$(cat a.md)")" txt_oid="$(calc_oid "$(cat a.txt)")" git checkout my-feature md_feature_oid="$(calc_oid "$(cat a.md)")" git checkout main assert_pointer "refs/heads/my-feature" "a.md" "$md_feature_oid" "30" assert_pointer "refs/heads/my-feature" "a.txt" "$txt_oid" "120" assert_pointer "refs/heads/main" "a.md" "$md_oid" "140" assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "120" git lfs migrate export --include="*.md,*.txt" my-feature refute_pointer "refs/heads/my-feature" "a.md" refute_pointer "refs/heads/my-feature" "a.txt" refute_pointer "refs/heads/main" "a.md" refute_pointer "refs/heads/main" "a.txt" # No pointers left, so all objects should be pruned refute_local_object "$md_feature_oid" "30" refute_local_object "$txt_oid" "120" refute_local_object "$md_oid" "140" main="$(git rev-parse refs/heads/main)" feature="$(git rev-parse refs/heads/my-feature)" main_attrs="$(git cat-file -p "$main:.gitattributes")" feature_attrs="$(git cat-file -p "$feature:.gitattributes")" echo "$main_attrs" | grep -q "*.md !text !filter !merge !diff" echo "$main_attrs" | grep -q "*.txt !text !filter !merge !diff" echo "$feature_attrs" | grep -q "*.md !text !filter !merge !diff" echo "$feature_attrs" | grep -q "*.txt !text !filter !merge !diff" ) end_test begin_test "migrate export (no filter)" ( set -e setup_multiple_local_branches_tracked git lfs 
migrate export --yes 2>&1 | tee migrate.log if [ ${PIPESTATUS[0]} -eq 0 ]; then echo >&2 "fatal: expected git lfs migrate export to fail, didn't" exit 1 fi grep "One or more files must be specified with --include" migrate.log ) end_test begin_test "migrate export (exclude remote refs)" ( set -e setup_single_remote_branch_tracked md_oid="$(calc_oid "$(cat a.md)")" txt_oid="$(calc_oid "$(cat a.txt)")" git checkout refs/remotes/origin/main md_remote_oid="$(calc_oid "$(cat a.md)")" txt_remote_oid="$(calc_oid "$(cat a.txt)")" git checkout main assert_pointer "refs/heads/main" "a.md" "$md_oid" "50" assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "30" assert_pointer "refs/remotes/origin/main" "a.md" "$md_remote_oid" "140" assert_pointer "refs/remotes/origin/main" "a.txt" "$txt_remote_oid" "120" git lfs migrate export --include="*.md,*.txt" refute_pointer "refs/heads/main" "a.md" refute_pointer "refs/heads/main" "a.txt" refute_local_object "$md_oid" "50" refute_local_object "$txt_oid" "30" assert_pointer "refs/remotes/origin/main" "a.md" "$md_remote_oid" "140" assert_pointer "refs/remotes/origin/main" "a.txt" "$txt_remote_oid" "120" # Since these two objects exist on the remote, they should be removed with # our prune operation refute_local_object "$md_remote_oid" "140" refute_local_object "$txt_remote_oid" "120" main="$(git rev-parse refs/heads/main)" remote="$(git rev-parse refs/remotes/origin/main)" main_attrs="$(git cat-file -p "$main:.gitattributes")" remote_attrs="$(git cat-file -p "$remote:.gitattributes")" echo "$main_attrs" | grep -q "*.md !text !filter !merge !diff" echo "$main_attrs" | grep -q "*.txt !text !filter !merge !diff" echo "$remote_attrs" | grep -q "*.md !text !filter !merge !diff" && exit 1 echo "$remote_attrs" | grep -q "*.txt !text !filter !merge !diff" && exit 1 true ) end_test begin_test "migrate export (--skip-fetch)" ( set -e setup_single_remote_branch_tracked md_main_oid="$(calc_oid "$(cat a.md)")" txt_main_oid="$(calc_oid "$(cat a.txt)")" git checkout refs/remotes/origin/main md_remote_oid="$(calc_oid "$(cat a.md)")" txt_remote_oid="$(calc_oid "$(cat a.txt)")" git checkout main git tag pseudo-remote "$(git rev-parse refs/remotes/origin/main)" # Remove the refs/remotes/origin/main ref, and instruct 'git lfs migrate' to # not fetch it. 
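# (A quick way to confirm the deletion below takes effect would be, for
# example:
#
#   git show-ref --verify refs/remotes/origin/main && exit 1
#
# i.e. the ref lookup must fail once "git update-ref -d" has run, leaving
# the "pseudo-remote" tag created above as the only ref to that commit.)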
git update-ref -d refs/remotes/origin/main assert_pointer "refs/heads/main" "a.md" "$md_main_oid" "50" assert_pointer "pseudo-remote" "a.md" "$md_remote_oid" "140" assert_pointer "refs/heads/main" "a.txt" "$txt_main_oid" "30" assert_pointer "pseudo-remote" "a.txt" "$txt_remote_oid" "120" git lfs migrate export --skip-fetch --include="*.md,*.txt" refute_pointer "refs/heads/main" "a.md" refute_pointer "pseudo-remote" "a.md" refute_pointer "refs/heads/main" "a.txt" refute_pointer "pseudo-remote" "a.txt" refute_local_object "$md_main_oid" "50" refute_local_object "$md_remote_oid" "140" refute_local_object "$txt_main_oid" "30" refute_local_object "$txt_remote_oid" "120" main="$(git rev-parse refs/heads/main)" remote="$(git rev-parse pseudo-remote)" main_attrs="$(git cat-file -p "$main:.gitattributes")" remote_attrs="$(git cat-file -p "$remote:.gitattributes")" echo "$main_attrs" | grep -q "*.md !text !filter !merge !diff" echo "$main_attrs" | grep -q "*.txt !text !filter !merge !diff" echo "$remote_attrs" | grep -q "*.md !text !filter !merge !diff" echo "$remote_attrs" | grep -q "*.txt !text !filter !merge !diff" ) end_test begin_test "migrate export (include/exclude ref)" ( set -e setup_multiple_remote_branches_gitattrs md_main_oid="$(calc_oid "$(cat a.md)")" txt_main_oid="$(calc_oid "$(cat a.txt)")" git checkout refs/remotes/origin/main md_remote_oid="$(calc_oid "$(cat a.md)")" txt_remote_oid="$(calc_oid "$(cat a.txt)")" git checkout my-feature md_feature_oid="$(calc_oid "$(cat a.md)")" txt_feature_oid="$(calc_oid "$(cat a.txt)")" git checkout main git lfs migrate export \ --include="*.txt" \ --include-ref=refs/heads/my-feature \ --exclude-ref=refs/heads/main assert_pointer "refs/heads/main" "a.md" "$md_main_oid" "21" assert_pointer "refs/heads/main" "a.txt" "$txt_main_oid" "20" assert_pointer "refs/remotes/origin/main" "a.md" "$md_remote_oid" "11" assert_pointer "refs/remotes/origin/main" "a.txt" "$txt_remote_oid" "10" assert_pointer "refs/heads/my-feature" "a.md" "$md_feature_oid" "31" refute_pointer "refs/heads/my-feature" "a.txt" # Main branch objects should not be pruned as they exist in unpushed commits assert_local_object "$md_main_oid" "21" assert_local_object "$txt_main_oid" "20" # Remote main objects should be pruned as they exist in the remote refute_local_object "$md_remote_oid" "11" refute_local_object "$txt_remote_oid" "10" # txt_feature_oid should be pruned as it's no longer a pointer, but # md_feature_oid should remain as it's still a pointer in unpushed commits assert_local_object "$md_feature_oid" "31" refute_local_object "$txt_feature_oid" "30" main="$(git rev-parse refs/heads/main)" feature="$(git rev-parse refs/heads/my-feature)" remote="$(git rev-parse refs/remotes/origin/main)" main_attrs="$(git cat-file -p "$main:.gitattributes")" remote_attrs="$(git cat-file -p "$remote:.gitattributes")" feature_attrs="$(git cat-file -p "$feature:.gitattributes")" echo "$main_attrs" | grep -q "*.txt !text !filter !merge !diff" && exit 1 echo "$remote_attrs" | grep -q "*.txt !text !filter !merge !diff" && exit 1 echo "$feature_attrs" | grep -q "*.txt !text !filter !merge !diff" ) end_test begin_test "migrate export (invalid ref)" ( set -e remove_and_create_local_repo "migrate-export-invalid-ref" git commit --allow-empty -m "initial commit" git lfs migrate export --yes --include="*" jibberish >migrate.log 2>&1 && exit 1 grep "can't resolve ref" migrate.log ) end_test begin_test "migrate export (.gitattributes with different permissions)" ( set -e # Windows lacks POSIX permissions.
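# (Git itself records only an executable bit per file: mode 100755 versus
# the default 100644, as shown by "git ls-tree" in the assertions of the
# related tests. Windows has no such bit on disk, so checks like
# "[ -x .gitattributes ]" would be meaningless there; hence the early
# exit that follows.)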
[ "$IS_WINDOWS" -eq 1 ] && exit 0 setup_single_local_branch_tracked 0755 md_oid="$(calc_oid "$(cat a.md)")" txt_oid="$(calc_oid "$(cat a.txt)")" assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "120" assert_pointer "refs/heads/main" "a.md" "$md_oid" "140" [ -x .gitattributes ] git lfs migrate export --include="*.txt" [ ! -x .gitattributes ] refute_pointer "refs/heads/main" "a.txt" assert_pointer "refs/heads/main" "a.md" "$md_oid" "140" refute_local_object "$txt_oid" "120" assert_local_object "$md_oid" "140" main="$(git rev-parse refs/heads/main)" main_attrs="$(git cat-file -p "$main:.gitattributes")" echo "$main_attrs" | grep -q "*.txt !text !filter !merge !diff" attrs_main_sha="$(git show $main:.gitattributes | git hash-object --stdin)" md_main_sha="$(git show $main:a.md | git hash-object --stdin)" txt_main_sha="$(git show $main:a.txt | git hash-object --stdin)" diff -u <(git ls-tree $main) <(cat <<-EOF 100644 blob $attrs_main_sha .gitattributes 100644 blob $md_main_sha a.md 100644 blob $txt_main_sha a.txt EOF ) ) end_test begin_test "migrate export (.gitattributes symlink)" ( set -e setup_single_local_branch_tracked link git lfs migrate export --yes --include="*.txt" 2>&1 | tee migrate.log if [ ${PIPESTATUS[0]} -eq 0 ]; then echo >&2 "fatal: expected git lfs migrate export to fail, didn't" exit 1 fi grep "migrate: expected '.gitattributes' to be a file, got a symbolic link" migrate.log main="$(git rev-parse refs/heads/main)" attrs_main_sha="$(git show $main:.gitattributes | git hash-object --stdin)" diff -u <(git ls-tree $main -- .gitattributes) <(cat <<-EOF 120000 blob $attrs_main_sha .gitattributes EOF ) ) end_test begin_test "migrate export (--object-map)" ( set -e setup_multiple_local_branches_tracked output_dir="$GIT_LFS_TEST_DIR/export-object-map-$(lfstest-genrandom --base64url 32)" mkdir -p "$output_dir" git log --all --pretty='format:%H' > "${output_dir}/old_sha.txt" git lfs migrate export --everything --include="*" --object-map "${output_dir}/object-map.txt" git log --all --pretty='format:%H' > "${output_dir}/new_sha.txt" paste -d',' "${output_dir}/old_sha.txt" "${output_dir}/new_sha.txt" > "${output_dir}/expected-map.txt" diff -u <(sort "${output_dir}/expected-map.txt") <(sort "${output_dir}/object-map.txt") ) end_test begin_test "migrate export (--verbose)" ( set -e setup_multiple_local_branches_tracked git lfs migrate export --everything --include="*" --verbose 2>&1 | grep -q "migrate: commit " ) end_test begin_test "migrate export (--remote)" ( set -e setup_single_remote_branch_tracked git push origin main md_oid="$(calc_oid "$(cat a.md)")" txt_oid="$(calc_oid "$(cat a.txt)")" assert_pointer "refs/heads/main" "a.md" "$md_oid" "50" assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "30" # Flush the cache to ensure all objects have to be downloaded rm -rf .git/lfs/objects # Setup a new remote and invalidate the default remote_url="$(git config --get remote.origin.url)" git remote add zeta "$remote_url" git remote set-url origin "" git lfs migrate export --everything --remote="zeta" --include="*.md, *.txt" refute_pointer "refs/heads/main" "a.md" refute_pointer "refs/heads/main" "a.txt" refute_local_object "$md_oid" "50" refute_local_object "$txt_oid" "30" ) end_test begin_test "migrate export (invalid --remote)" ( set -e setup_single_remote_branch_tracked git lfs migrate export --include="*" --remote="zz" --yes 2>&1 \ | tee migrate.log if [ ${PIPESTATUS[0]} -eq 0 ]; then echo >&2 "fatal: expected git lfs migrate export to fail, didn't" exit 1 fi grep "Invalid remote zz 
provided" migrate.log ) end_test begin_test "migrate export (invalid pointer)" ( set -e git init repo1 git init repo2 cd repo1 echo "git-lfs" > problematic_file git add . git commit -m "create repo" git lfs migrate export --include="*" --everything --yes cd ../repo2 echo "not git-lfs" > problematic_file git add . git commit -m "create repo" git lfs migrate export --include="*" --everything --yes ) end_test git-lfs-3.6.1/t/t-migrate-fixup.sh000077500000000000000000000135251472372047300167310ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/fixtures/migrate.sh" . "$(dirname "$0")/testlib.sh" begin_test "migrate import (--fixup)" ( set -e setup_single_local_branch_tracked_corrupt txt_oid="$(calc_oid "$(git cat-file -p :a.txt)")" git lfs migrate import --everything --fixup --yes assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "120" assert_local_object "$txt_oid" "120" main="$(git rev-parse refs/heads/main)" main_attrs="$(git cat-file -p "$main:.gitattributes")" echo "$main_attrs" | grep -q "*.txt filter=lfs diff=lfs merge=lfs" ) end_test begin_test "migrate import (--fixup, complex nested)" ( set -e setup_single_local_branch_complex_tracked a_oid="$(calc_oid "$(git cat-file -p :a.txt)")" b_oid="$(calc_oid "$(git cat-file -p :dir/b.txt)")" git lfs migrate import --everything --fixup --yes assert_pointer "refs/heads/main" "a.txt" "$a_oid" "1" refute_pointer "refs/heads/main" "dir/b.txt" assert_local_object "$a_oid" "1" refute_local_object "$b_oid" "1" main="$(git rev-parse refs/heads/main)" main_attrs="$(git cat-file -p "$main:.gitattributes")" main_dir_attrs="$(git cat-file -p "$main:dir/.gitattributes")" echo "$main_attrs" | grep -q "*.txt filter=lfs diff=lfs merge=lfs" echo "$main_dir_attrs" | grep -q "*.txt !filter !diff !merge" ) end_test begin_test "migrate import (--fixup, --include)" ( set -e setup_single_local_branch_tracked_corrupt git lfs migrate import --everything --fixup --yes --include="*.txt" 2>&1 \ | tee migrate.log if [ "${PIPESTATUS[0]}" -eq 0 ]; then echo >&2 "Expected 'git lfs migrate ...' to fail, didn't ..." exit 1 fi grep -q "Cannot use --fixup with --include, --exclude" migrate.log ) end_test begin_test "migrate import (--fixup, --exclude)" ( set -e setup_single_local_branch_tracked_corrupt git lfs migrate import --everything --fixup --yes --exclude="*.txt" 2>&1 \ | tee migrate.log if [ "${PIPESTATUS[0]}" -eq 0 ]; then echo >&2 "Expected 'git lfs migrate ...' to fail, didn't ..." exit 1 fi grep -q "Cannot use --fixup with --include, --exclude" migrate.log ) end_test begin_test "migrate import (--fixup, --no-rewrite)" ( set -e setup_single_local_branch_tracked_corrupt git lfs migrate import --everything --fixup --yes --no-rewrite 2>&1 \ | tee migrate.log if [ "${PIPESTATUS[0]}" -eq 0 ]; then echo >&2 "Expected 'git lfs migrate ...' to fail, didn't ..." exit 1 fi grep -qe "--no-rewrite and --fixup cannot be combined" migrate.log ) end_test begin_test "migrate import (--fixup with remote tags)" ( set -e setup_single_local_branch_tracked_corrupt git lfs uninstall lfstest-genrandom --base64 120 >b.txt git add b.txt git commit -m "b.txt" git tag -m tag1 -a tag1 git reset --hard HEAD^ git lfs install cwd=$(pwd) cd "$TRASHDIR" git clone "$cwd" "$reponame-2" cd "$reponame-2" # We're checking here that this succeeds even though it does nothing in this # case. 
git lfs migrate import --fixup --yes main ) end_test begin_test "migrate import (--fixup, .gitattributes symlink)" ( set -e setup_single_local_branch_tracked_corrupt link git lfs migrate import --everything --fixup --yes 2>&1 | tee migrate.log if [ ${PIPESTATUS[0]} -eq 0 ]; then echo >&2 "fatal: expected git lfs migrate import to fail, didn't" exit 1 fi grep "migrate: expected '.gitattributes' to be a file, got a symbolic link" migrate.log main="$(git rev-parse refs/heads/main)" attrs_main_sha="$(git show $main:.gitattributes | git hash-object --stdin)" diff -u <(git ls-tree $main -- .gitattributes) <(cat <<-EOF 120000 blob $attrs_main_sha .gitattributes EOF ) ) end_test begin_test "migrate import (--fixup, .gitattributes with macro)" ( set -e setup_single_local_branch_tracked_corrupt macro txt_oid="$(calc_oid "$(git cat-file -p :a.txt)")" git lfs migrate import --everything --fixup --yes assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "120" assert_local_object "$txt_oid" "120" main="$(git rev-parse refs/heads/main)" main_attrs="$(git cat-file -p "$main:.gitattributes")" echo "$main_attrs" | grep -q "*.txt filter=lfs diff=lfs merge=lfs" ) end_test # NOTE: We skip this test for now as the "git lfs migrate" commands do not # fully process macro attribute definitions yet. #begin_test "migrate info (--fixup, .gitattributes with LFS macro)" #( # set -e # # setup_single_local_branch_tracked_corrupt lfsmacro # # txt_oid="$(calc_oid "$(git cat-file -p :a.txt)")" # # git lfs migrate import --everything --fixup --yes # # assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "120" # assert_local_object "$txt_oid" "120" # # main="$(git rev-parse refs/heads/main)" # main_attrs="$(git cat-file -p "$main:.gitattributes")" # echo "$main_attrs" | grep -q "*.txt filter=lfs diff=lfs merge=lfs" #) #end_test begin_test "migrate import (no potential fixup, --fixup, no .gitattributes)" ( set -e setup_multiple_local_branches original_head="$(git rev-parse HEAD)" # Ensure "fixup" command reports nothing if no files are tracked by LFS. git lfs migrate import --everything --fixup --yes >migrate.log [ "0" -eq "$(cat migrate.log | wc -l)" ] migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate import (no potential fixup, --fixup, .gitattributes with macro)" ( set -e setup_multiple_local_branches echo "[attr]foo foo" >.gitattributes lfstest-genrandom --base64 30 >a.md git add .gitattributes a.md git commit -m macro original_head="$(git rev-parse HEAD)" # Ensure "fixup" command reports nothing if no files are tracked by LFS. git lfs migrate import --everything --fixup --yes >migrate.log [ "0" -eq "$(cat migrate.log | wc -l)" ] migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test git-lfs-3.6.1/t/t-migrate-import-no-rewrite.sh000077500000000000000000000167671472372047300212140ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/fixtures/migrate.sh" . 
"$(dirname "$0")/testlib.sh" begin_test "migrate import --no-rewrite (default branch)" ( set -e setup_local_branch_with_gitattrs txt_oid="$(calc_oid "$(git cat-file -p :a.txt)")" prev_commit_oid="$(git rev-parse HEAD)" git lfs migrate import --no-rewrite --yes *.txt # Ensure our desired files were imported into git-lfs assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "120" assert_local_object "$txt_oid" "120" # Ensure the git history remained the same new_commit_oid="$(git rev-parse HEAD~1)" if [ "$prev_commit_oid" != "$new_commit_oid" ]; then exit 1 fi # Ensure a new commit was made new_head_oid="$(git rev-parse HEAD)" if [ "$prev_commit_oid" == "$new_oid" ]; then exit 1 fi # Ensure a new commit message was generated based on the list of imported files commit_msg="$(git log -1 --pretty=format:%s)" echo "$commit_msg" | grep -q "a.txt: convert to Git LFS" # Ensure we write a valid commit object. git fsck ) end_test begin_test "migrate import --no-rewrite (bare repository)" ( set -e setup_single_remote_branch_with_gitattrs prev_commit_oid="$(git rev-parse HEAD)" txt_oid="$(calc_oid "$(git cat-file -p :a.txt)")" md_oid="$(calc_oid "$(git cat-file -p :a.md)")" git lfs migrate import --no-rewrite --yes a.txt a.md # Ensure our desired files were imported assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "30" assert_pointer "refs/heads/main" "a.md" "$md_oid" "50" # Ensure the git history remained the same new_commit_oid="$(git rev-parse HEAD~1)" if [ "$prev_commit_oid" != "$new_commit_oid" ]; then exit 1 fi # Ensure a new commit was made new_head_oid="$(git rev-parse HEAD)" if [ "$prev_commit_oid" == "$new_oid" ]; then exit 1 fi ) end_test begin_test "migrate import --no-rewrite (multiple branches)" ( set -e setup_multiple_local_branches_with_gitattrs prev_commit_oid="$(git rev-parse HEAD)" md_oid="$(calc_oid "$(git cat-file -p :a.md)")" txt_oid="$(calc_oid "$(git cat-file -p :a.txt)")" md_feature_oid="$(calc_oid "$(git cat-file -p my-feature:a.md)")" git lfs migrate import --no-rewrite --yes *.txt *.md # Ensure our desired files were imported assert_pointer "refs/heads/main" "a.md" "$md_oid" "140" assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "120" assert_local_object "$md_oid" "140" assert_local_object "$txt_oid" "120" # Ensure our other branch was unmodified refute_local_object "$md_feature_oid" "30" # Ensure the git history remained the same new_commit_oid="$(git rev-parse HEAD~1)" if [ "$prev_commit_oid" != "$new_commit_oid" ]; then exit 1 fi # Ensure a new commit was made new_head_oid="$(git rev-parse HEAD)" if [ "$prev_commit_oid" == "$new_oid" ]; then exit 1 fi ) end_test begin_test "migrate import --no-rewrite (no .gitattributes)" ( set -e setup_multiple_local_branches # Ensure command fails if no .gitattributes files are present git lfs migrate import --no-rewrite --yes *.txt *.md 2>&1 | tee migrate.log if [ ${PIPESTATUS[0]} -eq 0 ]; then echo >&2 "Expected git lfs migrate import --no-rewrite to fail, didn't" exit 1 fi grep "No Git LFS filters found in '.gitattributes'" migrate.log ) end_test begin_test "migrate import --no-rewrite (nested .gitattributes)" ( set -e setup_local_branch_with_nested_gitattrs # Ensure a .md filter does not exist in the top-level .gitattributes main_attrs="$(git cat-file -p "$main:.gitattributes")" echo "$main_attrs" | grep -q ".md" && exit 1 # Ensure a .md filter exists in the nested .gitattributes nested_attrs="$(git cat-file -p "$main:b/.gitattributes")" echo "$nested_attrs" | grep -q "*.md filter=lfs diff=lfs merge=lfs" md_oid="$(calc_oid 
"$(git cat-file -p :a.md)")" nested_md_oid="$(calc_oid "$(git cat-file -p :b/a.md)")" txt_oid="$(calc_oid "$(git cat-file -p :a.txt)")" git lfs migrate import --no-rewrite --yes a.txt b/a.md # Ensure a.txt and subtree/a.md were imported, even though *.md only exists in the # nested subtree/.gitattributes file assert_pointer "refs/heads/main" "b/a.md" "$nested_md_oid" "140" assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "120" assert_local_object "$nested_md_oid" 140 assert_local_object "$txt_oid" 120 refute_local_object "$md_oid" 140 # Failure should occur when trying to import a.md as no entry exists in # top-level .gitattributes file git lfs migrate import --no-rewrite --yes a.md 2>&1 | tee migrate.log if [ ${PIPESTATUS[0]} -eq 0 ]; then echo >&2 "Expected git lfs migrate import --no-rewrite to fail, didn't" exit 1 fi grep "a.md did not match any Git LFS filters in '.gitattributes'" migrate.log ) end_test begin_test "migrate import --no-rewrite (with commit message)" ( set -e setup_local_branch_with_gitattrs prev_commit_oid="$(git rev-parse HEAD)" expected_commit_msg="run git-lfs migrate import --no-rewrite" git lfs migrate import --message "$expected_commit_msg" --no-rewrite --yes *.txt # Ensure the git history remained the same new_commit_oid="$(git rev-parse HEAD~1)" if [ "$prev_commit_oid" != "$new_commit_oid" ]; then exit 1 fi # Ensure a new commit was made new_head_oid="$(git rev-parse HEAD)" if [ "$prev_commit_oid" == "$new_oid" ]; then exit 1 fi # Ensure the provided commit message was used commit_msg="$(git log -1 --pretty=format:%s)" if [ "$commit_msg" != "$expected_commit_msg" ]; then exit 1 fi ) end_test begin_test "migrate import --no-rewrite (with empty commit message)" ( set -e setup_local_branch_with_gitattrs prev_commit_oid="$(git rev-parse HEAD)" git lfs migrate import -m "" --no-rewrite --yes *.txt # Ensure the git history remained the same new_commit_oid="$(git rev-parse HEAD~1)" if [ "$prev_commit_oid" != "$new_commit_oid" ]; then exit 1 fi # Ensure a new commit was made new_head_oid="$(git rev-parse HEAD)" if [ "$prev_commit_oid" == "$new_oid" ]; then exit 1 fi # Ensure the provided commit message was used commit_msg="$(git log -1 --pretty=format:%s)" if [ "$commit_msg" != "" ]; then exit 1 fi ) end_test begin_test "migrate import --no-rewrite (strict .gitattributes)" ( set -e reponame="$(basename "$0" ".sh")-strict-match" clone_repo "$reponame" repo-strict-match mkdir -p major-oak/mainst/.yarn-offline-mirror/ mkdir -p major-oak/major-oak/frontend/.yarn-offline-mirror/ foo_contents="foo" foo_oid=$(calc_oid "$foo_contents") bar_contents="bar" bar_oid=$(calc_oid "$bar_contents") printf "$foo_contents" > major-oak/mainst/.yarn-offline-mirror/typescript-3.4.3.tgz printf "$bar_contents" > major-oak/major-oak/frontend/.yarn-offline-mirror/typescript-2.9.2.tgz git add . git commit -m 'Initial import' cat >.gitattributes <migrate.log 2>&1 && exit 1 grep "can't resolve ref" migrate.log ) end_test begin_test "migrate import (above)" ( set -e setup_single_local_branch_untracked md_main_oid="$(calc_oid "$(git cat-file -p "refs/heads/main:a.md")")" txt_main_oid="$(calc_oid "$(git cat-file -p "refs/heads/main:a.txt")")" git lfs migrate import --above 121B # Ensure that 'a.md', whose size is above our 121 byte threshold # was converted into a git-lfs pointer by the migration. 
assert_local_object "$md_main_oid" "140" assert_pointer "refs/heads/main" "a.md" "$md_main_oid" "140" refute_pointer "refs/heads/main" "a.txt" "$txt_main_oid" "120" refute_local_object "$txt_main_oid" "120" # The migration should have identified that *.md files are now # tracked because it migrated a.md main_attrs="$(git cat-file -p "$main:.gitattributes")" echo "$main_attrs" | grep -q "/a.md filter=lfs diff=lfs merge=lfs" echo "$main_attrs" | grep -vq "/a.txt filter=lfs diff=lfs merge=lfs" git check-attr filter -- a.txt | grep -vq lfs ) end_test begin_test "migrate import (above without extension)" ( set -e setup_single_local_branch_untracked "just-b" b_main_oid="$(calc_oid "$(git cat-file -p "refs/heads/main:just-b")")" txt_main_oid="$(calc_oid "$(git cat-file -p "refs/heads/main:a.txt")")" git lfs migrate import --above 121B # Ensure that 'b', whose size is above our 121 byte threshold # was converted into a git-lfs pointer by the migration. assert_local_object "$b_main_oid" "140" assert_pointer "refs/heads/main" "just-b" "$b_main_oid" "140" refute_pointer "refs/heads/main" "a.txt" "$txt_main_oid" "120" refute_local_object "$txt_main_oid" "120" # The migration should have identified that /b is now tracked # because it migrated it. main_attrs="$(git cat-file -p "$main:.gitattributes")" echo "$main_attrs" | grep -q "/just-b filter=lfs diff=lfs merge=lfs" echo "$main_attrs" | grep -vq "/a.txt filter=lfs diff=lfs merge=lfs" git check-attr filter -- a.txt | grep -vq lfs ) end_test begin_test "migrate import (above with multiple files)" ( set -e # It is important that this file sort after "a.txt". setup_single_local_branch_untracked "b.txt" a_main_oid="$(calc_oid "$(git cat-file -p "refs/heads/main:a.txt")")" b_main_oid="$(calc_oid "$(git cat-file -p "refs/heads/main:b.txt")")" git lfs migrate import --above 121B # Ensure that 'a.md', whose size is above our 121 byte threshold # was converted into a git-lfs pointer by the migration. 
assert_local_object "$b_main_oid" "140" assert_pointer "refs/heads/main" "b.txt" "$b_main_oid" "140" refute_pointer "refs/heads/main" "a.txt" "$a_main_oid" "120" refute_local_object "$a_main_oid" "120" # The migration should have identified that *.md files are now # tracked because it migrated a.md main_attrs="$(git cat-file -p "$main:.gitattributes")" echo "$main_attrs" | grep -q "/b.txt filter=lfs diff=lfs merge=lfs" git check-attr filter -- a.txt | grep -vq lfs ) end_test begin_test "migrate import (above with include or exclude)" ( set -e setup_single_local_branch_untracked md_main_oid="$(calc_oid "$(git cat-file -p "refs/heads/main:a.md")")" txt_main_oid="$(calc_oid "$(git cat-file -p "refs/heads/main:a.txt")")" git lfs migrate import --above 121B --include "*.md" && exit 1 git lfs migrate import --above 121B --exclude "*.txt" && exit 1 git lfs migrate import --above 121B --fixup && exit 1 true ) end_test begin_test "migrate import (existing .gitattributes)" ( set -e setup_local_branch_with_gitattrs main="$(git rev-parse refs/heads/main)" txt_main_oid="$(calc_oid "$(git cat-file -p "$main:a.txt")")" git lfs migrate import --yes --include-ref=refs/heads/main --include="*.txt" assert_local_object "$txt_main_oid" "120" main="$(git rev-parse refs/heads/main)" prev="$(git rev-parse refs/heads/main^1)" diff -u <(git cat-file -p $main:.gitattributes) <(cat <<-EOF *.txt filter=lfs diff=lfs merge=lfs -text *.other filter=lfs diff=lfs merge=lfs -text EOF) diff -u <(git cat-file -p $prev:.gitattributes) <(cat <<-EOF *.txt filter=lfs diff=lfs merge=lfs -text EOF) ) end_test begin_test "migrate import (--exclude with existing .gitattributes)" ( set -e setup_local_branch_with_gitattrs main="$(git rev-parse refs/heads/main)" txt_main_oid="$(calc_oid "$(git cat-file -p "$main:a.txt")")" git lfs migrate import --yes --include-ref=refs/heads/main --include="*.txt" --exclude="*.bin" assert_local_object "$txt_main_oid" "120" main="$(git rev-parse refs/heads/main)" prev="$(git rev-parse refs/heads/main^1)" diff -u <(git cat-file -p $main:.gitattributes) <(cat <<-EOF *.txt filter=lfs diff=lfs merge=lfs -text *.other filter=lfs diff=lfs merge=lfs -text *.bin !text -filter -merge -diff EOF) diff -u <(git cat-file -p $prev:.gitattributes) <(cat <<-EOF *.txt filter=lfs diff=lfs merge=lfs -text *.bin !text -filter -merge -diff EOF) ) end_test begin_test "migrate import (existing .gitattributes with different permissions)" ( set -e # Windows lacks POSIX permissions. [ "$IS_WINDOWS" -eq 1 ] && exit 0 setup_local_branch_with_gitattrs 0755 main="$(git rev-parse refs/heads/main)" txt_main_oid="$(calc_oid "$(git cat-file -p "$main:a.txt")")" [ -x .gitattributes ] git lfs migrate import --yes --include-ref=refs/heads/main --include="*.txt" [ ! 
-x .gitattributes ] assert_local_object "$txt_main_oid" "120" main="$(git rev-parse refs/heads/main)" prev="$(git rev-parse refs/heads/main^1)" diff -u <(git cat-file -p $main:.gitattributes) <(cat <<-EOF *.txt filter=lfs diff=lfs merge=lfs -text *.other filter=lfs diff=lfs merge=lfs -text EOF ) diff -u <(git cat-file -p $prev:.gitattributes) <(cat <<-EOF *.txt filter=lfs diff=lfs merge=lfs -text EOF ) attrs_main_sha="$(git show $main:.gitattributes | git hash-object --stdin)" txt_main_sha="$(git show $main:a.txt | git hash-object --stdin)" diff -u <(git ls-tree $main) <(cat <<-EOF 100644 blob $attrs_main_sha .gitattributes 100644 blob $txt_main_sha a.txt EOF ) ) end_test begin_test "migrate import (existing .gitattributes symlink)" ( set -e setup_local_branch_with_gitattrs link git lfs migrate import --yes --include-ref=refs/heads/main --include="*.txt" 2>&1 | tee migrate.log if [ ${PIPESTATUS[0]} -eq 0 ]; then echo >&2 "fatal: expected git lfs migrate import to fail, didn't" exit 1 fi grep "migrate: expected '.gitattributes' to be a file, got a symbolic link" migrate.log main="$(git rev-parse refs/heads/main)" attrs_main_sha="$(git show $main:.gitattributes | git hash-object --stdin)" diff -u <(git ls-tree $main -- .gitattributes) <(cat <<-EOF 120000 blob $attrs_main_sha .gitattributes EOF ) ) end_test begin_test "migrate import (identical contents, different permissions)" ( set -e # Windows lacks POSIX permissions. [ "$IS_WINDOWS" -eq 1 ] && exit 0 setup_multiple_local_branches git checkout main echo "foo" >foo.dat git add . git commit -m "add file" chmod u+x foo.dat git add . git commit -m "make file executable" # Verify we have executable permissions. [ -x foo.dat ] git lfs migrate import --everything --include="*.dat" # Verify we have executable permissions. [ -x foo.dat ] ) end_test begin_test "migrate import (tags with same name as branches)" ( set -e setup_multiple_local_branches git checkout main contents="hello" oid=$(calc_oid "$contents") printf "$contents" >hello.dat git add . git commit -m "add file" git branch foo git tag foo git tag bar git lfs migrate import --everything --include="*.dat" [ "$(git rev-parse refs/heads/foo)" = "$(git rev-parse refs/tags/foo)" ] [ "$(git rev-parse refs/heads/foo)" = "$(git rev-parse refs/tags/bar)" ] assert_pointer "refs/heads/foo" hello.dat "$oid" 5 ) end_test begin_test "migrate import (bare repository)" ( set -e setup_multiple_local_branches make_bare git lfs migrate import \ --include-ref=main ) end_test begin_test "migrate import (nested sub-trees, no filter)" ( set -e setup_single_local_branch_deep_trees oid="$(calc_oid "$(git cat-file -p :foo/bar/baz/a.txt)")" size="$(git cat-file -p :foo/bar/baz/a.txt | wc -c | awk '{ print $1 }')" git lfs migrate import --everything assert_local_object "$oid" "$size" ) end_test begin_test "migrate import (prefix include(s))" ( set -e includes="foo/bar/baz/** foo/**/baz/a.txt *.txt" for include in $includes; do setup_single_local_branch_deep_trees oid="$(calc_oid "$(git cat-file -p :foo/bar/baz/a.txt)")" git lfs migrate import --include="$include" assert_local_object "$oid" 120 cd .. 
done ) end_test begin_test "migrate import (--everything)" ( set -e setup_multiple_local_branches git checkout main main_txt_oid="$(calc_oid "$(git cat-file -p :a.txt)")" main_md_oid="$(calc_oid "$(git cat-file -p :a.md)")" feature_md_oid="$(calc_oid "$(git cat-file -p my-feature:a.md)")" main_txt_size="$(git cat-file -p :a.txt | wc -c | awk '{ print $1 }')" main_md_size="$(git cat-file -p :a.md | wc -c | awk '{ print $1 }')" feature_md_size="$(git cat-file -p my-feature:a.md | wc -c | awk '{ print $1 }')" git lfs migrate import --everything assert_pointer "main" "a.txt" "$main_txt_oid" "$main_txt_size" assert_pointer "main" "a.md" "$main_md_oid" "$main_md_size" assert_pointer "my-feature" "a.md" "$feature_md_oid" "$feature_md_size" ) end_test begin_test "migrate import (ambiguous reference)" ( set -e setup_multiple_local_branches # Create an ambiguously named reference sharing the name as the SHA-1 of # "HEAD". sha="$(git rev-parse HEAD)" git tag "$sha" git lfs migrate import --everything ) end_test begin_test "migrate import (--everything with args)" ( set -e setup_multiple_local_branches [ "$(git lfs migrate import --everything main 2>&1)" = \ "Cannot use --everything with explicit reference arguments" ] ) end_test begin_test "migrate import (--everything with --include-ref)" ( set -e setup_multiple_local_branches [ "$(git lfs migrate import --everything --include-ref=refs/heads/main 2>&1)" = \ "Cannot use --everything with --include-ref or --exclude-ref" ] ) end_test begin_test "migrate import (--everything with --exclude-ref)" ( set -e setup_multiple_local_branches [ "$(git lfs migrate import --everything --exclude-ref=refs/heads/main 2>&1)" = \ "Cannot use --everything with --include-ref or --exclude-ref" ] ) end_test begin_test "migrate import (--everything and --include with glob pattern)" ( set -e setup_multiple_local_branches md_main_oid="$(calc_oid "$(git cat-file -p "refs/heads/main:a.md")")" txt_main_oid="$(calc_oid "$(git cat-file -p "refs/heads/main:a.txt")")" md_feature_oid="$(calc_oid "$(git cat-file -p "refs/heads/my-feature:a.md")")" txt_feature_oid="$(calc_oid "$(git cat-file -p "refs/heads/my-feature:a.txt")")" git lfs migrate import --verbose --everything --include='*.[mM][dD]' assert_pointer "refs/heads/main" "a.md" "$md_main_oid" "140" assert_pointer "refs/heads/my-feature" "a.md" "$md_feature_oid" "30" assert_local_object "$md_main_oid" "140" assert_local_object "$md_feature_oid" "30" refute_local_object "$txt_main_oid" refute_local_object "$txt_feature_oid" ) end_test begin_test "migrate import (--everything with tag pointing to tag)" ( set -e setup_multiple_local_branches md_main_oid="$(calc_oid "$(git cat-file -p "refs/heads/main:a.md")")" txt_main_oid="$(calc_oid "$(git cat-file -p "refs/heads/main:a.txt")")" md_feature_oid="$(calc_oid "$(git cat-file -p "refs/heads/my-feature:a.md")")" txt_feature_oid="$(calc_oid "$(git cat-file -p "refs/heads/my-feature:a.txt")")" git tag -a -m abc abc refs/heads/main git tag -a -m def def refs/tags/abc git lfs migrate import --verbose --everything --include='*.[mM][dD]' assert_pointer "refs/heads/main" "a.md" "$md_main_oid" "140" assert_pointer "refs/tags/abc" "a.md" "$md_main_oid" "140" assert_pointer "refs/tags/def" "a.md" "$md_main_oid" "140" assert_pointer "refs/heads/my-feature" "a.md" "$md_feature_oid" "30" git tag --points-at refs/tags/abc | grep -q def git tag --points-at refs/tags/def | grep -q abc && exit 1 assert_local_object "$md_main_oid" "140" assert_local_object "$md_feature_oid" "30" refute_local_object 
"$txt_main_oid" refute_local_object "$txt_feature_oid" ) end_test begin_test "migrate import (nested sub-trees and --include with wildcard)" ( set -e setup_single_local_branch_deep_trees oid="$(calc_oid "$(git cat-file -p :foo/bar/baz/a.txt)")" size="$(git cat-file -p :foo/bar/baz/a.txt | wc -c | awk '{ print $1 }')" git lfs migrate import --include="**/*ar/**" assert_pointer "refs/heads/main" "foo/bar/baz/a.txt" "$oid" "$size" assert_local_object "$oid" "$size" ) end_test begin_test "migrate import (handle copies of files)" ( set -e setup_single_local_branch_deep_trees # add the object from the sub-tree to the root directory cp foo/bar/baz/a.txt a.txt git add a.txt git commit -m "duplicated file" oid_root="$(calc_oid "$(git cat-file -p :a.txt)")" oid_tree="$(calc_oid "$(git cat-file -p :foo/bar/baz/a.txt)")" size="$(git cat-file -p :foo/bar/baz/a.txt | wc -c | awk '{ print $1 }')" # only import objects under "foo" git lfs migrate import --include="foo/**" assert_pointer "refs/heads/main" "foo/bar/baz/a.txt" "$oid_tree" "$size" assert_local_object "$oid_tree" "$size" # "a.txt" is not under "foo" and therefore should not be in LFS oid_root_after_migration="$(calc_oid "$(git cat-file -p :a.txt)")" [ "$oid_root" = "$oid_root_after_migration" ] ) end_test begin_test "migrate import (filter matches files only)" ( set -e setup_single_local_branch_same_file_tree_ext txt_root_oid="$(calc_oid "$(git cat-file -p :a.txt)")" txt_foo_oid="$(calc_oid "$(git cat-file -p :foo/a.txt)")" md_bar_oid="$(calc_oid "$(git cat-file -p :bar.txt/b.md)")" txt_bar_oid="$(calc_oid "$(git cat-file -p :bar.txt/b.txt)")" git lfs migrate import --include="*.txt" assert_local_object "$txt_root_oid" "120" assert_local_object "$txt_foo_oid" "120" assert_local_object "$txt_bar_oid" "120" refute_local_object "$md_bar_oid" ) end_test begin_test "migrate import (--object-map)" ( set -e setup_multiple_local_branches output_dir="$GIT_LFS_TEST_DIR/import-object-map-$(lfstest-genrandom --base64url 32)" mkdir -p "$output_dir" git log --all --pretty='format:%H' > "${output_dir}/old_sha.txt" git lfs migrate import --everything --object-map "${output_dir}/object-map.txt" git log --all --pretty='format:%H' > "${output_dir}/new_sha.txt" paste -d',' "${output_dir}/old_sha.txt" "${output_dir}/new_sha.txt" > "${output_dir}/expected-map.txt" diff -u <(sort "${output_dir}/expected-map.txt") <(sort "${output_dir}/object-map.txt") ) end_test begin_test "migrate import (--include with space)" ( set -e setup_local_branch_with_space oid="$(calc_oid "$(git cat-file -p :"a file.txt")")" git lfs migrate import --include "a file.txt" assert_pointer "refs/heads/main" "a file.txt" "$oid" 50 cat .gitattributes if [ 1 -ne "$(grep -c "a\[\[:space:\]\]file.txt" .gitattributes)" ]; then echo >&2 "fatal: expected \"a[[:space:]]file.txt\" to appear in .gitattributes" echo >&2 "fatal: got" sed -e 's/^/ /g' < .gitattributes >&2 exit 1 fi ) end_test begin_test "migrate import (handle symbolic link)" ( set -e setup_local_branch_with_symlink txt_oid="$(calc_oid "$(git cat-file -p :a.txt)")" link_oid="$(calc_oid "$(git cat-file -p :link.txt)")" git lfs migrate import --include="*.txt" assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "120" assert_local_object "$txt_oid" "120" # "link.txt" is a symbolic link so it should be not in LFS refute_local_object "$link_oid" "5" ) end_test begin_test "migrate import (commit --allow-empty)" ( set -e reponame="migrate---allow-empty" git init "$reponame" cd "$reponame" git commit --allow-empty -m "initial commit" 
original_head="$(git rev-parse HEAD)" git lfs migrate import --everything migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate import (multiple remotes)" ( set -e setup_multiple_remotes original_main="$(git rev-parse main)" git lfs migrate import migrated_main="$(git rev-parse main)" assert_ref_unmoved "main" "$original_main" "$migrated_main" ) end_test begin_test "migrate import (dirty copy, default negative answer)" ( set -e setup_local_branch_with_dirty_copy original_main="$(git rev-parse main)" echo | git lfs migrate import --everything 2>&1 | tee migrate.log grep "migrate: working copy must not be dirty" migrate.log migrated_main="$(git rev-parse main)" assert_ref_unmoved "main" "$original_main" "$migrated_main" ) end_test begin_test "migrate import (dirty copy, negative answer)" ( set -e setup_local_branch_with_dirty_copy original_main="$(git rev-parse main)" echo "n" | git lfs migrate import --everything 2>&1 | tee migrate.log grep "migrate: working copy must not be dirty" migrate.log migrated_main="$(git rev-parse main)" assert_ref_unmoved "main" "$original_main" "$migrated_main" ) end_test begin_test "migrate import (dirty copy, unknown then negative answer)" ( set -e setup_local_branch_with_dirty_copy original_main="$(git rev-parse main)" echo "x\nn" | git lfs migrate import --everything 2>&1 | tee migrate.log cat migrate.log [ "2" -eq "$(grep -o "override changes in your working copy" migrate.log \ | wc -l | awk '{ print $1 }')" ] grep "migrate: working copy must not be dirty" migrate.log migrated_main="$(git rev-parse main)" assert_ref_unmoved "main" "$original_main" "$migrated_main" ) end_test begin_test "migrate import (dirty copy, positive answer)" ( set -e setup_local_branch_with_dirty_copy oid="$(calc_oid "$(git cat-file -p :a.txt)")" echo "y" | git lfs migrate import --everything 2>&1 | tee migrate.log grep "migrate: changes in your working copy will be overridden ..." \ migrate.log assert_pointer "refs/heads/main" "a.txt" "$oid" "5" assert_local_object "$oid" "5" ) end_test begin_test "migrate import (non-standard refs)" ( set -e setup_multiple_local_branches_non_standard md_oid="$(calc_oid "$(git cat-file -p :a.md)")" txt_oid="$(calc_oid "$(git cat-file -p :a.txt)")" md_feature_oid="$(calc_oid "$(git cat-file -p my-feature:a.md)")" git lfs migrate import --everything assert_pointer "refs/heads/main" "a.md" "$md_oid" "140" assert_pointer "refs/heads/main" "a.txt" "$txt_oid" "120" assert_pointer "refs/pull/1/base" "a.md" "$md_oid" "140" assert_pointer "refs/pull/1/base" "a.txt" "$txt_oid" "120" assert_pointer "refs/heads/my-feature" "a.txt" "$txt_oid" "120" assert_pointer "refs/pull/1/head" "a.txt" "$txt_oid" "120" assert_local_object "$md_oid" "140" assert_local_object "$txt_oid" "120" assert_local_object "$md_feature_oid" "30" ) end_test begin_test "migrate import (copied file)" ( set -e setup_local_branch_with_copied_file git lfs migrate import --above=1b # Expect attributes for "/dir/a" and "/a" if ! grep -q "^/dir/a.txt" ./.gitattributes || ! grep -q "^/a.txt" ./.gitattributes; then exit 1 fi ) end_test begin_test "migrate import (copied file with only a single path)" ( set -e setup_local_branch_with_copied_file oid="$(calc_oid "$(git cat-file -p :a.txt)")" # Prevent MSYS from rewriting /a.txt into a Windows path. MSYS_NO_PATHCONV=1 git lfs migrate import --include="/a.txt" --everything # Expect attribute for only "/a.txt". if grep -q "^/dir/a.txt" ./.gitattributes || ! 
grep -q "^/a.txt" ./.gitattributes; then exit 1 fi refute_pointer "refs/heads/main" "dir/a.txt" "$oid" 5 ) end_test begin_test "migrate import (filename special characters)" ( set -e setup_local_branch_with_special_character_files git lfs migrate import --above=1b # Windows does not allow creation of files with '*', so expect 2 files, not 3 if [ "$IS_WINDOWS" -eq "1" ] ; then test "$(git check-attr filter -- *.bin |grep lfs | wc -l)" -eq 2 || exit 1 else test "$(git check-attr filter -- *.bin |grep lfs | wc -l)" -eq 3 || exit 1 fi ) end_test git-lfs-3.6.1/t/t-migrate-info.sh000077500000000000000000000647011472372047300165330ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/fixtures/migrate.sh" . "$(dirname "$0")/testlib.sh" begin_test "migrate info (default branch)" ( set -e setup_multiple_local_branches original_head="$(git rev-parse HEAD)" diff -u <(git lfs migrate info 2>&1 | tail -n 2) <(cat <<-EOF *.md 140 B 1/1 file 100% *.txt 120 B 1/1 file 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (bare repository)" ( set -e setup_multiple_remote_branches git lfs migrate info --everything ) end_test begin_test "migrate info (given branch)" ( set -e setup_multiple_local_branches original_main="$(git rev-parse refs/heads/main)" original_feature="$(git rev-parse refs/heads/my-feature)" diff -u <(git lfs migrate info my-feature 2>&1 | tail -n 2) <(cat <<-EOF *.md 170 B 2/2 files 100% *.txt 120 B 1/1 file 100% EOF) migrated_main="$(git rev-parse refs/heads/main)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (default branch with filter)" ( set -e setup_multiple_local_branches original_head="$(git rev-parse HEAD)" diff -u <(git lfs migrate info --include "*.md" 2>&1 | tail -n 1) <(cat <<-EOF *.md 140 B 1/1 file 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "refs/heads/main" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (given branch with filter)" ( set -e setup_multiple_local_branches original_main="$(git rev-parse refs/heads/main)" original_feature="$(git rev-parse refs/heads/my-feature)" diff -u <(git lfs migrate info --include "*.md" my-feature 2>&1 | tail -n 1) <(cat <<-EOF *.md 170 B 2/2 files 100% EOF) migrated_main="$(git rev-parse refs/heads/main)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (default branch, exclude remote refs)" ( set -e setup_single_remote_branch git show-ref original_remote="$(git rev-parse refs/remotes/origin/main)" original_main="$(git rev-parse refs/heads/main)" diff -u <(git lfs migrate info 2>&1 | tail -n 2) <(cat <<-EOF *.md 50 B 1/1 file 100% *.txt 30 B 1/1 file 100% EOF) migrated_remote="$(git rev-parse refs/remotes/origin/main)" migrated_main="$(git rev-parse refs/heads/main)" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" assert_ref_unmoved "refs/remotes/origin/main" "$original_remote" "$migrated_remote" ) end_test begin_test "migrate info (given branch, exclude remote refs)" ( set -e setup_multiple_remote_branches original_remote="$(git rev-parse refs/remotes/origin/main)" 
original_main="$(git rev-parse refs/heads/main)" original_feature="$(git rev-parse refs/heads/my-feature)" diff -u <(git lfs migrate info my-feature 2>&1 | tail -n 2) <(cat <<-EOF *.md 52 B 2/2 files 100% *.txt 50 B 2/2 files 100% EOF) migrated_remote="$(git rev-parse refs/remotes/origin/main)" migrated_main="$(git rev-parse refs/heads/main)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/remotes/origin/main" "$original_remote" "$migrated_remote" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (given ref, --skip-fetch)" ( set -e setup_single_remote_branch original_remote="$(git rev-parse refs/remotes/origin/main)" original_main="$(git rev-parse refs/heads/main)" git tag pseudo-remote "$original_remote" # Remove the refs/remotes/origin/main ref, and instruct 'git lfs migrate' to # not fetch it. git update-ref -d refs/remotes/origin/main diff -u <(git lfs migrate info --skip-fetch 2>&1 | tail -n 2) <(cat <<-EOF *.md 190 B 2/2 files 100% *.txt 150 B 2/2 files 100% EOF) migrated_remote="$(git rev-parse pseudo-remote)" migrated_main="$(git rev-parse refs/heads/main)" assert_ref_unmoved "refs/remotes/origin/main" "$original_remote" "$migrated_remote" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" ) end_test begin_test "migrate info (include/exclude ref)" ( set -e setup_multiple_remote_branches original_main="$(git rev-parse refs/heads/main)" original_feature="$(git rev-parse refs/heads/my-feature)" diff -u <(git lfs migrate info \ --include-ref=refs/heads/my-feature \ --exclude-ref=refs/heads/main 2>&1 | tail -n 2) <(cat <<-EOF *.md 31 B 1/1 file 100% *.txt 30 B 1/1 file 100% EOF) migrated_main="$(git rev-parse refs/heads/main)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (include/exclude ref args)" ( set -e setup_multiple_remote_branches original_main="$(git rev-parse refs/heads/main)" original_feature="$(git rev-parse refs/heads/my-feature)" diff -u <(git lfs migrate info \ my-feature ^main 2>&1 | tail -n 2) <(cat <<-EOF *.md 31 B 1/1 file 100% *.txt 30 B 1/1 file 100% EOF) migrated_main="$(git rev-parse refs/heads/main)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (include/exclude ref with filter)" ( set -e setup_multiple_remote_branches original_main="$(git rev-parse refs/heads/main)" original_feature="$(git rev-parse refs/heads/my-feature)" diff -u <(git lfs migrate info \ --include="*.txt" \ --include-ref=refs/heads/my-feature \ --exclude-ref=refs/heads/main 2>&1 | tail -n 1) <(cat <<-EOF *.txt 30 B 1/1 file 100% EOF) migrated_main="$(git rev-parse refs/heads/main)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (invalid ref)" ( set -e remove_and_create_local_repo "migrate-info-invalid-ref" git commit --allow-empty -m "initial commit" git lfs migrate info jibberish >migrate.log 2>&1 && exit 1 
grep "can't resolve ref" migrate.log ) end_test begin_test "migrate info (nested sub-trees, no filter)" ( set -e setup_single_local_branch_deep_trees original_main="$(git rev-parse refs/heads/main)" diff -u <(git lfs migrate info 2>/dev/null) <(cat <<-EOF *.txt 120 B 1/1 file 100% EOF) migrated_main="$(git rev-parse refs/heads/main)" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" ) end_test begin_test "migrate info (above threshold)" ( set -e setup_multiple_local_branches original_head="$(git rev-parse HEAD)" diff -u <(git lfs migrate info --above=130B 2>&1 | tail -n 1) <(cat <<-EOF *.md 140 B 1/1 file 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (above threshold, top)" ( set -e setup_multiple_local_branches lfstest-genrandom --base64 160 >b.bin git add b.bin git commit -m "b.bin" original_head="$(git rev-parse HEAD)" # Ensure command reports only single highest entry due to --top=1 argument. diff -u <(git lfs migrate info --above=130B --top=1 2>&1 | tail -n 1) <(cat <<-EOF *.bin 160 B 1/1 file 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (top)" ( set -e setup_multiple_local_branches lfstest-genrandom --base64 160 >b.bin git add b.bin git commit -m "b.bin" original_head="$(git rev-parse HEAD)" # Ensure command reports nothing if --top argument is less than zero. [ "0" -eq "$(git lfs migrate info --everything --top=-1 2>/dev/null | wc -l)" ] # Ensure command reports nothing if --top argument is zero. [ "0" -eq "$(git lfs migrate info --everything --top=0 2>/dev/null | wc -l)" ] # Ensure command reports no more entries than specified by --top argument. diff -u <(git lfs migrate info --everything --top=2 2>&1 | tail -n 2) <(cat <<-EOF *.md 170 B 2/2 files 100% *.bin 160 B 1/1 file 100% EOF) # Ensure command succeeds if --top argument is greater than total number of entries. 
diff -u <(git lfs migrate info --everything --top=10 2>&1 | tail -n 3) <(cat <<-EOF *.md 170 B 2/2 files 100% *.bin 160 B 1/1 file 100% *.txt 120 B 1/1 file 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (given unit)" ( set -e setup_multiple_local_branches original_head="$(git rev-parse HEAD)" diff -u <(git lfs migrate info --unit=kb 2>&1 | tail -n 2) <(cat <<-EOF *.md 0.1 1/1 file 100% *.txt 0.1 1/1 file 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (doesn't show empty info entries)" ( set -e setup_multiple_local_branches original_head="$(git rev-parse HEAD)" [ "0" -eq "$(git lfs migrate info --above=1mb 2>/dev/null | wc -l)" ] migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (empty set)" ( set -e setup_multiple_local_branches migrate="$(git lfs migrate info \ --include-ref=refs/heads/main \ --exclude-ref=refs/heads/main 2>/dev/null )" [ "0" -eq "$(echo -n "$migrate" | wc -c | awk '{ print $1 }')" ] ) end_test begin_test "migrate info (no-extension files)" ( set -e setup_multiple_local_branches_with_alternate_names git checkout main original_main="$(git rev-parse refs/heads/main)" original_feature="$(git rev-parse refs/heads/my-feature)" diff -u <(git lfs migrate info --everything 2>&1 | tail -n 2) <(cat <<-EOF no_extension 220 B 2/2 files 100% *.txt 170 B 2/2 files 100% EOF) migrated_main="$(git rev-parse refs/heads/main)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (--everything)" ( set -e setup_multiple_local_branches git checkout main original_main="$(git rev-parse refs/heads/main)" original_feature="$(git rev-parse refs/heads/my-feature)" diff -u <(git lfs migrate info --everything 2>&1 | tail -n 2) <(cat <<-EOF *.md 170 B 2/2 files 100% *.txt 120 B 1/1 file 100% EOF) migrated_main="$(git rev-parse refs/heads/main)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (existing .gitattributes symlink)" ( set -e setup_local_branch_with_gitattrs link git lfs migrate info --everything 2>&1 | tee migrate.log if [ ${PIPESTATUS[0]} -eq 0 ]; then echo >&2 "fatal: expected git lfs migrate info to fail, didn't" exit 1 fi grep "migrate: expected '.gitattributes' to be a file, got a symbolic link" migrate.log main="$(git rev-parse refs/heads/main)" attrs_main_sha="$(git show $main:.gitattributes | git hash-object --stdin)" diff -u <(git ls-tree $main -- .gitattributes) <(cat <<-EOF 120000 blob $attrs_main_sha .gitattributes EOF ) ) end_test begin_test "migrate info (potential fixup, --fixup, .gitattributes symlink)" ( set -e setup_single_local_branch_tracked_corrupt link git lfs migrate info 2>&1 | tee migrate.log if [ ${PIPESTATUS[0]} -eq 0 ]; then echo >&2 "fatal: expected git lfs migrate info to fail, didn't" exit 1 fi grep "migrate: expected '.gitattributes' to be a file, got a symbolic link" migrate.log main="$(git rev-parse refs/heads/main)" attrs_main_sha="$(git show $main:.gitattributes | git hash-object --stdin)" diff 
-u <(git ls-tree $main -- .gitattributes) <(cat <<-EOF 120000 blob $attrs_main_sha .gitattributes EOF ) ) end_test begin_test "migrate info (no potential fixup, --fixup, no .gitattributes)" ( set -e setup_multiple_local_branches original_head="$(git rev-parse HEAD)" # Ensure "fixup" command reports nothing if no files are tracked by LFS. git lfs migrate info --everything --fixup >migrate.log [ "0" -eq "$(cat migrate.log | wc -l)" ] migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (no potential fixup, --fixup, .gitattributes with macro)" ( set -e setup_multiple_local_branches echo "[attr]foo foo" >.gitattributes lfstest-genrandom --base64 30 >a.md git add .gitattributes a.md git commit -m macro original_head="$(git rev-parse HEAD)" # Ensure "fixup" command reports nothing if no files are tracked by LFS. git lfs migrate info --everything --fixup >migrate.log [ "0" -eq "$(cat migrate.log | wc -l)" ] migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (all files tracked)" ( set -e setup_single_local_branch_tracked original_head="$(git rev-parse HEAD)" # Ensure default command reports objects if all files are tracked by LFS. diff -u <(git lfs migrate info 2>&1 | tail -n 3) <(cat <<-EOF *.gitattributes 83 B 1/1 file 100% LFS Objects 260 B 2/2 files 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (all files tracked, --pointers=follow)" ( set -e setup_single_local_branch_tracked original_head="$(git rev-parse HEAD)" # Ensure "follow" command reports objects if all files are tracked by LFS. diff -u <(git lfs migrate info --pointers=follow 2>&1 | tail -n 3) <(cat <<-EOF *.gitattributes 83 B 1/1 file 100% LFS Objects 260 B 2/2 files 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (all files tracked, --pointers=no-follow)" ( set -e setup_single_local_branch_tracked original_head="$(git rev-parse HEAD)" # Ensure "no-follow" command reports pointers if all files are tracked by LFS. diff -u <(git lfs migrate info --pointers=no-follow 2>&1 | tail -n 3) <(cat <<-EOF *.md 128 B 1/1 file 100% *.txt 128 B 1/1 file 100% *.gitattributes 83 B 1/1 file 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (all files tracked, --pointers=ignore)" ( set -e setup_single_local_branch_tracked original_head="$(git rev-parse HEAD)" # Ensure "ignore" command reports no objects if all files are tracked by LFS. diff -u <(git lfs migrate info --pointers=ignore 2>&1 | tail -n 1) <(cat <<-EOF *.gitattributes 83 B 1/1 file 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (all files tracked, --fixup)" ( set -e setup_single_local_branch_tracked original_head="$(git rev-parse HEAD)" # Ensure "fixup" command reports nothing if all files are tracked by LFS. 
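  # (The "wc -l" count over stdout is the suite's stock emptiness check;
  # stderr is discarded via 2>/dev/null so progress output or warnings
  # cannot inflate the line count.)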
[ "0" -eq "$(git lfs migrate info --fixup 2>/dev/null | wc -l)" ] migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (all files tracked, --everything)" ( set -e setup_multiple_local_branches_tracked original_main="$(git rev-parse refs/heads/main)" original_feature="$(git rev-parse refs/heads/my-feature)" # Ensure default command reports objects if all files are tracked by LFS. diff -u <(git lfs migrate info --everything 2>&1 | tail -n 3) <(cat <<-EOF *.gitattributes 83 B 1/1 file 100% LFS Objects 290 B 3/3 files 100% EOF) migrated_main="$(git rev-parse refs/heads/main)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (all files tracked, --everything and --pointers=follow)" ( set -e setup_multiple_local_branches_tracked original_main="$(git rev-parse refs/heads/main)" original_feature="$(git rev-parse refs/heads/my-feature)" # Ensure "follow" command reports objects if all files are tracked by LFS. diff -u <(git lfs migrate info --everything --pointers=follow 2>&1 | tail -n 3) <(cat <<-EOF *.gitattributes 83 B 1/1 file 100% LFS Objects 290 B 3/3 files 100% EOF) migrated_main="$(git rev-parse refs/heads/main)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (all files tracked, --everything and --pointers=no-follow)" ( set -e setup_multiple_local_branches_tracked original_main="$(git rev-parse refs/heads/main)" original_feature="$(git rev-parse refs/heads/my-feature)" # Ensure "no-follow" command reports pointers if all files are tracked by LFS. diff -u <(git lfs migrate info --everything --pointers=no-follow 2>&1 | tail -n 3) <(cat <<-EOF *.md 255 B 2/2 files 100% *.txt 128 B 1/1 file 100% *.gitattributes 83 B 1/1 file 100% EOF) migrated_main="$(git rev-parse refs/heads/main)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (all files tracked, --everything and --pointers=ignore)" ( set -e setup_multiple_local_branches_tracked original_main="$(git rev-parse refs/heads/main)" original_feature="$(git rev-parse refs/heads/my-feature)" # Ensure "ignore" command reports no objects if all files are tracked by LFS. diff -u <(git lfs migrate info --everything --pointers=ignore 2>&1 | tail -n 1) <(cat <<-EOF *.gitattributes 83 B 1/1 file 100% EOF) migrated_main="$(git rev-parse refs/heads/main)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (all files tracked, --everything and --fixup)" ( set -e setup_multiple_local_branches_tracked original_main="$(git rev-parse refs/heads/main)" original_feature="$(git rev-parse refs/heads/my-feature)" # Ensure "fixup" command reports nothing if all files are tracked by LFS. 
[ "0" -eq "$(git lfs migrate info --everything --fixup 2>/dev/null | wc -l)" ] migrated_main="$(git rev-parse refs/heads/main)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/main" "$original_main" "$migrated_main" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (potential fixup)" ( set -e setup_single_local_branch_tracked_corrupt original_head="$(git rev-parse HEAD)" # Ensure command reports files which should be tracked but have not been # stored properly as LFS pointers. diff -u <(git lfs migrate info 2>&1 | tail -n 2) <(cat <<-EOF *.txt 120 B 1/1 file 100% *.gitattributes 42 B 1/1 file 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (potential fixup, --fixup)" ( set -e setup_single_local_branch_tracked_corrupt original_head="$(git rev-parse HEAD)" # Ensure "fixup" command reports files which should be tracked but have not # been stored properly as LFS pointers, and ignores .gitattributes files. diff -u <(git lfs migrate info --fixup 2>&1 | tail -n 1) <(cat <<-EOF *.txt 120 B 1/1 file 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (potential fixup, --fixup, .gitattributes with macro)" ( set -e setup_single_local_branch_tracked_corrupt macro original_head="$(git rev-parse HEAD)" # Ensure "fixup" command reports files which should be tracked but have not # been stored properly as LFS pointers, and ignores .gitattributes files. diff -u <(git lfs migrate info --fixup 2>&1 | tail -n 1) <(cat <<-EOF *.txt 120 B 1/1 file 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test # NOTE: We skip this test for now as the "git lfs migrate" commands do not # fully process macro attribute definitions yet. #begin_test "migrate info (potential fixup, --fixup, .gitattributes with LFS macro)" #( # set -e # # setup_single_local_branch_tracked_corrupt lfsmacro # # original_head="$(git rev-parse HEAD)" # # # Ensure "fixup" command reports files which should be tracked but have not # # been stored properly as LFS pointers, and ignores .gitattributes files. # diff -u <(git lfs migrate info --fixup 2>&1 | tail -n 1) <(cat <<-EOF # *.txt 120 B 1/1 file 100% # EOF) # # migrated_head="$(git rev-parse HEAD)" # # assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" #) #end_test begin_test "migrate info (potential fixup, complex nested)" ( set -e setup_single_local_branch_complex_tracked original_head="$(git rev-parse HEAD)" # Ensure command reports the file which should be tracked but has not been # stored properly (a.txt) and the file which is not tracked (dir/b.txt). diff -u <(git lfs migrate info 2>&1 | tail -n 2) <(cat <<-EOF *.gitattributes 69 B 2/2 files 100% *.txt 2 B 2/2 files 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (potential fixup, complex nested, --fixup)" ( set -e setup_single_local_branch_complex_tracked original_head="$(git rev-parse HEAD)" # Ensure "fixup" command reports the file which should be tracked but has not # been stored properly (a.txt), and ignores .gitattributes files and # the file which is not tracked (dir/b.txt). 
diff -u <(git lfs migrate info --fixup 2>&1 | tail -n 1) <(cat <<-EOF *.txt 1 B 1/1 file 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (ambiguous reference)" ( set -e setup_multiple_local_branches # Create an ambiguously named reference sharing the name as the SHA-1 of # "HEAD". sha="$(git rev-parse HEAD)" git tag "$sha" git lfs migrate info --everything ) end_test begin_test "migrate info (--everything with args)" ( set -e setup_multiple_local_branches git lfs migrate info --everything main 2>&1 | tee migrate.log if [ "${PIPESTATUS[0]}" -eq 0 ]; then echo >&2 "fatal: expected 'git lfs migrate ...' to fail, didn't ..." exit 1 fi grep -q "Cannot use --everything with explicit reference arguments" \ migrate.log ) end_test begin_test "migrate info (--everything with --include-ref)" ( set -e setup_multiple_local_branches git lfs migrate info --everything --include-ref=refs/heads/main 2>&1 | \ tee migrate.log if [ "${PIPESTATUS[0]}" -eq 0 ]; then echo >&2 "fatal: expected 'git lfs migrate ...' to fail, didn't ..." exit 1 fi grep -q "Cannot use --everything with --include-ref or --exclude-ref" \ migrate.log ) end_test begin_test "migrate info (--everything with --exclude-ref)" ( set -e setup_multiple_local_branches git lfs migrate info --everything --exclude-ref=refs/heads/main 2>&1 | \ tee migrate.log if [ "${PIPESTATUS[0]}" -eq 0 ]; then echo >&2 "fatal: expected 'git lfs migrate ...' to fail, didn't ..." exit 1 fi grep -q "Cannot use --everything with --include-ref or --exclude-ref" \ migrate.log ) end_test begin_test "migrate info (--pointers invalid)" ( set -e setup_multiple_local_branches git lfs migrate info --everything --pointers=foo 2>&1 | tee migrate.log if [ "${PIPESTATUS[0]}" -eq 0 ]; then echo >&2 "fatal: expected 'git lfs migrate ...' to fail, didn't ..." exit 1 fi grep -q "Unsupported --pointers option value" migrate.log ) end_test begin_test "migrate info (--fixup, --pointers=follow)" ( set -e setup_single_local_branch_tracked_corrupt git lfs migrate info --everything --fixup --pointers=follow 2>&1 \ | tee migrate.log if [ "${PIPESTATUS[0]}" -eq 0 ]; then echo >&2 "fatal: expected 'git lfs migrate ...' to fail, didn't ..." exit 1 fi grep -q "Cannot use --fixup with --pointers=follow" migrate.log ) end_test begin_test "migrate info (--fixup, --pointers=no-follow)" ( set -e setup_single_local_branch_tracked_corrupt git lfs migrate info --everything --fixup --pointers=no-follow 2>&1 \ | tee migrate.log if [ "${PIPESTATUS[0]}" -eq 0 ]; then echo >&2 "fatal: expected 'git lfs migrate ...' to fail, didn't ..." exit 1 fi grep -q "Cannot use --fixup with --pointers=no-follow" migrate.log ) end_test begin_test "migrate info (--fixup, --include)" ( set -e setup_single_local_branch_tracked_corrupt git lfs migrate info --everything --fixup --include="*.txt" 2>&1 \ | tee migrate.log if [ "${PIPESTATUS[0]}" -eq 0 ]; then echo >&2 "fatal: expected 'git lfs migrate ...' to fail, didn't ..." exit 1 fi grep -q "Cannot use --fixup with --include, --exclude" migrate.log ) end_test begin_test "migrate info (--fixup, --exclude)" ( set -e setup_single_local_branch_tracked_corrupt git lfs migrate info --everything --fixup --exclude="*.txt" 2>&1 \ | tee migrate.log if [ "${PIPESTATUS[0]}" -eq 0 ]; then echo >&2 "fatal: expected 'git lfs migrate ...' to fail, didn't ..."
exit 1 fi grep -q "Cannot use --fixup with --include, --exclude" migrate.log ) end_test git-lfs-3.6.1/t/t-multiple-remotes.sh000077500000000000000000000103061472372047300174510ustar00rootroot00000000000000#!/usr/bin/env bash # Test lfs capability to download data when blobs are stored in different # endpoints . "$(dirname "$0")/testlib.sh" # This feature depends on the treeish parameter that is provided as metadata # in git versions higher or equal than 2.27 ensure_git_version_isnt $VERSION_LOWER "2.27.0" reponame="$(basename "$0" ".sh")" prepare_consumer() { local consumer="$1" mkdir "$consumer" cd "$consumer" git init git remote add mr "file://$(urlify "$REMOTEDIR/$smain.git")" git remote add fr "file://$(urlify "$REMOTEDIR/$sfork.git")" git fetch mr git fetch fr } prepare_forks () { local testcase="$1" smain="$reponame"-"$testcase"-main-remote sfork="$reponame"-"$testcase"-fork-remote cmain="$HOME"/"$reponame"-"$testcase"-main-repo cfork="$HOME"/"$reponame"-"$testcase"-fork-repo setup_remote_repo "$smain" setup_remote_repo "$sfork" prepare_consumer "$cmain" git checkout -b main git lfs track '*.bin' git add --all git commit -m "Initial commit" git push -u mr main git push -u fr main #Add a .bin in main repo touch a.bin printf "1234" > a.bin git add --all git commit -m "Add Bin file" git push mr main prepare_consumer "$cfork" } exec_fail_git(){ set +e git "$@" res=$? set -e if [ "$res" = "0" ]; then exit 1 fi } begin_test "accept reset to different remote" ( set -e prepare_forks "a-reset" git checkout fr/main git config lfs.remote.searchall false git config lfs.remote.autodetect true git reset --hard mr/main ) end_test begin_test "accept pull from different remote" ( set -e prepare_forks "a-pull" git checkout fr/main git config lfs.remote.searchall false git config lfs.remote.autodetect true git pull mr main ) end_test begin_test "accept checkout different remote" ( set -e prepare_forks "a-checkout" git checkout fr/main git config lfs.remote.searchall false git config lfs.remote.autodetect true git checkout mr/main ) end_test begin_test "accept rebase different remote" ( set -e prepare_forks "a-rebase" git checkout fr/main git config lfs.remote.searchall false git config lfs.remote.autodetect true git rebase mr/main ) end_test begin_test "accept add bin file with sparsecheckout" ( set -e prepare_forks "a-sparsecheckout" git sparse-checkout init --no-cone git sparse-checkout set /.gitignore git checkout mr/main git config lfs.remote.searchall false git config lfs.remote.autodetect true git sparse-checkout add a.bin ) end_test begin_test "accept cherry-pick head different remote" ( set -e prepare_forks "a-cherrypick" git checkout -b main --track fr/main git config lfs.remote.searchall true git config lfs.remote.autodetect false git cherry-pick mr/main ) end_test begin_test "reject reset to different remote" ( set -e prepare_forks "r-reset" git checkout fr/main git config lfs.remote.searchall false git config lfs.remote.autodetect false exec_fail_git reset --hard mr/main ) end_test begin_test "reject pull from different remote" ( set -e prepare_forks "r-pull" git checkout fr/main git config lfs.remote.searchall false git config lfs.remote.autodetect false exec_fail_git pull mr main ) end_test begin_test "reject checkout different remote" ( set -e prepare_forks "r-checkout" git checkout fr/main git config lfs.remote.searchall false git config lfs.remote.autodetect false exec_fail_git checkout mr/main ) end_test begin_test "reject rebase different remote" ( set -e prepare_forks "r-rebase" git 
checkout fr/main git config lfs.remote.searchall false git config lfs.remote.autodetect false exec_fail_git rebase mr/main ) end_test begin_test "reject add bin file with sparsecheckout" ( set -e prepare_forks "r-sparsecheckout" git sparse-checkout init --no-cone git sparse-checkout set /.gitignore git checkout mr/main git config lfs.remote.searchall false git config lfs.remote.autodetect false exec_fail_git sparse-checkout add a.bin ) end_test begin_test "reject cherry-pick head different remote" ( set -e prepare_forks "r-cherrypick" git checkout -b main --track fr/main git config lfs.remote.searchall false git config lfs.remote.autodetect false exec_fail_git cherry-pick mr/main ) end_test git-lfs-3.6.1/t/t-no-remote.sh000077500000000000000000000034341472372047300160530ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" reponame_src="$(basename "$0" ".sh")-src" reponame_dst="$(basename "$0" ".sh")-dst" begin_test "fetch lfs-tracked file despite no remote" ( set -e # First, a repo with an lfs-tracked file we can fetch from setup_remote_repo_with_file "$reponame_src" "test_file.dat" # Grab the rev for `git archive` later echo $(pwd) rev=$(git rev-parse HEAD) cd .. # Initialize a bare repo we can fetch into mkdir $reponame_dst cd $reponame_dst git init . --bare echo $(pwd) git fetch "$GITSERVER/$reponame_src" refs/heads/main:refs/heads/main git archive $rev -o archive.out # Verify archive contains our file grep "test_file.dat" archive.out ) end_test reponame_src_A="$(basename "$0" ".sh")-src-A" reponame_src_B="$(basename "$0" ".sh")-src-B" reponame_dst_2="$(basename "$0" ".sh")-dst-2" begin_test "fallback ignored when remote present" ( set -e # Initialize 2 repos with different files setup_remote_repo_with_file "$reponame_src_A" "test_file_A.dat" rev=$(git rev-parse HEAD) cd .. setup_remote_repo_with_file "$reponame_src_B" "test_file_B.dat" cd .. mkdir $reponame_dst_2 cd $reponame_dst_2 git init . --bare echo $(pwd) # This part is subtle # Add repo A as a remote and fetch from it # But then fetch from repo B. This points FETCH_HEAD to repo B # We're testing that git-lfs will ignore FETCH_HEAD, since FETCH_HEAD is # a fallback, only used when no remote is set git remote add origin "$GITSERVER/$reponame_src_A" git fetch git fetch "$GITSERVER/$reponame_src_B" refs/heads/main:refs/heads/main git archive $rev -o archive.out # Verify archive contains file from first repo, but not second repo grep "test_file_A.dat" archive.out grep -v "test_file_B.dat" archive.out ) end_test git-lfs-3.6.1/t/t-object-authenticated.sh000077500000000000000000000010361472372047300202300ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" # these tests rely on GIT_TERMINAL_PROMPT to test properly ensure_git_version_isnt $VERSION_LOWER "2.3.0" begin_test "download authenticated object" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" without-creds git lfs track "*.dat" printf "object-authenticated" > hi.dat git add hi.dat git add .gitattributes git commit -m "initial commit" GIT_CURL_VERBOSE=1 GIT_TERMINAL_PROMPT=0 git lfs push origin main ) end_test git-lfs-3.6.1/t/t-path.sh000077500000000000000000000173411472372047300151020ustar00rootroot00000000000000#!/usr/bin/env bash .
"$(dirname "$0")/testlib.sh" begin_test "does not look in current directory for git" ( set -e reponame="$(basename "$0" ".sh")" git init "$reponame" cd "$reponame" cp "$BINPATH/lfstest-badpathcheck$X" "git$X" # This should always succeed, even if git-lfs is incorrectly searching for # executables in the current directory first, because the "git-lfs env" # command ignores all errors when it runs "git config". So we should always # pass this step and then, if our malicious Git was executed, detect # its output below. If this command does fail, something else is wrong. PATH="$BINPATH" PATHEXT="$X" "git-lfs$X" env >output.log 2>&1 grep "exploit" output.log && false [ ! -f exploit ] ) end_test begin_test "does not look in current directory for git with credential helper" ( set -e reponame="$(basename "$0" ".sh")-credentials" setup_remote_repo "$reponame" clone_repo "$reponame" credentials-1 git lfs track "*.dat" printf abc > z.dat git add z.dat git add .gitattributes GITPATH="$(dirname "$(command -v git)")" SHELLPATH="$(dirname "$(command -v sh)")" # We add our malicious Git to the index and then remove it from the # work tree so it is not found early, before we perform our key test. # Specifically, our "git push" below will run git-lfs, which then runs # "git credential", so if we are looking for Git in the current directory # first when running a credential helper, we will fail at that point # because our malicious Git will be found first. # # We prefer to check for this behavior during our "git-lfs pull" further # below when we are populating LFS objects into a clone of this repo # (which contains the malicious Git), so for now we remove the malicious # Git as soon as possible. # # As of Go 1.19 we also need to specify the GODEBUG environment variable # with a value of "execerrdot=0" in order to avoid occasional failures # our "git add" command below. These failures occur due to a specific # set of conditions. First, if the last-modified time of the .git/index # file is within a second of that of z.dat, the "git add" command will # refresh the Git index (assuming Git was compiled with USE_NSEC=0, as # appears to be the case for Git for Windows), and Git LFS will be invoked # to "clean" the z.dat file again. # # If that occurs, then when Git LFS runs it looks for Git, and until we # revise Git LFS to rely on Go's os/exec package to not execute programs # found in the current working directory (as described in # https://go.dev/blog/path-security), the os/exec package will detect our # malicious Git program in the current working directory and report an # error. This occurs when Git LFS first initializes a new exec.Cmd # structure, even though Git LFS would then locate the true Git executable # from our custom PATH and reset the Path member of the Cmd structure # before trying to execute the program. # # Since we explicitly test Git LFS's avoidance of programs in the current # working directory using the "git-lfs pull" command further below, here # we just want "git add" to succeed, and so for the time being we disable # Go's new security checks for this command only. We will revisit this # when we address the larger issue of re-adopting Go's own logic for # locating executable programs. cp "$BINPATH/lfstest-badpathcheck$X" "git$X" GODEBUG=execerrdot=0 \ PATH="$BINPATH:$GITPATH:$SHELLPATH" "$GITPATH/git$X" add "git$X" rm "git$X" git commit -m "Add files" git push origin HEAD cd .. 
unset GIT_ASKPASS SSH_ASKPASS # When we call "git clone" below, it will run git-lfs as a smudge filter # during the post-clone checkout phase, and specifically will run git-lfs # in the newly cloned repository directory which contains a copy of our # malicious Git. So, if we are looking for Git in the current directory # first in most cases (and not just when running a credential helper), # then when git-lfs runs "git config" we will fail at that point because # our malicious Git will be found first. This occurs even if we specify # GIT_LFS_SKIP_SMUDGE=1 because git-lfs will still run "git config". # # We could ignore errors from clone_repo() and then search for the output # of our malicious Git in the t-path-credentials-2 directory; however, # this may be somewhat fragile as clone_repo() performs other steps such # as changing the current working directory to the new repo clone and # attempting to run "git config" there. # # Instead, since our key check of "git-lfs pull" below will also detect # the general failure case where we are looking for Git in the current # directory first when running most commands, we temporarily uninstall # Git LFS so no smudge filter will execute when "git clone" checks out the # repository. # # We also remove any "exploit" file potentially created by our malicious # Git in case it was run anywhere in clone_repo(), which may happen if # PATH contains the "." directory already. Note that we reset PATH # to contain only the necessary directories in our key "git-lfs pull" # check below. git lfs uninstall clone_repo "$reponame" t-path-credentials-2 rm -f exploit pushd .. git lfs install popd # As noted, if we are looking for Git in the current directory first # only when running a credential helper, then when this runs # "git credential", it will find our malicious Git in the current directory # and execute it. # # If we are looking for Git in the current directory first when running # most commands (and not just when running a credential helper), then this # will also find our malicious Git. However, in this case it will find it # earlier when we try to run "git config" rather than later when we try # to run "git credential". # # We use a pipeline with "tee" here so as to avoid an early failure in the # case that our "git-lfs pull" command executes our malicious Git. # Unlike "git-lfs env" in the other tests, "git-lfs pull" will halt when # it does not receive the normal output from Git. This in turn halts # our test due to our use of the "set -e" option, unless we terminate a # pipeline with successful command like "tee". PATH="$BINPATH:$GITPATH" PATHEXT="$X" "git-lfs$X" pull 2>&1 | tee output.log grep "exploit" output.log && false [ ! -f exploit ] ) end_test begin_test "does not look in current directory for wrong binary using PATHEXT" ( set -e # Windows is the only platform where Go searches for executable files # by appending file extensions from PATHEXT. [ "$IS_WINDOWS" -eq 0 ] && exit 0 reponame="$(basename "$0" ".sh")-notfound" git init "$reponame" cd "$reponame" # Go on Windows always looks in the current directory first when creating # a command handler, so we need a dummy git.exe for it to find there since # we will restrict PATH to exclude the real Git when we run "git-lfs env" # below. If our git-lfs incorrectly proceeds to run the command handler # despite not finding Git in PATH either, Go may then search for a file # named "." 
with any path extension from PATHEXT and execute that file # instead, so we create a malicious file named "..exe" to check this case. touch "git$X" cp "$BINPATH/lfstest-badpathcheck$X" ".$X" # This should always succeed, even if git-lfs is incorrectly searching for # executables in the current directory first, because the "git-lfs env" # command ignores all errors when it runs "git config". So we should always # pass this step and then, if our malicious program was executed, detect # its output below. If this command does fail, something else is wrong. PATH="$BINPATH" PATHEXT="$X" "git-lfs$X" env >output.log 2>&1 grep "exploit" output.log && false [ ! -f exploit ] ) end_test git-lfs-3.6.1/t/t-pointer.sh000077500000000000000000000246101472372047300156250ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "pointer --file --stdin" ( set -e echo "simple" > some-file input="version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7" expected="Git LFS pointer for some-file version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7 Git blob OID: e18acd45d7e3ce0451d1d637f9697aa508e07dee Pointer from STDIN version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7 Git blob OID: e18acd45d7e3ce0451d1d637f9697aa508e07dee" [ "$expected" = "$(echo "$input" | git lfs pointer --file=some-file --stdin 2>&1)" ] ) end_test begin_test "pointer --file --stdin mismatch" ( set -e echo "simple" > some-file input="version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 123" set +e output=$(echo "$input" | git lfs pointer --file=some-file --stdin 2>&1) status=$? set -e [ "1" = "$status" ] expected="Git LFS pointer for some-file version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7 Git blob OID: e18acd45d7e3ce0451d1d637f9697aa508e07dee Pointer from STDIN version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 123 Git blob OID: 905bcc24b5dc074ab870f9944178e398eec3b470 Pointers do not match" [ "$expected" = "$output" ] ) end_test begin_test "pointer --stdin" ( set -e echo "version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7" > valid-pointer output=$(cat valid-pointer | git lfs pointer --stdin 2>&1) expected="Pointer from STDIN version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7" [ "$expected" = "$output" ] ) end_test begin_test "pointer --stdin without stdin" ( # this test doesn't work on Windows, it just operates like 'bad pointer' case # stdin isn't detectable as detached, it just times out with no content if [[ "$(is_stdin_attached)" == "0" ]]; then echo "Skipping pointer without stdin because STDIN attached" exit 0 fi output=$(echo "" | git lfs pointer --stdin 2>&1) status=$? set -e expected="Cannot read from STDIN. The --stdin flag expects a pointer file from STDIN." [ "$expected" = "$output" ] [ "1" = "$status" ] ) end_test begin_test "pointer --stdin with bad pointer" ( output=$(echo "not a pointer" | git lfs pointer --stdin 2>&1) status=$? 
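  # (The exit status is captured before "set -e" is turned on; enabling it
  # first would abort the subshell on the expected nonzero exit instead of
  # letting the assertions below inspect it.)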
set -e expected="Pointer from STDIN Pointer file error: invalid header" diff -u <(printf "%s" "$expected") <(printf "%s" "$output") [ "1" = "$status" ] ) end_test begin_test "pointer --file --pointer mismatch" ( set -e echo "simple" > some-file echo "version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 123" > invalid-pointer expected="Git LFS pointer for some-file version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7 Git blob OID: e18acd45d7e3ce0451d1d637f9697aa508e07dee Pointer from invalid-pointer version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 123 Git blob OID: 905bcc24b5dc074ab870f9944178e398eec3b470 Pointers do not match" set +e output=$(git lfs pointer --file=some-file --pointer=invalid-pointer 2>&1) status=$? set -e [ "1" = "$status" ] [ "$expected" = "$output" ] ) end_test begin_test "pointer --file --pointer" ( set -e echo "simple" > some-file echo "version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7" > valid-pointer expected="Git LFS pointer for some-file version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7 Git blob OID: e18acd45d7e3ce0451d1d637f9697aa508e07dee Pointer from valid-pointer version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7 Git blob OID: e18acd45d7e3ce0451d1d637f9697aa508e07dee" [ "$expected" = "$(git lfs pointer --file=some-file --pointer=valid-pointer 2>&1)" ] ) end_test begin_test "pointer --pointer" ( set -e echo "version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7" > valid-pointer expected="Pointer from valid-pointer version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7" [ "$expected" = "$(git lfs pointer --pointer=valid-pointer 2>&1)" ] ) end_test begin_test "pointer missing --pointer" ( output=$(git lfs pointer --pointer=missing-pointer 2>&1) status=$? set -e [ "1" = "$status" ] echo "$output" echo "$output" | grep "open missing-pointer:" ) end_test begin_test "pointer invalid --pointer" ( set -e echo "not a pointer" > some-pointer set +e output=$(git lfs pointer --pointer=some-pointer 2>&1) status=$? set -e [ "1" = "$status" ] expected="Pointer from some-pointer Pointer file error: invalid header" diff -u <(printf "%s" "$expected") <(printf "%s" "$output") [ "$expected" = "$output" ] ) end_test begin_test "pointer --file" ( set -e echo "simple" > some-file expected="Git LFS pointer for some-file version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7" [ "$expected" = "$(git lfs pointer --file=some-file 2>&1)" ] ) end_test begin_test "pointer without args" ( output=$(git lfs pointer 2>&1) status=$? set -e [ "Nothing to do!" 
= "$output" ] [ "1" = "$status" ] ) end_test begin_test "pointer stdout/stderr" ( set -e echo "pointer-stdout-test" > pointer-stdout-test.txt git lfs pointer --file=pointer-stdout-test.txt > stdout.txt 2> stderr.txt echo "stdout:" cat stdout.txt [ $(wc -l stdout.txt | sed -e 's/^[[:space:]]*//' | cut -f1 -d' ') -eq 3 ] grep "oid sha256:e96ec1bd71eea8df78b24c64a7ab9d42dd7f821c4e503f0e2288273b9bff6c16" stdout.txt [ $(grep -c "Git LFS pointer" stdout.txt) -eq 0 ] echo "stderr:" cat stderr.txt grep "Git LFS pointer" stderr.txt [ $(grep -c "oid sha256:" stderr.txt) -eq 0 ] ) end_test begin_test "pointer to console" ( set -e echo "pointer-stdout-test" > pointer-stdout-test.txt git lfs pointer --file=pointer-stdout-test.txt 2>&1 | tee pointer.txt grep "Git LFS pointer" pointer.txt grep "oid sha256:e96ec1bd71eea8df78b24c64a7ab9d42dd7f821c4e503f0e2288273b9bff6c16" pointer.txt ) end_test begin_test "pointer --check (with valid pointer)" ( set -e reponame="pointer---check-valid-pointer" git init "$reponame" cd "$reponame" echo "contents" > good.txt git lfs pointer --file good.txt > good.ptr cat good.ptr git lfs pointer --check --file good.ptr git lfs pointer --check --stdin < good.ptr git lfs pointer --check --no-strict --file good.ptr git lfs pointer --check --no-strict --stdin < good.ptr git lfs pointer --check --strict --file good.ptr git lfs pointer --check --strict --stdin < good.ptr ) end_test begin_test "pointer --check (with invalid pointer)" ( set -e reponame="pointer---check-invalid-pointer" git init "$reponame" cd "$reponame" echo "not-a-pointer" > bad.ptr git lfs pointer --check --file bad.ptr && exit 1 git lfs pointer --check --stdin < bad.ptr && exit 1 git lfs pointer --check --no-strict --file bad.ptr && exit 1 git lfs pointer --check --no-strict --stdin < bad.ptr && exit 1 git lfs pointer --check --strict --file bad.ptr && exit 1 git lfs pointer --check --strict --stdin < bad.ptr && exit 1 # Make the result of the subshell a success. true ) end_test begin_test "pointer --check (with empty file)" ( set -e reponame="pointer---check-empty-file" git init "$reponame" cd "$reponame" touch empty.ptr git lfs pointer --check --file empty.ptr git lfs pointer --check --stdin < empty.ptr git lfs pointer --check --no-strict --file empty.ptr git lfs pointer --check --no-strict --stdin < empty.ptr git lfs pointer --check --strict --file empty.ptr git lfs pointer --check --strict --stdin < empty.ptr ) end_test begin_test "pointer --check (with size 0 pointer)" ( set -e reponame="pointer---check-size-0" git init "$reponame" cd "$reponame" printf '%s\n' \ 'version https://git-lfs.github.com/spec/v1' \ 'oid sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' \ 'size 0' \ >zero.ptr git lfs pointer --check --file zero.ptr git lfs pointer --check --stdin < zero.ptr git lfs pointer --check --no-strict --file zero.ptr git lfs pointer --check --no-strict --stdin < zero.ptr git lfs pointer --check --strict --file zero.ptr && exit 1 git lfs pointer --check --strict --stdin < zero.ptr && exit 1 # Make the result of the subshell a success. 
true ) end_test begin_test "pointer --check (with CRLF endings)" ( set -e reponame="pointer---check-crlf" git init "$reponame" cd "$reponame" printf '%s\r\n' \ 'version https://git-lfs.github.com/spec/v1' \ 'oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393' \ 'size 12345' \ >crlf.ptr git lfs pointer --check --file crlf.ptr git lfs pointer --check --stdin < crlf.ptr git lfs pointer --check --no-strict --file crlf.ptr git lfs pointer --check --no-strict --stdin < crlf.ptr git lfs pointer --check --strict --file crlf.ptr && exit 1 git lfs pointer --check --strict --stdin < crlf.ptr && exit 1 # Make the result of the subshell a success. true ) end_test begin_test "pointer --check (with invalid arguments)" ( set -e reponame="pointer---check-invalid-pointer" git init "$reponame" cd "$reponame" touch a.txt # git-lfs-pointer(1) --check with invalid combination --compare git lfs pointer --check --compare && exit 1 # git-lfs-pointer(1) --check without --file or --stdin git lfs pointer --check && exit 1 # git-lfs-pointer(1) --check with --file and --stdin git lfs pointer --check --file a.txt --stdin && exit 1 # Make the result of the subshell a success. true ) end_test git-lfs-3.6.1/t/t-post-checkout.sh000077500000000000000000000142721472372047300167400ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "post-checkout" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track --lockable "*.dat" git lfs track "*.big" # not lockable git add .gitattributes git commit -m "add git attributes" echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Data\":\"file 1 creation\"}, {\"Filename\":\"file2.dat\",\"Data\":\"file 2 creation\"}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Data\":\"file 1 updated commit 2\"}, {\"Filename\":\"file3.big\",\"Data\":\"file 3 creation\"}, {\"Filename\":\"file4.big\",\"Data\":\"file 4 creation\"}], \"Tags\":[\"atag\"] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file2.dat\",\"Data\":\"file 2 updated commit 3\"}] }, { \"CommitDate\":\"$(get_date -3d)\", \"NewBranch\":\"branch2\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Data\":\"file 5 creation in branch2\"}, {\"Filename\":\"file6.big\",\"Data\":\"file 6 creation in branch2\"}] }, { \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file2.dat\",\"Data\":\"file 2 updated in branch2\"}, {\"Filename\":\"file3.big\",\"Data\":\"file 3 updated in branch2\"}] } ]" | GIT_LFS_SET_LOCKABLE_READONLY=0 lfstest-testutils addcommits # skipped setting read-only above to make bulk load simpler (no read-only issues) git push -u origin main branch2 # re-clone the repo so we start fresh cd .. rm -rf "$reponame" clone_repo "$reponame" "$reponame" # this will be main touch untracked.dat [ "$(cat file1.dat)" == "file 1 updated commit 2" ] [ "$(cat file2.dat)" == "file 2 updated commit 3" ] [ "$(cat file3.big)" == "file 3 creation" ] [ "$(cat file4.big)" == "file 4 creation" ] [ ! -e file5.dat ] [ ! 
-e file6.big ] # without the post-checkout hook, any changed files would now be writeable refute_file_writeable file1.dat refute_file_writeable file2.dat assert_file_writeable untracked.dat assert_file_writeable file3.big assert_file_writeable file4.big # checkout branch git checkout branch2 [ -e file5.dat ] [ -e file6.big ] refute_file_writeable file1.dat refute_file_writeable file2.dat refute_file_writeable file5.dat assert_file_writeable untracked.dat assert_file_writeable file3.big assert_file_writeable file4.big assert_file_writeable file6.big # Confirm that contents of existing files were updated even though they were read-only [ "$(cat file2.dat)" == "file 2 updated in branch2" ] [ "$(cat file3.big)" == "file 3 updated in branch2" ] # restore files inside a branch (causes full scan since no diff) rm -f *.dat [ ! -e file1.dat ] [ ! -e file2.dat ] [ ! -e file5.dat ] git checkout file1.dat file2.dat file5.dat [ "$(cat file1.dat)" == "file 1 updated commit 2" ] [ "$(cat file2.dat)" == "file 2 updated in branch2" ] [ "$(cat file5.dat)" == "file 5 creation in branch2" ] refute_file_writeable file1.dat refute_file_writeable file2.dat refute_file_writeable file5.dat # now lock files, then remove & restore git lfs lock file1.dat git lfs lock file2.dat assert_file_writeable file1.dat assert_file_writeable file2.dat rm -f *.dat git checkout file1.dat file2.dat file5.dat assert_file_writeable file1.dat assert_file_writeable file2.dat refute_file_writeable file5.dat ) end_test begin_test "post-checkout with subdirectories" ( set -e reponame="post-checkout-subdirectories" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track --lockable "bin/*.dat" git lfs track "*.big" # not lockable git add .gitattributes git commit -m "add git attributes" echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"bin/file1.dat\",\"Data\":\"file 1 creation\"}, {\"Filename\":\"bin/file2.dat\",\"Data\":\"file 2 creation\"}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"bin/file1.dat\",\"Data\":\"file 1 updated commit 2\"}, {\"Filename\":\"file3.big\",\"Data\":\"file 3 creation\"}, {\"Filename\":\"file4.big\",\"Data\":\"file 4 creation\"}], \"Tags\":[\"atag\"] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"bin/file2.dat\",\"Data\":\"file 2 updated commit 3\"}] }, { \"CommitDate\":\"$(get_date -3d)\", \"NewBranch\":\"branch2\", \"Files\":[ {\"Filename\":\"bin/file5.dat\",\"Data\":\"file 5 creation in branch2\"}, {\"Filename\":\"file6.big\",\"Data\":\"file 6 creation in branch2\"}] }, { \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"bin/file2.dat\",\"Data\":\"file 2 updated in branch2\"}, {\"Filename\":\"file3.big\",\"Data\":\"file 3 updated in branch2\"}] } ]" | GIT_LFS_SET_LOCKABLE_READONLY=0 lfstest-testutils addcommits # skipped setting read-only above to make bulk load simpler (no read-only issues) git push -u origin main branch2 # re-clone the repo so we start fresh cd .. rm -rf "$reponame" clone_repo "$reponame" "$reponame" # this will be main [ "$(cat bin/file1.dat)" == "file 1 updated commit 2" ] [ "$(cat bin/file2.dat)" == "file 2 updated commit 3" ] [ "$(cat file3.big)" == "file 3 creation" ] [ "$(cat file4.big)" == "file 4 creation" ] [ ! -e bin/file5.dat ] [ ! 
-e file6.big ] # without the post-checkout hook, any changed files would now be writeable refute_file_writeable bin/file1.dat refute_file_writeable bin/file2.dat assert_file_writeable file3.big assert_file_writeable file4.big # checkout branch git checkout branch2 [ -e bin/file5.dat ] [ -e file6.big ] refute_file_writeable bin/file1.dat refute_file_writeable bin/file2.dat refute_file_writeable bin/file5.dat assert_file_writeable file3.big assert_file_writeable file4.big assert_file_writeable file6.big # Confirm that contents of existing files were updated even though they were read-only [ "$(cat bin/file2.dat)" == "file 2 updated in branch2" ] [ "$(cat file3.big)" == "file 3 updated in branch2" ] ) end_test git-lfs-3.6.1/t/t-post-commit.sh000077500000000000000000000050601472372047300164160ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "post-commit" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track --lockable "*.dat" git lfs track "*.big" # not lockable git add .gitattributes git commit -m "add git attributes" echo "Come with me" > pcfile1.dat echo "and you'll be" > pcfile2.dat echo "in a world" > pcfile3.big echo "of pure imagination" > pcfile4.big git add *.dat git commit -m "Committed large files" # New lockable files should have been made read-only now since not locked refute_file_writeable pcfile1.dat refute_file_writeable pcfile2.dat assert_file_writeable pcfile3.big assert_file_writeable pcfile4.big git push -u origin main # now lock files, then edit git lfs lock pcfile1.dat git lfs lock pcfile2.dat echo "Take a look" > pcfile1.dat echo "and you'll see" > pcfile2.dat git add pcfile1.dat pcfile2.dat git commit -m "Updated" # files should remain writeable since locked assert_file_writeable pcfile1.dat assert_file_writeable pcfile2.dat ) end_test begin_test "post-commit (locked file outside of LFS)" ( set -e reponame="post-commit-external" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs install # This step is intentionally done in two commits, due to a known bug in # the post-checkout process LFS performs. It compares changed files from HEAD, # which is an invalid previous state for the initial commit of a repository. echo "*.dat lockable" > .gitattributes git add .gitattributes git commit -m "initial commit" echo "hello" > a.dat git add a.dat assert_file_writeable a.dat git commit -m "add a.dat" refute_file_writeable a.dat ) end_test begin_test "post-commit does not enter submodules" ( set -e reponame="post-commit-submodules" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track --lockable "*.dat" git lfs track "*.big" # not lockable git add .gitattributes git commit -m "add git attributes" mkdir submodule (cd submodule && git init && echo abc >foo && git add foo && git commit -m 'foo') git submodule add ./submodule submodule git commit -m 'Add submodule' echo "Come with me" > pcfile1.dat echo "and you'll be" > pcfile2.dat echo "in a world" > pcfile3.big echo "of pure imagination" > pcfile4.big git add *.dat GIT_TRACE=1 git commit -m "Committed large files" 2>&1 | tee output grep -E 'filepathfilter:.*submodule/foo' output && exit 1 true ) end_test git-lfs-3.6.1/t/t-post-merge.sh000077500000000000000000000056571472372047300162330ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "post-merge" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track --lockable "*.dat" git lfs track "*.big" # not lockable git add .gitattributes git commit -m "add git attributes" echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Data\":\"file 1 creation\"}, {\"Filename\":\"file2.dat\",\"Data\":\"file 2 creation\"}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Data\":\"file 1 updated commit 2\"}, {\"Filename\":\"file3.big\",\"Data\":\"file 3 creation\"}, {\"Filename\":\"file4.big\",\"Data\":\"file 4 creation\"}], \"Tags\":[\"atag\"] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file2.dat\",\"Data\":\"file 2 updated commit 3\"}] }, { \"CommitDate\":\"$(get_date -3d)\", \"NewBranch\":\"branch2\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Data\":\"file 5 creation in branch2\"}, {\"Filename\":\"file6.big\",\"Data\":\"file 6 creation in branch2\"}] }, { \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file2.dat\",\"Data\":\"file 2 updated in branch2\"}, {\"Filename\":\"file3.big\",\"Data\":\"file 3 updated in branch2\"}] } ]" | GIT_LFS_SET_LOCKABLE_READONLY=0 lfstest-testutils addcommits # skipped setting read-only above to make bulk load simpler (no read-only issues) git push -u origin main branch2 # re-clone the repo so we start fresh cd .. rm -rf "$reponame" clone_repo "$reponame" "$reponame" # this will be main [ "$(cat file1.dat)" == "file 1 updated commit 2" ] [ "$(cat file2.dat)" == "file 2 updated commit 3" ] [ "$(cat file3.big)" == "file 3 creation" ] [ "$(cat file4.big)" == "file 4 creation" ] [ ! -e file5.dat ] [ ! -e file6.big ] # without the post-checkout hook, any changed files would now be writeable refute_file_writeable file1.dat refute_file_writeable file2.dat assert_file_writeable file3.big assert_file_writeable file4.big # merge branch, with readonly option disabled to demonstrate what would happen GIT_LFS_SET_LOCKABLE_READONLY=0 git merge origin/branch2 # branch2 had hanges to file2.dat and file5.dat which were lockable # but because we disabled the readonly feature they will be writeable now assert_file_writeable file2.dat assert_file_writeable file5.dat # now let's do it again with the readonly option enabled git reset --hard HEAD^ git merge origin/branch2 # This time they should be read-only refute_file_writeable file2.dat refute_file_writeable file5.dat # Confirm that contents of existing files were updated even though were read-only [ "$(cat file2.dat)" == "file 2 updated in branch2" ] [ "$(cat file5.dat)" == "file 5 creation in branch2" ] ) end_test git-lfs-3.6.1/t/t-pre-push.sh000077500000000000000000001130171472372047300157100ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "pre-push with good ref" ( set -e reponame="pre-push-main-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame").locksverify" false git lfs track "*.dat" echo "hi" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" refute_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 "refs/heads/main" # for some reason, using 'tee' and $PIPESTATUS does not work here echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 > push.log assert_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 "refs/heads/main" ) end_test begin_test "pre-push with tracked ref" ( set -e reponame="pre-push-tracked-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame").locksverify" false git lfs track "*.dat" echo "hi" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" refute_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 "refs/heads/tracked" # for some reason, using 'tee' and $PIPESTATUS does not work here echo "refs/heads/main main refs/heads/tracked 0000000000000000000000000000000000000000" | git lfs pre-push origin main 2>&1 > push.log assert_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 "refs/heads/tracked" ) end_test begin_test "pre-push with bad ref" ( set -e reponame="pre-push-other-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame").locksverify" false git lfs track "*.dat" echo "hi" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" refute_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 "refs/heads/other" # for some reason, using 'tee' and $PIPESTATUS does not work here set +e echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2> push.log pushcode=$? 
set -e if [ "0" -eq "$pushcode" ]; then echo "expected command to fail" exit 1 fi grep 'Expected ref "refs/heads/other", got "refs/heads/main"' push.log refute_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 "refs/heads/other" ) end_test begin_test "pre-push" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" git add .gitattributes git commit -m "add git attributes" git config "lfs.$(repo_endpoint $GITSERVER $reponame).locksverify" true echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" | tee push.log # no output if nothing to do [ "$(du -k push.log | cut -f 1)" == "0" ] git lfs track "*.dat" echo "hi" > hi.dat git add hi.dat git commit -m "add hi.dat" git show refute_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 # push file to the git lfs server echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 3 B" push.log assert_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 ) end_test begin_test "pre-push dry-run" ( set -e reponame="$(basename "$0" ".sh")-dry-run" setup_remote_repo "$reponame" clone_repo "$reponame" repo-dry-run git lfs track "*.dat" git add .gitattributes git commit -m "add git attributes" git config "lfs.$(repo_endpoint $GITSERVER $reponame).locksverify" true echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push --dry-run origin "$GITSERVER/$reponame" | tee push.log [ "" = "$(cat push.log)" ] git lfs track "*.dat" echo "dry" > hi.dat git add hi.dat git commit -m "add hi.dat" git show refute_server_object "$reponame" 2840e0eafda1d0760771fe28b91247cf81c76aa888af28a850b5648a338dc15b echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push --dry-run origin "$GITSERVER/$reponame" | tee push.log grep "push 2840e0eafda1d0760771fe28b91247cf81c76aa888af28a850b5648a338dc15b => hi.dat" push.log cat push.log [ `wc -l < push.log` = 1 ] refute_server_object "$reponame" 2840e0eafda1d0760771fe28b91247cf81c76aa888af28a850b5648a338dc15b ) end_test begin_test "pre-push skip-push" ( set -e reponame="$(basename "$0" ".sh")-skip-push" setup_remote_repo "$reponame" clone_repo "$reponame" repo-skip-push git lfs track "*.dat" git add .gitattributes git commit -m "add git attributes" git config "lfs.$(repo_endpoint $GITSERVER $reponame).locksverify" true echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | GIT_LFS_SKIP_PUSH=true git lfs pre-push origin "$GITSERVER/$reponame" | tee push.log [ "" = "$(cat push.log)" ] git lfs track "*.dat" echo "dry" > hi.dat git add hi.dat git commit -m "add hi.dat" git show refute_server_object "$reponame" 2840e0eafda1d0760771fe28b91247cf81c76aa888af28a850b5648a338dc15b echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | GIT_LFS_SKIP_PUSH=true git lfs pre-push origin "$GITSERVER/$reponame" | tee push.log [ "" = "$(cat push.log)" ] refute_server_object "$reponame" 2840e0eafda1d0760771fe28b91247cf81c76aa888af28a850b5648a338dc15b ) end_test begin_test "pre-push 307 redirects" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo-307 git lfs 
track "*.dat" git add .gitattributes git commit -m "add git attributes" # relative redirect git config remote.origin.lfsurl "$GITSERVER/redirect307/rel/$reponame.git/info/lfs" git lfs track "*.dat" echo "hi" > hi.dat git add hi.dat git commit -m "add hi.dat" git show # push file to the git lfs server echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/redirect307/rel/$reponame.git/info/lfs" 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 3 B" push.log assert_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 # absolute redirect git config remote.origin.lfsurl "$GITSERVER/redirect307/abs/$reponame.git/info/lfs" echo "hi" > hi2.dat git add hi2.dat git commit -m "add hi2.dat" git show # push file to the git lfs server echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/redirect307/abs/$reponame.git/info/lfs" 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 3 B" push.log ) end_test begin_test "pre-push with existing file" ( set -e reponame="$(basename "$0" ".sh")-existing-file" setup_remote_repo "$reponame" clone_repo "$reponame" existing-file echo "existing" > existing.dat git add existing.dat git commit -m "add existing dat" git lfs track "*.dat" echo "new" > new.dat git add new.dat git add .gitattributes git commit -m "add new file through git lfs" # push file to the git lfs server echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 4 B" push.log # now the file exists assert_server_object "$reponame" 7aa7a5359173d05b63cfd682e3c38487f3cb4f7f1d60659fe59fab1505977d4c ) end_test begin_test "pre-push with existing pointer" ( set -e reponame="$(basename "$0" ".sh")-existing-pointer" setup_remote_repo "$reponame" clone_repo "$reponame" existing-pointer echo "$(pointer "7aa7a5359173d05b63cfd682e3c38487f3cb4f7f1d60659fe59fab1505977d4c" 4)" > new.dat git add new.dat git commit -m "add new pointer" mkdir -p .git/lfs/objects/7a/a7 echo "new" > .git/lfs/objects/7a/a7/7aa7a5359173d05b63cfd682e3c38487f3cb4f7f1d60659fe59fab1505977d4c # push file to the git lfs server echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 4 B" push.log ) end_test begin_test "pre-push with missing pointer not on server" ( set -e reponame="$(basename "$0" ".sh")-missing-pointer" setup_remote_repo "$reponame" clone_repo "$reponame" missing-pointer oid="7aa7a5359173d05b63cfd682e3c38487f3cb4f7f1d60659fe59fab1505977d4c" echo "$(pointer "$oid" 4)" > new.dat git add new.dat git commit -m "add new pointer" # assert that push fails set +e echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log set -e grep " (missing) new.dat ($oid)" push.log ) end_test begin_test "pre-push with missing pointer which is on server" ( # should permit push if files missing locally but are on server, shouldn't # require client to have every file (prune) set -e reponame="$(basename "$0" ".sh")-missing-but-on-server" setup_remote_repo "$reponame" clone_repo "$reponame" missing-but-on-server contents="common data" contents_oid=$(calc_oid "$contents") git lfs track "*.dat" printf 
"%s" "$contents" > common1.dat git add common1.dat git add .gitattributes git commit -m "add first file" # push file to the git lfs server echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 11 B" push.log # now the file exists assert_server_object "$reponame" "$contents_oid" # create another commit referencing same oid, then delete local data & push printf "%s" "$contents" > common2.dat git add common2.dat git commit -m "add second file, same content" rm -rf .git/lfs/objects echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log # make sure there were no errors reported [ -z "$(grep -i 'Error' push.log)" ] ) end_test begin_test "pre-push with missing and present pointers (lfs.allowincompletepush true)" ( set -e reponame="pre-push-missing-and-present" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" present="present" present_oid="$(calc_oid "$present")" printf "%s" "$present" > present.dat missing="missing" missing_oid="$(calc_oid "$missing")" printf "%s" "$missing" > missing.dat git add present.dat missing.dat git commit -m "add present.dat and missing.dat" git rm missing.dat git commit -m "remove missing" # :fire: the "missing" object missing_oid_part_1="$(echo "$missing_oid" | cut -b 1-2)" missing_oid_part_2="$(echo "$missing_oid" | cut -b 3-4)" missing_oid_path=".git/lfs/objects/$missing_oid_part_1/$missing_oid_part_2/$missing_oid" rm "$missing_oid_path" git config lfs.allowincompletepush true echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log if [ "0" -ne "${PIPESTATUS[1]}" ]; then echo >&2 "fatal: expected \`git lfs pre-push origin $GITSERVER/$reponame\` to succeed..." exit 1 fi grep "LFS upload missing objects" push.log grep " (missing) missing.dat ($missing_oid)" push.log assert_server_object "$reponame" "$present_oid" refute_server_object "$reponame" "$missing_oid" ) end_test begin_test "pre-push reject missing pointers (lfs.allowincompletepush default)" ( set -e reponame="pre-push-reject-missing-and-present" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" present="present" present_oid="$(calc_oid "$present")" printf "%s" "$present" > present.dat missing="missing" missing_oid="$(calc_oid "$missing")" printf "%s" "$missing" > missing.dat git add present.dat missing.dat git commit -m "add present.dat and missing.dat" git rm missing.dat git commit -m "remove missing" # :fire: the "missing" object missing_oid_part_1="$(echo "$missing_oid" | cut -b 1-2)" missing_oid_part_2="$(echo "$missing_oid" | cut -b 3-4)" missing_oid_path=".git/lfs/objects/$missing_oid_part_1/$missing_oid_part_2/$missing_oid" rm "$missing_oid_path" echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log if [ "2" -ne "${PIPESTATUS[1]}" ]; then echo >&2 "fatal: expected \`git lfs pre-push origin $GITSERVER/$reponame\` to fail..." 
exit 1 fi grep 'Unable to find source' push.log refute_server_object "$reponame" "$present_oid" refute_server_object "$reponame" "$missing_oid" ) end_test begin_test "pre-push multiple branches" ( set -e reponame="$(basename "$0" ".sh")-multiple-branches" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log NUMFILES=6 # generate content we'll use for ((a=0; a < NUMFILES ; a++)) do content[$a]="filecontent$a" oid[$a]=$(calc_oid "${content[$a]}") done echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[0]}, \"Data\":\"${content[0]}\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content[1]}, \"Data\":\"${content[1]}\"}] }, { \"NewBranch\":\"branch1\", \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file2.dat\",\"Size\":${#content[2]}, \"Data\":\"${content[2]}\"}] }, { \"ParentBranches\":[\"main\"], \"NewBranch\":\"branch2\", \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":${#content[3]}, \"Data\":\"${content[3]}\"}] }, { \"ParentBranches\":[\"main\"], \"NewBranch\":\"branch3\", \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[4]}, \"Data\":\"${content[4]}\"}] }, { \"ParentBranches\":[\"main\"], \"NewBranch\":\"branch4\", \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[5]}, \"Data\":\"${content[5]}\"}] } ]" | lfstest-testutils addcommits # make sure when called via git push all branches are updated git push origin main branch1 branch2 branch3 branch4 for ((a=0; a < NUMFILES ; a++)) do assert_server_object "$reponame" "${oid[$a]}" done ) end_test begin_test "pre-push with bad remote" ( set -e cd repo echo "refs/heads/main main refs/heads/main 0000000000000000000000000000000000000000" | git lfs pre-push not-a-remote "$GITSERVER/$reponame" 2>&1 | tee pre-push.log grep "Invalid remote name" pre-push.log ) end_test begin_test "pre-push unfetched deleted remote branch & server GC" ( # point of this is to simulate the case where the local cache of the remote # branch state contains a branch which has actually been deleted on the remote, # the client just doesn't know yet (hasn't done 'git fetch origin --prune') # If the server GC'd the objects that deleted branch contained, but they were # referenced by a branch being pushed (earlier commit), push might assume it # doesn't have to push it, but it does. 
Tests that we check the real remote refs # before making an assumption about the diff we need to push set -e reponame="$(basename "$0" ".sh")-server-deleted-branch-gc" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log NUMFILES=4 # generate content we'll use for ((a=0; a < NUMFILES ; a++)) do content[$a]="filecontent$a" oid[$a]=$(calc_oid "${content[$a]}") done echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[0]}, \"Data\":\"${content[0]}\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content[1]}, \"Data\":\"${content[1]}\"}] }, { \"NewBranch\":\"branch-to-delete\", \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":${#content[2]}, \"Data\":\"${content[2]}\"}] }, { \"NewBranch\":\"branch-to-push-after\", \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[3]}, \"Data\":\"${content[3]}\"}] } ]" | lfstest-testutils addcommits # push only the first 2 branches git push origin main branch-to-delete for ((a=0; a < 3 ; a++)) do assert_server_object "$reponame" "${oid[$a]}" done # confirm we haven't pushed the last one yet refute_server_object "$reponame" "${oid[3]}" # copy the cached remote ref for the branch we're going to delete remotely cp .git/refs/remotes/origin/branch-to-delete branch-to-delete.ref # now delete the branch on the server git push origin --delete branch-to-delete # remove the OID in it, as if GC'd delete_server_object "$reponame" "${oid[2]}" refute_server_object "$reponame" "${oid[2]}" # Now put the cached remote ref back, as if someone else had deleted it but # we hadn't done git fetch --prune yet mv branch-to-delete.ref .git/refs/remotes/origin/branch-to-delete # Confirm that local cache of remote branch is back git branch -r 2>&1 | tee branch-r.log grep "origin/branch-to-delete" branch-r.log # Now push later branch which should now need to re-push previous commits LFS too git push origin branch-to-push-after # all objects should now be there even though cached remote branch claimed it already had file3.dat for ((a=0; a < NUMFILES ; a++)) do assert_server_object "$reponame" "${oid[$a]}" done ) end_test begin_test "pre-push delete branch" ( set -e reponame="$(basename "$0" ".sh")-delete-branch" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log NUMFILES=4 # generate content we'll use for ((a=0; a < NUMFILES ; a++)) do content[$a]="filecontent$a" oid[$a]=$(calc_oid "${content[$a]}") done echo "[ { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[0]}, \"Data\":\"${content[0]}\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content[1]}, \"Data\":\"${content[1]}\"}] }, { \"NewBranch\":\"branch-to-delete\", \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":${#content[2]}, \"Data\":\"${content[2]}\"}] }, { \"ParentBranches\":[\"main\"], \"CommitDate\":\"$(get_date -0d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[3]}, \"Data\":\"${content[3]}\"}] } ]" | lfstest-testutils addcommits # push all branches git push origin main branch-to-delete for ((a=0; a < NUMFILES ; a++)) do assert_server_object "$reponame" "${oid[$a]}" done # deleting a branch with git push should not fail # (requires correct special casing of "(delete) 0000000000.." 
in hook) git push origin --delete branch-to-delete ) end_test begin_test "pre-push with our lock" ( set -e reponame="pre_push_owned_locks" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="locked contents" printf "%s" "$contents" > locked.dat git add locked.dat git commit -m "add locked.dat" git push origin main git lfs lock --json "locked.dat" | tee lock.log id=$(assert_lock lock.log locked.dat) assert_server_lock $id printf "authorized changes" >> locked.dat git add locked.dat git commit -m "add unauthorized changes" GIT_CURL_VERBOSE=1 git push origin main 2>&1 | tee push.log grep "Consider unlocking your own locked files" push.log grep "* locked.dat" push.log assert_server_lock "$id" ) end_test begin_test "pre-push with their lock on lfs file" ( set -e reponame="pre_push_unowned_lock" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="locked contents" # any lock path with "theirs" is returned as "their" lock by /locks/verify printf "%s" "$contents" > locked_theirs.dat git add locked_theirs.dat git commit -m "add locked_theirs.dat" git push origin main git lfs lock --json "locked_theirs.dat" | tee lock.log id=$(assert_lock lock.log locked_theirs.dat) assert_server_lock $id pushd "$TRASHDIR" >/dev/null clone_repo "$reponame" "$reponame-assert" git config lfs.locksverify true printf "unauthorized changes" >> locked_theirs.dat git add locked_theirs.dat # --no-verify is used to avoid the pre-commit hook which is not under test git commit --no-verify -m "add unauthorized changes" git push origin main 2>&1 | tee push.log res="${PIPESTATUS[0]}" if [ "0" -eq "$res" ]; then echo "push should fail" exit 1 fi grep "Unable to push locked files" push.log grep "* locked_theirs.dat - Git LFS Tests" push.log grep "Cannot update locked files." 
push.log refute_server_object "$reponame" "$(calc_oid_file locked_theirs.dat)" popd >/dev/null ) end_test begin_test "pre-push with their lock on non-lfs lockable file" ( set -e reponame="pre_push_unowned_lock_not_lfs" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" echo "*.dat lockable" > .gitattributes git add .gitattributes git commit -m "initial commit" # any lock path with "theirs" is returned as "their" lock by /locks/verify echo "hi" > readme.txt echo "tiny" > tiny_locked_theirs.dat git help > large_locked_theirs.dat git add readme.txt tiny_locked_theirs.dat large_locked_theirs.dat git commit -m "add initial files" git push origin main git lfs lock --json "tiny_locked_theirs.dat" | tee lock.log id=$(assert_lock lock.log tiny_locked_theirs.dat) assert_server_lock $id git lfs lock --json "large_locked_theirs.dat" | tee lock.log id=$(assert_lock lock.log large_locked_theirs.dat) assert_server_lock $id pushd "$TRASHDIR" >/dev/null clone_repo "$reponame" "$reponame-assert" git config lfs.locksverify true git lfs update # manually add pre-push hook, since lfs clean hook is not used echo "other changes" >> readme.txt echo "unauthorized changes" >> large_locked_theirs.dat echo "unauthorized changes" >> tiny_locked_theirs.dat # --no-verify is used to avoid the pre-commit hook which is not under test git commit --no-verify -am "add unauthorized changes" git push origin main 2>&1 | tee push.log res="${PIPESTATUS[0]}" if [ "0" -eq "$res" ]; then echo "push should fail" exit 1 fi grep "Unable to push locked files" push.log grep "* large_locked_theirs.dat - Git LFS Tests" push.log grep "* tiny_locked_theirs.dat - Git LFS Tests" push.log grep "Cannot update locked files." push.log refute_server_object "$reponame" "$(calc_oid_file large_locked_theirs.dat)" refute_server_object "$reponame" "$(calc_oid_file tiny_locked_theirs.dat)" popd >/dev/null ) end_test begin_test "pre-push locks verify 5xx with verification enabled" ( set -e reponame="lock-enabled-verify-5xx" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git config "lfs.$endpoint.locksverify" true git push origin main 2>&1 | tee push.log grep "\"origin\" does not support the Git LFS locking API" push.log grep "git config lfs.$endpoint.locksverify false" push.log refute_server_object "$reponame" "$contents_oid" ) end_test begin_test "pre-push disable locks verify on exact url" ( set -e reponame="lock-disabled-verify-5xx" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git config "lfs.$endpoint.locksverify" false git push origin main 2>&1 | tee push.log [ "0" -eq "$(grep -c "\"origin\" does not support the Git LFS locking API" push.log)" ] assert_server_object "$reponame" "$contents_oid" ) end_test begin_test "pre-push disable locks verify on partial url" ( set -e reponame="lock-disabled-verify-5xx-partial" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$server/$repo" contents="example" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit 
--message "initial commit" git config "lfs.$endpoint.locksverify" false git push origin main 2>&1 | tee push.log [ "0" -eq "$(grep -c "\"origin\" does not support the Git LFS locking API" push.log)" ] assert_server_object "$reponame" "$contents_oid" ) end_test begin_test "pre-push locks verify 403 with good ref" ( set -e reponame="lock-verify-main-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="example" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git config "lfs.$GITSERVER/$reponame.git.locksverify" true git push origin main 2>&1 | tee push.log assert_server_object "$reponame" "$contents_oid" "refs/heads/main" ) end_test begin_test "pre-push locks verify 403 with good tracked ref" ( set -e reponame="lock-verify-tracked-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="example" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git config push.default upstream git config branch.main.merge refs/heads/tracked git config branch.main.remote origin git config "lfs.$GITSERVER/$reponame.git.locksverify" true git push 2>&1 | tee push.log assert_server_object "$reponame" "$contents_oid" "refs/heads/tracked" ) end_test begin_test "pre-push locks verify 403 with explicit ref" ( set -e reponame="lock-verify-explicit-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="example" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git config "lfs.$GITSERVER/$reponame.git.locksverify" true git push origin main:explicit 2>&1 | tee push.log assert_server_object "$reponame" "$contents_oid" "refs/heads/explicit" ) end_test begin_test "pre-push locks verify 403 with bad ref" ( set -e reponame="lock-verify-other-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="example" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git config "lfs.$GITSERVER/$reponame.git.locksverify" true git push origin main 2>&1 | tee push.log grep "failed to push some refs" push.log refute_server_object "$reponame" "$contents_oid" "refs/heads/other" ) end_test begin_test "pre-push locks verify 5xx with verification unset" ( set -e reponame="lock-unset-verify-5xx" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" [ -z "$(git config "lfs.$endpoint.locksverify")" ] git push origin main 2>&1 | tee push.log grep "\"origin\" does not support the Git LFS locking API" push.log assert_server_object "$reponame" "$contents_oid" ) end_test begin_test "pre-push locks verify 501 with verification enabled" ( set -e reponame="lock-enabled-verify-501" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit 
--message "initial commit" git config "lfs.$endpoint.locksverify" true git push origin main 2>&1 | tee push.log assert_server_object "$reponame" "$contents_oid" [ "false" = "$(git config "lfs.$endpoint.locksverify")" ] ) end_test begin_test "pre-push locks verify 501 with verification disabled" ( set -e reponame="lock-disabled-verify-501" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git config "lfs.$endpoint.locksverify" false git push origin main 2>&1 | tee push.log assert_server_object "$reponame" "$contents_oid" [ "false" = "$(git config "lfs.$endpoint.locksverify")" ] ) end_test begin_test "pre-push locks verify 501 with verification unset" ( set -e reponame="lock-unset-verify-501" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" [ -z "$(git config "lfs.$endpoint.locksverify")" ] git push origin main 2>&1 | tee push.log assert_server_object "$reponame" "$contents_oid" [ "false" = "$(git config "lfs.$endpoint.locksverify")" ] ) end_test begin_test "pre-push locks verify 200" ( set -e reponame="lock-verify-200" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" [ -z "$(git config "lfs.$endpoint.locksverify")" ] contents="example" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git push origin main 2>&1 | tee push.log grep "Locking support detected on remote \"origin\"." 
push.log grep "git config lfs.$endpoint.locksverify true" push.log assert_server_object "$reponame" "$contents_oid" ) end_test begin_test "pre-push locks verify 403 with verification enabled" ( set -e reponame="lock-enabled-verify-403" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git config "lfs.$endpoint.locksverify" true git push origin main 2>&1 | tee push.log grep "error: Authentication error" push.log refute_server_object "$reponame" "$contents_oid" [ "true" = "$(git config "lfs.$endpoint.locksverify")" ] ) end_test begin_test "pre-push locks verify 403 with verification disabled" ( set -e reponame="lock-disabled-verify-403" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git config "lfs.$endpoint.locksverify" false git push origin main 2>&1 | tee push.log assert_server_object "$reponame" "$contents_oid" [ "false" = "$(git config "lfs.$endpoint.locksverify")" ] ) end_test begin_test "pre-push locks verify 403 with verification unset" ( set -e reponame="lock-unset-verify-403" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" [ -z "$(git config "lfs.$endpoint.locksverify")" ] git push origin main 2>&1 | tee push.log grep "warning: Authentication error" push.log assert_server_object "$reponame" "$contents_oid" [ -z "$(git config "lfs.$endpoint.locksverify")" ] ) end_test begin_test "pre-push with pushDefault and explicit remote" ( set -e reponame="pre-push-pushdefault-explicit" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git remote add wrong "$(repo_endpoint "$GITSERVER" "wrong-url")" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame").locksverify" false git config remote.pushDefault wrong git lfs track "*.dat" echo "hi" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" refute_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 "refs/heads/main" GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin main 2>&1 | tee push.log assert_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 "refs/heads/main" grep wrong-url push.log && exit 1 true ) end_test begin_test "pre-push uses optimization if remote URL matches" ( set -e reponame="pre-push-remote-url-optimization" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint=$(git config remote.origin.url) contents_oid=$(calc_oid 'hi\n') git config "lfs.$endpoint.locksverify" false git lfs track "*.dat" echo "hi" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" refute_server_object "$reponame" $contents_oid "refs/heads/main" GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push "$endpoint" main 2>&1 | tee push.log grep 'rev-list.*--not --remotes=origin' push.log ) end_test begin_test "pre-push does not traverse Git objects server has" ( set -e reponame="pre-push-traverse-server-objects" 
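  # The optimization asserted in the previous test prunes the pointer scan
  # with a traversal sketch roughly equivalent to:
  #
  #   git rev-list --objects main --not --remotes=origin
  #
  # i.e. only Git objects unreachable from every remote-tracking ref are
  # searched for LFS pointers. The test below covers the complementary case,
  # where the push URL matches no configured remote and that shortcut must
  # not be applied.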
setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint=$(git config remote.origin.url) contents_oid=$(calc_oid 'hi\n') git config "lfs.$endpoint.locksverify" false git lfs track "*.dat" echo "hi" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" refute_server_object "$reponame" $contents_oid "refs/heads/main" # We use a different URL instead of a named remote or the remote URL so that # we can't make use of the optimization that ignores objects we already have # in remote tracking branches. GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push "$endpoint.git" main 2>&1 | tee push.log assert_server_object "$reponame" $contents_oid "refs/heads/main" contents2_oid=$(calc_oid 'hello\n') echo "hello" > b.dat git add .gitattributes b.dat git commit -m "add b.dat" refute_server_object "$reponame" $contents2_oid "refs/heads/main" GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push "$endpoint.git" main 2>&1 | tee push.log assert_server_object "$reponame" $contents2_oid "refs/heads/main" # Verify that we haven't tried to push or query for the object we already # pushed before; i.e., we didn't see it because we ignored its Git object # during traversal. grep $contents_oid push.log && exit 1 true ) end_test begin_test "pre-push with force-pushed ref" ( set -e reponame="pre-push-force-pushed-ref" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame").locksverify" false git lfs track "*.dat" echo "hi" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git tag -a -m tagname tagname refute_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 "refs/heads/main" git push origin main tagname assert_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 "refs/heads/main" # We pick a different message so that we get different object IDs even if both # commands run in the same second. git tag -f -a -m tagname2 tagname # Prune the old tag object. git reflog expire --all --expire=now git gc --prune=now # Make sure we deal with us missing the object for the old value of the tag ref. git push origin +tagname ) end_test begin_test "pre-push with local path" ( set -e reponame="pre-push-local-path" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame-2" cd .. clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "hi" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" # Push to the other repo. git push "../$reponame-2" main:foo # Push to . to make sure that works. git push "." main:foo git lfs fsck cd "../$reponame-2" git checkout foo git lfs fsck ) end_test git-lfs-3.6.1/t/t-progress-meter.sh000077500000000000000000000010731472372047300171210ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "progress meter displays positive progress" ( set -e reponame="progress-meter" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" for i in `seq 1 128`; do printf "%s" "$i" > "$i.dat" done git add *.dat git commit -m "add many objects" git push origin main 2>&1 | tee push.log [ "0" -eq "${PIPESTATUS[0]}" ] grep "Uploading LFS objects: 100% (128/128), 276 B" push.log ) end_test git-lfs-3.6.1/t/t-progress.sh000077500000000000000000000027151472372047300160130ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" reponame="$(basename "$0" ".sh")" begin_test "GIT_LFS_PROGRESS" ( set -e setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" echo "a" > a.dat echo "b" > b.dat echo "c" > c.dat echo "d" > d.dat echo "e" > e.dat git add .gitattributes *.dat git commit -m "add files" git push origin main 2>&1 | tee push.log grep "Uploading LFS objects: 100% (5/5), 10 B" push.log cd .. GIT_LFS_PROGRESS="$TRASHDIR/progress.log" git lfs clone "$GITSERVER/$reponame" clone cat progress.log grep "download 1/5" progress.log grep "download 2/5" progress.log grep "download 3/5" progress.log grep "download 4/5" progress.log grep "download 5/5" progress.log GIT_LFS_SKIP_SMUDGE=1 git clone "$GITSERVER/$reponame" clone2 cd clone2 rm -rf "$TRASHDIR/progress.log" .git/lfs/objects GIT_LFS_PROGRESS="$TRASHDIR/progress.log" git lfs fetch --all cat ../progress.log grep "download 1/5" ../progress.log grep "download 2/5" ../progress.log grep "download 3/5" ../progress.log grep "download 4/5" ../progress.log grep "download 5/5" ../progress.log rm -rf "$TRASHDIR/progress.log" GIT_LFS_PROGRESS="$TRASHDIR/progress.log" git lfs checkout cat ../progress.log grep "checkout 1/5" ../progress.log grep "checkout 2/5" ../progress.log grep "checkout 3/5" ../progress.log grep "checkout 4/5" ../progress.log grep "checkout 5/5" ../progress.log ) end_test git-lfs-3.6.1/t/t-prune-worktree.sh000077500000000000000000000174041472372047300171410ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" ensure_git_version_isnt $VERSION_LOWER "2.5.0" begin_test "prune worktree" ( set -e reponame="prune_worktree" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log content_head="First checkout HEAD" content_worktree1head="Worktree 1 head" content_worktree1excluded="Worktree 1 excluded by filter" content_worktree1indexed="Worktree 1 indexed" content_worktree2head="Worktree 2 head" content_worktree2excluded="Worktree 2 excluded by filter" content_worktree2indexed="Worktree 2 indexed" content_oldcommit1="Always pruned 1" content_oldcommit2="Always pruned 2" content_oldcommit3="Always pruned 3" oid_head=$(calc_oid "$content_head") oid_worktree1head=$(calc_oid "$content_worktree1head") oid_worktree1excluded=$(calc_oid "$content_worktree1excluded") oid_worktree1indexed=$(calc_oid "$content_worktree1indexed") oid_worktree2head=$(calc_oid "$content_worktree2head") oid_worktree2excluded=$(calc_oid "$content_worktree2excluded") oid_worktree2indexed=$(calc_oid "$content_worktree2indexed") oid_oldcommit1=$(calc_oid "$content_oldcommit1"]) oid_oldcommit2=$(calc_oid "$content_oldcommit2") oid_oldcommit3=$(calc_oid "$content_oldcommit3") echo "[ { \"CommitDate\":\"$(get_date -40d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_oldcommit1}, \"Data\":\"$content_oldcommit1\"}] }, { \"CommitDate\":\"$(get_date -35d)\", \"NewBranch\":\"branch1\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_oldcommit2}, \"Data\":\"$content_oldcommit2\"}] }, { \"CommitDate\":\"$(get_date -20d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_worktree1head}, \"Data\":\"$content_worktree1head\"}, {\"Filename\":\"foo/file.dat\",\"Size\":${#content_worktree1excluded}, \"Data\":\"$content_worktree1excluded\"}] }, { \"CommitDate\":\"$(get_date -30d)\", \"ParentBranches\":[\"main\"], \"NewBranch\":\"branch2\", \"Files\":[ 
{\"Filename\":\"file.dat\",\"Size\":${#content_oldcommit3}, \"Data\":\"$content_oldcommit3\"}] }, { \"CommitDate\":\"$(get_date -15d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_worktree2head}, \"Data\":\"$content_worktree2head\"}, {\"Filename\":\"foo/file.dat\",\"Size\":${#content_worktree2excluded}, \"Data\":\"$content_worktree2excluded\"}] }, { \"CommitDate\":\"$(get_date -30d)\", \"ParentBranches\":[\"main\"], \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_head}, \"Data\":\"$content_head\"}] } ]" | lfstest-testutils addcommits # push everything so that's not a retention issue git push origin main:main branch1:branch1 branch2:branch2 # don't keep any recent, just checkouts git config lfs.fetchrecentrefsdays 0 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 0 # We need to prevent MSYS from rewriting /foo into a Windows path. MSYS_NO_PATHCONV=1 git config "lfs.fetchexclude" "/foo" # before worktree, everything except current checkout would be pruned git lfs prune --dry-run 2>&1 | tee prune.log grep "prune: 8 local objects, 1 retained, done." prune.log grep "prune: 7 files would be pruned" prune.log # now add worktrees on the other branches git worktree add "../w1_$reponame" "branch1" git worktree add "../w2_$reponame" "branch2" # stage files in worktrees cd "../w1_$reponame" echo "$content_worktree1indexed" > indexed.dat git lfs track "*.dat" git add indexed.dat cd "../w2_$reponame" echo "$content_worktree2indexed" > indexed.dat git lfs track "*.dat" git add indexed.dat cd "../$reponame" # now should retain all 3 heads except for paths excluded by filter plus the indexed files git lfs prune --dry-run 2>&1 | tee prune.log grep "prune: 10 local objects, 5 retained, done." prune.log grep "prune: 5 files would be pruned" prune.log # also check that the same result is obtained when inside worktree rather than main cd "../w1_$reponame" git lfs prune --dry-run 2>&1 | tee prune.log grep "prune: 10 local objects, 5 retained, done." prune.log grep "prune: 5 files would be pruned" prune.log # now remove a worktree and prove that frees up the object staged in the # worktree's index but leaves the non-excluded object in its HEAD commit cd "../$reponame" rm -rf "../w1_$reponame" git lfs prune --dry-run 2>&1 | tee prune.log grep "prune: 10 local objects, 4 retained, done." prune.log grep "prune: 6 files would be pruned" prune.log # now ask Git to tidy the worktree metadata and prove that frees up the # non-excluded object in the removed worktree's HEAD commit git worktree prune git lfs prune --dry-run 2>&1 | tee prune.log grep "prune: 10 local objects, 3 retained, done." 
prune.log grep "prune: 7 files would be pruned" prune.log ) end_test begin_test "prune worktree (bare main)" ( set -e reponame="prune_worktree_bare" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log content_head="First checkout HEAD" content_worktree1head="Worktree 1 head" content_worktree1excluded="Worktree 1 excluded by filter" content_worktree1indexed="Worktree 1 indexed" content_oldcommit1="Always pruned 1" content_oldcommit2="Always pruned 2" oid_head=$(calc_oid "$content_head") oid_worktree1head=$(calc_oid "$content_worktree1head") oid_worktree1excluded=$(calc_oid "$content_worktree1excluded") oid_worktree1indexed=$(calc_oid "$content_worktree1indexed") oid_oldcommit1=$(calc_oid "$content_oldcommit1"]) oid_oldcommit2=$(calc_oid "$content_oldcommit2") echo "[ { \"CommitDate\":\"$(get_date -40d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_oldcommit1}, \"Data\":\"$content_oldcommit1\"}] }, { \"CommitDate\":\"$(get_date -35d)\", \"NewBranch\":\"branch1\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_oldcommit2}, \"Data\":\"$content_oldcommit2\"}] }, { \"CommitDate\":\"$(get_date -20d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_worktree1head}, \"Data\":\"$content_worktree1head\"}, {\"Filename\":\"foo/file.dat\",\"Size\":${#content_worktree1excluded}, \"Data\":\"$content_worktree1excluded\"}] }, { \"CommitDate\":\"$(get_date -30d)\", \"ParentBranches\":[\"main\"], \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_head}, \"Data\":\"$content_head\"}] } ]" | lfstest-testutils addcommits git push origin main:main branch1:branch1 # checkout bare repo cd .. git clone --bare "$GITSERVER/remote_${reponame}" "${reponame}-bare" cd "${reponame}-bare" # fetch all LFS objects git lfs fetch --all # set retention configurations which should be ignored because there # are no remote branches, so all objects are considered unpushed git config lfs.fetchrecentrefsdays 0 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 0 # We need to prevent MSYS from rewriting /foo into a Windows path. MSYS_NO_PATHCONV=1 git config "lfs.fetchexclude" "/foo" # now add worktree on the branch git worktree add "../w1_$reponame" "branch1" # stage files in worktree cd "../w1_$reponame" echo "$content_worktree1indexed" > indexed.dat git lfs track "*.dat" git add indexed.dat # should retain all objects because there are no remote branches # in a bare repo, so all objects are considered unpushed git lfs prune --dry-run 2>&1 | tee prune.log grep "prune: 6 local objects, 6 retained, done." prune.log [ "0" -eq "$(grep -c "files would be pruned" prune.log)" ] ) end_test git-lfs-3.6.1/t/t-prune.sh000077500000000000000000001654771472372047300153170ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "prune unreferenced and old" ( set -e reponame="prune_unref_old" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # generate content we'll use content_unreferenced="To delete: unreferenced" content_oldandpushed="To delete: pushed and too old" content_oldandunchanged="Keep: pushed and created a while ago, but still current" oid_unreferenced=$(calc_oid "$content_unreferenced") oid_oldandpushed=$(calc_oid "$content_oldandpushed") oid_oldandunchanged=$(calc_oid "$content_oldandunchanged") content_retain1="Retained content 1" content_retain2="Retained content 2" oid_retain1=$(calc_oid "$content_retain1") oid_retain2=$(calc_oid "$content_retain2") # Remember for something to be 'too old' it has to appear on the MINUS side # of the diff outside the prune window, i.e. it's not when it was introduced # but when it disappeared from relevance. That's why changes to old.dat on main # from 7d ago are included even though the commit itself is outside of the window, # that content of old.dat was relevant until it was removed with a commit, inside the window # think of it as windows of relevance that overlap until the content is replaced # we also make sure we commit today on main so that the recent commits measured # from latest commit on main tracks back from there echo "[ { \"CommitDate\":\"$(get_date -20d)\", \"Files\":[ {\"Filename\":\"old.dat\",\"Size\":${#content_oldandpushed}, \"Data\":\"$content_oldandpushed\"}, {\"Filename\":\"stillcurrent.dat\",\"Size\":${#content_oldandunchanged}, \"Data\":\"$content_oldandunchanged\"}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"old.dat\",\"Size\":${#content_retain1}, \"Data\":\"$content_retain1\"}] }, { \"CommitDate\":\"$(get_date -4d)\", \"NewBranch\":\"branch_to_delete\", \"Files\":[ {\"Filename\":\"unreferenced.dat\",\"Size\":${#content_unreferenced}, \"Data\":\"$content_unreferenced\"}] }, { \"ParentBranches\":[\"main\"], \"Files\":[ {\"Filename\":\"old.dat\",\"Size\":${#content_retain2}, \"Data\":\"$content_retain2\"}] } ]" | lfstest-testutils addcommits git push origin main git branch -D branch_to_delete git config lfs.fetchrecentrefsdays 5 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 3 git config lfs.pruneoffsetdays 2 git lfs prune --dry-run --verbose 2>&1 | tee prune.log grep "prune: 5 local objects, 3 retained" prune.log grep "prune: 2 files would be pruned" prune.log grep "$oid_oldandpushed" prune.log grep "$oid_unreferenced" prune.log assert_local_object "$oid_oldandpushed" "${#content_oldandpushed}" assert_local_object "$oid_unreferenced" "${#content_unreferenced}" git lfs prune refute_local_object "$oid_oldandpushed" "${#content_oldandpushed}" refute_local_object "$oid_unreferenced" "${#content_unreferenced}" assert_local_object "$oid_oldandunchanged" "${#content_oldandunchanged}" assert_local_object "$oid_retain1" "${#content_retain1}" assert_local_object "$oid_retain2" "${#content_retain2}" # now only keep AT refs, no recents git config lfs.fetchrecentcommitsdays 0 git lfs prune --verbose 2>&1 | tee prune.log grep "prune: 3 local objects, 2 retained" prune.log grep "prune: Deleting objects: 100% (1/1), done." 
prune.log grep "$oid_retain1" prune.log refute_local_object "$oid_retain1" assert_local_object "$oid_retain2" "${#content_retain2}" ) end_test begin_test "prune all excluded paths" ( set -e reponame="prune_unref_old_exclude" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # generate content we'll use content_oldandexcluded="To delete: pushed and too old and excluded by filter" content_oldandunchanged="Keep: pushed and created a while ago, but still current" content_prevandexcluded="To delete: pushed and in previous commit to HEAD but excluded by filter" content_unreferencedandexcluded="To delete: unreferenced in deleted branch and pushed and excluded by filter" content_includedandexcluded="Keep: pushed and both excluded by filter and included by another path" content_excluded="To delete: pushed and in HEAD but excluded by filter" oid_oldandexcluded=$(calc_oid "$content_oldandexcluded") oid_oldandunchanged=$(calc_oid "$content_oldandunchanged") oid_prevandexcluded=$(calc_oid "$content_prevandexcluded") oid_unreferencedandexcluded=$(calc_oid "$content_unreferencedandexcluded") oid_includedandexcluded=$(calc_oid "$content_includedandexcluded") oid_excluded=$(calc_oid "$content_excluded") echo "[ { \"CommitDate\":\"$(get_date -20d)\", \"Files\":[ {\"Filename\":\"foo/oldandexcluded.dat\",\"Size\":${#content_oldandexcluded}, \"Data\":\"$content_oldandexcluded\"}, {\"Filename\":\"stillcurrent.dat\",\"Size\":${#content_oldandunchanged}, \"Data\":\"$content_oldandunchanged\"}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"foo/oldandexcluded.dat\",\"Size\":${#content_prevandexcluded}, \"Data\":\"$content_prevandexcluded\"}] }, { \"CommitDate\":\"$(get_date -4d)\", \"NewBranch\":\"branch_to_delete\", \"Files\":[ {\"Filename\":\"unreferenced.dat\",\"Size\":${#content_unreferencedandexcluded}, \"Data\":\"$content_unreferencedandexcluded\"}] }, { \"ParentBranches\":[\"main\"], \"Files\":[ {\"Filename\":\"foo/unreferencedandexcluded.dat\",\"Size\":${#content_unreferencedandexcluded}, \"Data\":\"$content_unreferencedandexcluded\"}, {\"Filename\":\"foo/includedandexcluded.dat\",\"Size\":${#content_includedandexcluded}, \"Data\":\"$content_includedandexcluded\"}, {\"Filename\":\"included.dat\",\"Size\":${#content_includedandexcluded}, \"Data\":\"$content_includedandexcluded\"}, {\"Filename\":\"foo/oldandexcluded.dat\",\"Size\":${#content_excluded}, \"Data\":\"$content_excluded\"}] } ]" | lfstest-testutils addcommits git push origin main git branch -D branch_to_delete git config lfs.fetchrecentrefsdays 5 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 3 git config lfs.pruneoffsetdays 2 # We need to prevent MSYS from rewriting /foo into a Windows path. 
MSYS_NO_PATHCONV=1 git config "lfs.fetchexclude" "/foo" git lfs prune --dry-run --verbose 2>&1 | tee prune.log grep "prune: 6 local objects, 2 retained" prune.log grep "prune: 4 files would be pruned" prune.log grep "$oid_oldandexcluded" prune.log grep "$oid_prevandexcluded" prune.log grep "$oid_unreferencedandexcluded" prune.log grep "$oid_excluded" prune.log assert_local_object "$oid_oldandexcluded" "${#content_oldandexcluded}" assert_local_object "$oid_prevandexcluded" "${#content_prevandexcluded}" assert_local_object "$oid_unreferencedandexcluded" "${#content_unreferencedandexcluded}" assert_local_object "$oid_excluded" "${#content_excluded}" git lfs prune refute_local_object "$oid_oldandexcluded" "${#content_oldandexcluded}" assert_local_object "$oid_oldandunchanged" "${#content_oldandunchanged}" refute_local_object "$oid_prevandexcluded" "${#content_prevandexcluded}" refute_local_object "$oid_unreferencedandexcluded" "${#content_unreferencedandexcluded}" assert_local_object "$oid_includedandexcluded" "${#content_includedandexcluded}" refute_local_object "$oid_excluded" "${#content_excluded}" ) end_test begin_test "prune keep unpushed" ( set -e # need to set up many commits on each branch with old data so that would # get deleted if it were not for unpushed status (heads would never be pruned but old changes would) reponame="prune_keep_unpushed" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log content_keepunpushedhead1="Keep: unpushed HEAD 1" content_keepunpushedhead2="Keep: unpushed HEAD 2" content_keepunpushedhead3="Keep: unpushed HEAD 3" content_keepunpushedbranch1="Keep: unpushed second branch 1" content_keepunpushedbranch2="Keep: unpushed second branch 2" content_keepunpushedbranch3="Keep: unpushed second branch 3" content_keepunpushedandexcludedbranch1="Keep: unpushed second branch 1 excluded by filter" content_keepunpushedandexcludedbranch2="Keep: unpushed second branch 2 excluded by filter" content_keepunpushedandexcludedbranch3="Keep: unpushed second branch 3 excluded by filter" content_keepunpushedtag1="Keep: unpushed tag only 1" content_keepunpushedtag2="Keep: unpushed tag only 2" content_keepunpushedtag3="Keep: unpushed tag only 3" oid_keepunpushedhead1=$(calc_oid "$content_keepunpushedhead1") oid_keepunpushedhead2=$(calc_oid "$content_keepunpushedhead2") oid_keepunpushedhead3=$(calc_oid "$content_keepunpushedhead3") oid_keepunpushedbranch1=$(calc_oid "$content_keepunpushedbranch1") oid_keepunpushedbranch2=$(calc_oid "$content_keepunpushedbranch2") oid_keepunpushedbranch3=$(calc_oid "$content_keepunpushedbranch3") oid_keepunpushedandexcludedbranch1=$(calc_oid "$content_keepunpushedandexcludedbranch1") oid_keepunpushedandexcludedbranch2=$(calc_oid "$content_keepunpushedandexcludedbranch2") oid_keepunpushedandexcludedbranch3=$(calc_oid "$content_keepunpushedandexcludedbranch3") oid_keepunpushedtag1=$(calc_oid "$content_keepunpushedtag1") oid_keepunpushedtag2=$(calc_oid "$content_keepunpushedtag2") oid_keepunpushedtag3=$(calc_oid "$content_keepunpushedtag3") echo "[ { \"CommitDate\":\"$(get_date -40d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keepunpushedhead1}, \"Data\":\"$content_keepunpushedhead1\"}] }, { \"CommitDate\":\"$(get_date -31d)\", \"ParentBranches\":[\"main\"], \"NewBranch\":\"branch_unpushed\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keepunpushedbranch1}, \"Data\":\"$content_keepunpushedbranch1\"}, 
{\"Filename\":\"foo/file.dat\",\"Size\":${#content_keepunpushedandexcludedbranch1}, \"Data\":\"$content_keepunpushedandexcludedbranch1\"}] }, { \"CommitDate\":\"$(get_date -16d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keepunpushedbranch2}, \"Data\":\"$content_keepunpushedbranch2\"}, {\"Filename\":\"foo/file.dat\",\"Size\":${#content_keepunpushedandexcludedbranch2}, \"Data\":\"$content_keepunpushedandexcludedbranch2\"}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keepunpushedbranch3}, \"Data\":\"$content_keepunpushedbranch3\"}, {\"Filename\":\"foo/file.dat\",\"Size\":${#content_keepunpushedandexcludedbranch3}, \"Data\":\"$content_keepunpushedandexcludedbranch3\"}] }, { \"CommitDate\":\"$(get_date -31d)\", \"ParentBranches\":[\"main\"], \"NewBranch\":\"branch_unpushed_tagged_only\", \"Files\":[ {\"Filename\":\"file2.dat\",\"Size\":${#content_keepunpushedtag1}, \"Data\":\"$content_keepunpushedtag1\"}] }, { \"CommitDate\":\"$(get_date -16d)\", \"Files\":[ {\"Filename\":\"file2.dat\",\"Size\":${#content_keepunpushedtag2}, \"Data\":\"$content_keepunpushedtag2\"}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Tags\":[\"tag_unpushed\"], \"Files\":[ {\"Filename\":\"file2.dat\",\"Size\":${#content_keepunpushedtag3}, \"Data\":\"$content_keepunpushedtag3\"}] }, { \"CommitDate\":\"$(get_date -21d)\", \"ParentBranches\":[\"main\"], \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keepunpushedhead2}, \"Data\":\"$content_keepunpushedhead2\"}] }, { \"CommitDate\":\"$(get_date -0d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keepunpushedhead3}, \"Data\":\"$content_keepunpushedhead3\"}] } ]" | lfstest-testutils addcommits git config lfs.fetchrecentrefsdays 5 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 0 # only keep AT refs, no recents git config lfs.pruneoffsetdays 2 # We need to prevent MSYS from rewriting /foo into a Windows path. MSYS_NO_PATHCONV=1 git config "lfs.fetchexclude" "/foo" # force color codes in git diff meta-information git config color.diff always git branch -D branch_unpushed_tagged_only git lfs prune # Now push main and show that only older versions on main will be removed. git push origin main git lfs prune --verbose 2>&1 | tee prune.log grep "prune: 12 local objects, 10 retained" prune.log grep "prune: Deleting objects: 100% (2/2), done." prune.log grep "$oid_keepunpushedhead1" prune.log grep "$oid_keepunpushedhead2" prune.log refute_local_object "$oid_keepunpushedhead1" refute_local_object "$oid_keepunpushedhead2" # Merge the unpushed branch and tag, delete them, and then push main. # Resolve conflicts by taking other branch. git merge -Xtheirs branch_unpushed git merge tag_unpushed git branch -D branch_unpushed git tag -d tag_unpushed git push origin main # Now make sure we purged all the intermediate commits but also make sure # they are on the remote. git lfs prune --verbose 2>&1 | tee prune.log grep "prune: 10 local objects, 2 retained" prune.log grep "prune: Deleting objects: 100% (8/8), done." prune.log grep "$oid_keepunpushedbranch1" prune.log grep "$oid_keepunpushedbranch2" prune.log grep "$oid_keepunpushedandexcludedbranch1" prune.log grep "$oid_keepunpushedandexcludedbranch2" prune.log # This is in the new HEAD and would be retained except that it is also # excluded by the filter and has been pushed, so it should have been purged. 
grep "$oid_keepunpushedandexcludedbranch3" prune.log grep "$oid_keepunpushedtag1" prune.log grep "$oid_keepunpushedtag2" prune.log grep "$oid_keepunpushedhead3" prune.log refute_local_object "$oid_keepunpushedbranch1" refute_local_object "$oid_keepunpushedbranch2" refute_local_object "$oid_keepunpushedandexcludedbranch1" refute_local_object "$oid_keepunpushedandexcludedbranch2" # This is in the new HEAD and would be retained except that it is also # excluded by the filter and has been pushed, so it should have been purged. refute_local_object "$oid_keepunpushedandexcludedbranch3" refute_local_object "$oid_keepunpushedtag1" refute_local_object "$oid_keepunpushedtag2" # We used -Xtheirs when merging the branch so the old HEAD is now obsolete. refute_local_object "$oid_keepunpushedhead3" assert_server_object "remote_$reponame" "$oid_keepunpushedbranch1" assert_server_object "remote_$reponame" "$oid_keepunpushedbranch2" assert_server_object "remote_$reponame" "$oid_keepunpushedandexcludedbranch1" assert_server_object "remote_$reponame" "$oid_keepunpushedandexcludedbranch2" assert_server_object "remote_$reponame" "$oid_keepunpushedandexcludedbranch3" assert_server_object "remote_$reponame" "$oid_keepunpushedtag1" assert_server_object "remote_$reponame" "$oid_keepunpushedtag2" assert_server_object "remote_$reponame" "$oid_keepunpushedhead3" ) end_test begin_test "prune keep recent" ( set -e reponame="prune_recent" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log content_keephead="Keep: HEAD" content_keeprecentbranch1tip="Keep: Recent branch 1 tip" content_keeprecentbranch2tip="Keep: Recent branch 2 tip" content_keeprecentcommithead="Keep: Recent commit on HEAD" content_keeprecentcommitbranch1="Keep: Recent commit on recent branch 1" content_keeprecentcommitbranch2="Keep: Recent commit on recent branch 2" content_prunecommitoldbranch1="Prune: old commit on old branch" content_prunecommitoldbranch2="Prune: old branch tip" content_prunecommitbranch1="Prune: old commit on recent branch 1" content_prunecommitbranch2="Prune: old commit on recent branch 2" content_prunecommithead="Prune: old commit on HEAD" oid_keephead=$(calc_oid "$content_keephead") oid_keeprecentbranch1tip=$(calc_oid "$content_keeprecentbranch1tip") oid_keeprecentbranch2tip=$(calc_oid "$content_keeprecentbranch2tip") oid_keeprecentcommithead=$(calc_oid "$content_keeprecentcommithead") oid_keeprecentcommitbranch1=$(calc_oid "$content_keeprecentcommitbranch1") oid_keeprecentcommitbranch2=$(calc_oid "$content_keeprecentcommitbranch2") oid_prunecommitoldbranch=$(calc_oid "$content_prunecommitoldbranch1") oid_prunecommitoldbranch2=$(calc_oid "$content_prunecommitoldbranch2") oid_prunecommitbranch1=$(calc_oid "$content_prunecommitbranch1") oid_prunecommitbranch2=$(calc_oid "$content_prunecommitbranch2") oid_prunecommithead=$(calc_oid "$content_prunecommithead") # use a single file so each commit supersedes the last, if different files # then history becomes harder to track # Also note that when considering 'recent' when editing a single file, it means # that the snapshot state overlapped; so the latest commit *before* the day # that you're looking at, not just the commits on/after. 
echo "[ { \"CommitDate\":\"$(get_date -50d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_prunecommithead}, \"Data\":\"$content_prunecommithead\"}] }, { \"CommitDate\":\"$(get_date -30d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keeprecentcommithead}, \"Data\":\"$content_keeprecentcommithead\"}] }, { \"CommitDate\":\"$(get_date -8d)\", \"NewBranch\":\"branch_old\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_prunecommitoldbranch1}, \"Data\":\"$content_prunecommitoldbranch1\"}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_prunecommitoldbranch2}, \"Data\":\"$content_prunecommitoldbranch2\"}] }, { \"CommitDate\":\"$(get_date -9d)\", \"ParentBranches\":[\"main\"], \"NewBranch\":\"branch1\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_prunecommitbranch1}, \"Data\":\"$content_prunecommitbranch1\"}] }, { \"CommitDate\":\"$(get_date -8d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keeprecentcommitbranch1}, \"Data\":\"$content_keeprecentcommitbranch1\"}] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keeprecentbranch1tip}, \"Data\":\"$content_keeprecentbranch1tip\"}] }, { \"CommitDate\":\"$(get_date -17d)\", \"ParentBranches\":[\"main\"], \"NewBranch\":\"branch2\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_prunecommitbranch2}, \"Data\":\"$content_prunecommitbranch2\"}] }, { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keeprecentcommitbranch2}, \"Data\":\"$content_keeprecentcommitbranch2\"}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keeprecentbranch2tip}, \"Data\":\"$content_keeprecentbranch2tip\"}] }, { \"CommitDate\":\"$(get_date -1d)\", \"ParentBranches\":[\"main\"], \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keephead}, \"Data\":\"$content_keephead\"}] } ]" | lfstest-testutils addcommits # keep refs for 6 days & any prev commit that overlaps 2 days before tip (recent + offset) git config lfs.fetchrecentrefsdays 5 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 1 git config lfs.pruneoffsetdays 1 # force color codes in git diff meta-information git config color.diff always # push everything so that's not a reason to retain git push origin main:main branch_old:branch_old branch1:branch1 branch2:branch2 git lfs prune --verbose 2>&1 | tee prune.log grep "prune: 11 local objects, 6 retained, done." prune.log grep "prune: Deleting objects: 100% (5/5), done." 
prune.log grep "$oid_prunecommitoldbranch" prune.log grep "$oid_prunecommitoldbranch2" prune.log grep "$oid_prunecommitbranch1" prune.log grep "$oid_prunecommitbranch2" prune.log grep "$oid_prunecommithead" prune.log refute_local_object "$oid_prunecommitoldbranch" refute_local_object "$oid_prunecommitoldbranch2" refute_local_object "$oid_prunecommitbranch1" refute_local_object "$oid_prunecommitbranch2" refute_local_object "$oid_prunecommithead" assert_local_object "$oid_keephead" "${#content_keephead}" assert_local_object "$oid_keeprecentbranch1tip" "${#content_keeprecentbranch1tip}" assert_local_object "$oid_keeprecentbranch2tip" "${#content_keeprecentbranch2tip}" assert_local_object "$oid_keeprecentcommithead" "${#content_keeprecentcommithead}" assert_local_object "$oid_keeprecentcommitbranch1" "${#content_keeprecentcommitbranch1}" assert_local_object "$oid_keeprecentcommitbranch2" "${#content_keeprecentcommitbranch2}" # now don't include any recent commits in fetch & hence don't retain # still retain tips of branches git config lfs.fetchrecentcommitsdays 0 git lfs prune --verbose 2>&1 | tee prune.log grep "prune: 6 local objects, 3 retained, done." prune.log grep "prune: Deleting objects: 100% (3/3), done." prune.log assert_local_object "$oid_keephead" "${#content_keephead}" assert_local_object "$oid_keeprecentbranch1tip" "${#content_keeprecentbranch1tip}" assert_local_object "$oid_keeprecentbranch2tip" "${#content_keeprecentbranch2tip}" refute_local_object "$oid_keeprecentcommithead" refute_local_object "$oid_keeprecentcommitbranch1" refute_local_object "$oid_keeprecentcommitbranch2" # now don't include any recent refs at all, only keep HEAD git config lfs.fetchrecentrefsdays 0 git lfs prune --verbose 2>&1 | tee prune.log grep "prune: 3 local objects, 1 retained, done." prune.log grep "prune: Deleting objects: 100% (2/2), done." prune.log assert_local_object "$oid_keephead" "${#content_keephead}" refute_local_object "$oid_keeprecentbranch1tip" refute_local_object "$oid_keeprecentbranch2tip" ) end_test begin_test "prune remote tests" ( set -e reponame="prune_no_or_nonorigin_remote" git init "$reponame" cd "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log echo "[ { \"CommitDate\":\"$(get_date -50d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":30}] }, { \"CommitDate\":\"$(get_date -40d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":28}] }, { \"CommitDate\":\"$(get_date -35d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":37}] }, { \"CommitDate\":\"$(get_date -25d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":42}] } ]" | lfstest-testutils addcommits # set no recents so max ability to prune normally git config lfs.fetchrecentrefsdays 0 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 0 git config lfs.pruneoffsetdays 1 # can never prune with no remote git lfs prune --verbose 2>&1 | tee prune.log grep "prune: 4 local objects, 4 retained, done." prune.log # also make sure nothing is pruned when remote is not origin # create 2 remotes, neither of which is called origin & push to both setup_remote_repo "remote1_$reponame" setup_remote_repo "remote2_$reponame" cd "$TRASHDIR/$reponame" git remote add not_origin "$GITSERVER/remote1_$reponame" git push not_origin main git lfs prune --verbose 2>&1 | tee prune.log grep "prune: 4 local objects, 4 retained, done." 
prune.log # now set the prune remote to be not_origin, should now prune # do a dry run so we can also verify git config lfs.pruneremotetocheck not_origin git lfs prune --verbose --dry-run 2>&1 | tee prune.log grep "prune: 4 local objects, 1 retained, done." prune.log grep "prune: 3 files would be pruned" prune.log ) end_test begin_test "prune verify" ( set -e reponame="prune_verify" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log content_head="HEAD content" content_commit3="Content for commit 3 (prune)" content_commit2_failverify="Content for commit 2 (prune - fail verify)" content_commit1="Content for commit 1 (prune)" oid_head=$(calc_oid "$content_head") oid_commit3=$(calc_oid "$content_commit3") oid_commit2_failverify=$(calc_oid "$content_commit2_failverify") oid_commit1=$(calc_oid "$content_commit1") echo "[ { \"CommitDate\":\"$(get_date -50d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_commit1}, \"Data\":\"$content_commit1\"}] }, { \"CommitDate\":\"$(get_date -40d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_commit2_failverify}, \"Data\":\"$content_commit2_failverify\"}] }, { \"CommitDate\":\"$(get_date -35d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_commit3}, \"Data\":\"$content_commit3\"}] }, { \"CommitDate\":\"$(get_date -25d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_head}, \"Data\":\"$content_head\"}] } ]" | lfstest-testutils addcommits # push all so no unpushed reason to not prune git push origin main # set no recents so max ability to prune normally git config lfs.fetchrecentrefsdays 0 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 0 git config lfs.pruneoffsetdays 1 # confirm that it would prune with verify when no issues git lfs prune --dry-run --verify-remote --verbose 2>&1 | tee prune.log grep "prune: 4 local objects, 1 retained, 3 verified with remote, done." prune.log grep "prune: 3 files would be pruned" prune.log grep "$oid_commit3" prune.log grep "$oid_commit2_failverify" prune.log grep "$oid_commit1" prune.log # delete one file on the server to make the verify fail delete_server_object "remote_$reponame" "$oid_commit2_failverify" # this should now fail git lfs prune --verify-remote 2>&1 | tee prune.log grep "prune: 4 local objects, 1 retained, 2 verified with remote, 1 not on remote, done." prune.log grep "missing on remote:" prune.log grep "$oid_commit2_failverify" prune.log # Nothing should have been deleted assert_local_object "$oid_commit1" "${#content_commit1}" assert_local_object "$oid_commit2_failverify" "${#content_commit2_failverify}" assert_local_object "$oid_commit3" "${#content_commit3}" # Now test with the global option git config lfs.pruneverifyremotealways true # no verify arg but should be pulled from global git lfs prune 2>&1 | tee prune.log grep "prune: 4 local objects, 1 retained, 2 verified with remote, 1 not on remote, done." 
prune.log grep "missing on remote:" prune.log grep "$oid_commit2_failverify" prune.log # Nothing should have been deleted assert_local_object "$oid_commit1" "${#content_commit1}" assert_local_object "$oid_commit2_failverify" "${#content_commit2_failverify}" assert_local_object "$oid_commit3" "${#content_commit3}" # --when-unverified=continue we would prune verified objects but skip unverified objects git lfs prune --when-unverified=continue --dry-run --verbose 2>&1 | tee prune.log grep "prune: 4 local objects, 1 retained, 2 verified with remote, 1 not on remote, done." prune.log grep "prune: 2 files would be pruned" prune.log grep "$oid_commit1" prune.log grep "$oid_commit3" prune.log # now try overriding the global option git lfs prune --no-verify-remote 2>&1 | tee prune.log grep "prune: 4 local objects, 1 retained, done." prune.log grep "prune: Deleting objects: 100% (3/3), done." prune.log # should now have been deleted refute_local_object "$oid_commit1" refute_local_object "$oid_commit2_failverify" refute_local_object "$oid_commit3" ) end_test begin_test "prune verify large numbers of refs" ( set -e reponame="prune_verify_large" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log content_head="HEAD content" content_commit1="Recent commit" content_oldcommit1="Old content $(lfstest-genrandom --base64 40)" content_oldcommit2="Old content $(lfstest-genrandom --base64 40)" oid_head=$(calc_oid "$content_head") # Add two recent commits that should not be pruned echo "[ { \"CommitDate\":\"$(get_date -50d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_oldcommit1}, \"Data\":\"$content_oldcommit1\"}] }, { \"CommitDate\":\"$(get_date -45d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_oldcommit2}, \"Data\":\"$content_oldcommit2\"}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_commit1}, \"Data\":\"$content_commit1\"}] }, { \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_head}, \"Data\":\"$content_head\"}] } ]" | lfstest-testutils addcommits # Generate a large number of refs to old commits make sure prune has a lot of data to read git checkout $(git log --pretty=oneline main | tail -2 | awk '{print $1}' | head -1) for i in $(seq 0 1000); do git tag v$i done git checkout main # push all so no unpushed reason to not prune # git push origin main # set no recents so max ability to prune normally git config lfs.fetchrecentrefsdays 3 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 3 git config lfs.pruneoffsetdays 3 # confirm that prune does not hang git lfs prune --dry-run --verify-remote --verbose 2>&1 | tee prune.log ) end_test begin_test "prune unreachable" ( set -e reponame="prune_unreachable" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log content_head="HEAD content" content_orphan="orphan content (not pruned)" content_commit3="Content for commit 3 (prune)" content_commit2="Content for commit 2" content_commit1="Content for commit 1 (prune)" oid_head=$(calc_oid "$content_head") oid_orphan=$(calc_oid "$content_orphan") oid_commit3=$(calc_oid "$content_commit3") oid_commit2=$(calc_oid "$content_commit2") oid_commit1=$(calc_oid "$content_commit1") echo "[ { \"CommitDate\":\"$(get_date -50d)\", \"Files\":[ 
{\"Filename\":\"file.dat\",\"Size\":${#content_commit1}, \"Data\":\"$content_commit1\"}] }, { \"CommitDate\":\"$(get_date -40d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_commit2}, \"Data\":\"$content_commit2\"}] }, { \"CommitDate\":\"$(get_date -35d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_commit3}, \"Data\":\"$content_commit3\"}] }, { \"CommitDate\":\"$(get_date -25d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_head}, \"Data\":\"$content_head\"}] } ]" | lfstest-testutils addcommits # push all so no unpushed reason to not prune git push origin main # set no recents so max ability to prune normally git config lfs.fetchrecentrefsdays 0 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 0 git config lfs.pruneoffsetdays 1 # add one file to the index and then remove it to see if unreachable files are pruned echo -n "$content_orphan" > file.dat git add file.dat git reset file.dat # confirm that it would prune without --verify-unreachable git lfs prune --dry-run --verify-remote --verbose 2>&1 | tee prune.log grep "prune: 5 local objects, 1 retained, 3 verified with remote, done." prune.log grep "prune: 4 files would be pruned" prune.log grep "$oid_commit3" prune.log grep "$oid_commit2" prune.log grep "$oid_commit1" prune.log grep "$oid_orphan" prune.log # this should now halt as one file cannot be verified git lfs prune --verify-remote --verify-unreachable 2>&1 | tee prune.log grep "prune: 5 local objects, 1 retained, 3 verified with remote, 1 not on remote, done." prune.log grep "missing on remote:" prune.log grep "$oid_orphan" prune.log # No files should have been deleted assert_local_object "$oid_head" "${#content_head}" assert_local_object "$oid_commit1" "${#content_commit1}" assert_local_object "$oid_commit2" "${#content_commit2}" assert_local_object "$oid_commit3" "${#content_commit3}" assert_local_object "$oid_orphan" "${#content_orphan}" # test config option git config lfs.pruneverifyunreachablealways true git lfs prune --verify-remote 2>&1 | tee prune.log grep "prune: 5 local objects, 1 retained, 3 verified with remote, 1 not on remote, done." prune.log grep "missing on remote:" prune.log grep "$oid_orphan" prune.log # now try overriding the global option git lfs prune --verify-remote --no-verify-unreachable --dry-run 2>&1 | tee prune.log grep "prune: 5 local objects, 1 retained, 3 verified with remote, done." prune.log grep "prune: 4 files would be pruned" prune.log # now test with continue to see that it does prune verified objects git lfs prune --verify-remote --when-unverified=continue 2>&1 | tee prune.log grep "prune: 5 local objects, 1 retained, 3 verified with remote, 1 not on remote, done." prune.log grep "prune: Deleting objects: 100% (3/3), done." 
prune.log # The orphan file should not have been deleted refute_local_object "$oid_commit1" "${#content_commit1}" refute_local_object "$oid_commit2" "${#content_commit2}" refute_local_object "$oid_commit3" "${#content_commit3}" assert_local_object "$oid_orphan" "${#content_orphan}" ) end_test begin_test "prune keep stashed changes" ( set -e reponame="prune_keep_stashed" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # generate content we'll use content_oldandpushed="To delete: pushed and too old" oid_oldandpushed=$(calc_oid "$content_oldandpushed") content_unreferenced="To delete: unreferenced" oid_unreferenced=$(calc_oid "$content_unreferenced") content_retain1="Retained content 1" oid_retain1=$(calc_oid "$content_retain1") content_inrepo="This is the original committed data" oid_inrepo=$(calc_oid "$content_inrepo") content_stashed="This data will be stashed and should not be deleted" oid_stashed=$(calc_oid "$content_stashed") content_stashedandexcluded="This data will be stashed and should not be deleted despite being excluded" oid_stashedandexcluded=$(calc_oid "$content_stashedandexcluded") content_stashedbranch="This data will be stashed on a branch and should not be deleted" oid_stashedbranch=$(calc_oid "$content_stashedbranch") content_stashedandexcludedbranch="This data will be stashed on a branch and should not be deleted despite being excluded" oid_stashedandexcludedbranch=$(calc_oid "$content_stashedandexcludedbranch") # We need to test with older commits to ensure they get pruned as expected echo "[ { \"CommitDate\":\"$(get_date -20d)\", \"Files\":[ {\"Filename\":\"old.dat\",\"Size\":${#content_oldandpushed}, \"Data\":\"$content_oldandpushed\"}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"old.dat\",\"Size\":${#content_retain1}, \"Data\":\"$content_retain1\"}] }, { \"CommitDate\":\"$(get_date -4d)\", \"NewBranch\":\"branch_to_delete\", \"Files\":[ {\"Filename\":\"unreferenced.dat\",\"Size\":${#content_unreferenced}, \"Data\":\"$content_unreferenced\"}, {\"Filename\":\"foo/unreferenced.dat\",\"Size\":${#content_unreferenced}, \"Data\":\"$content_unreferenced\"}] }, { \"CommitDate\":\"$(get_date -1d)\", \"ParentBranches\":[\"main\"], \"Files\":[ {\"Filename\":\"stashedfile.dat\",\"Size\":${#content_inrepo}, \"Data\":\"$content_inrepo\"}, {\"Filename\":\"foo/stashedfile.dat\",\"Size\":${#content_inrepo}, \"Data\":\"$content_inrepo\"}] } ]" | lfstest-testutils addcommits git push origin main assert_local_object "$oid_oldandpushed" "${#content_oldandpushed}" assert_local_object "$oid_unreferenced" "${#content_unreferenced}" assert_local_object "$oid_retain1" "${#content_retain1}" # now modify the files, and stash them printf '%s' "$content_stashed" > stashedfile.dat printf '%s' "$content_stashedandexcluded" > foo/stashedfile.dat git stash # Switch to a branch, modify files, stash them, and delete the branch. 
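# Each stash entry is an ordinary commit reachable from the stash reflog,
# so it survives the branch deletion below; prune scans those reflog
# entries, which is why the stashed contents must still be retained even
# once no branch references them.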
git checkout branch_to_delete printf '%s' "$content_stashedbranch" > unreferenced.dat printf '%s' "$content_stashedandexcludedbranch" > foo/unreferenced.dat git stash git checkout main git branch -D branch_to_delete # Prove that the stashed data was stored in LFS (should call clean filter) assert_local_object "$oid_stashed" "${#content_stashed}" assert_local_object "$oid_stashedandexcluded" "${#content_stashedandexcluded}" assert_local_object "$oid_stashedbranch" "${#content_stashedbranch}" assert_local_object "$oid_stashedandexcludedbranch" "${#content_stashedandexcludedbranch}" # We need to prevent MSYS from rewriting /foo into a Windows path. MSYS_NO_PATHCONV=1 git config "lfs.fetchexclude" "/foo" # force color codes in git diff meta-information git config color.diff always # Prune data, should NOT delete stashed files git lfs prune refute_local_object "$oid_oldandpushed" "${#content_oldandpushed}" refute_local_object "$oid_unreferenced" "${#content_unreferenced}" assert_local_object "$oid_retain1" "${#content_retain1}" assert_local_object "$oid_stashed" "${#content_stashed}" assert_local_object "$oid_stashedandexcluded" "${#content_stashedandexcluded}" assert_local_object "$oid_stashedbranch" "${#content_stashedbranch}" assert_local_object "$oid_stashedandexcludedbranch" "${#content_stashedandexcludedbranch}" ) end_test begin_test "prune keep stashed changes in index" ( set -e reponame="prune_keep_stashed_index" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # generate content we'll use content_oldandpushed="To delete: pushed and too old" oid_oldandpushed=$(calc_oid "$content_oldandpushed") content_unreferenced="To delete: unreferenced" oid_unreferenced=$(calc_oid "$content_unreferenced") content_retain1="Retained content 1" oid_retain1=$(calc_oid "$content_retain1") content_inrepo="This is the original committed data" oid_inrepo=$(calc_oid "$content_inrepo") content_indexstashed="This data will be stashed from the index and should not be deleted" oid_indexstashed=$(calc_oid "$content_indexstashed") content_indexstashedandexcluded="This data will be stashed from the index and should not be deleted despite being excluded" oid_indexstashedandexcluded=$(calc_oid "$content_indexstashedandexcluded") content_stashed="This data will be stashed and should not be deleted" oid_stashed=$(calc_oid "$content_stashed") content_stashedandexcluded="This data will be stashed and should not be deleted despite being excluded" oid_stashedandexcluded=$(calc_oid "$content_stashedandexcluded") content_indexstashedbranch="This data will be stashed on a branch from the index and should not be deleted" oid_indexstashedbranch=$(calc_oid "$content_indexstashedbranch") content_indexstashedandexcludedbranch="This data will be stashed on a branch from the index and should not be deleted despite being excluded" oid_indexstashedandexcludedbranch=$(calc_oid "$content_indexstashedandexcludedbranch") content_stashedbranch="This data will be stashed on a branch and should not be deleted" oid_stashedbranch=$(calc_oid "$content_stashedbranch") content_stashedandexcludedbranch="This data will be stashed on a branch and should not be deleted despite being excluded" oid_stashedandexcludedbranch=$(calc_oid "$content_stashedandexcludedbranch") # We need to test with older commits to ensure they get pruned as expected echo "[ { \"CommitDate\":\"$(get_date -20d)\", \"Files\":[ 
{\"Filename\":\"old.dat\",\"Size\":${#content_oldandpushed}, \"Data\":\"$content_oldandpushed\"}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"old.dat\",\"Size\":${#content_retain1}, \"Data\":\"$content_retain1\"}] }, { \"CommitDate\":\"$(get_date -4d)\", \"NewBranch\":\"branch_to_delete\", \"Files\":[ {\"Filename\":\"unreferenced.dat\",\"Size\":${#content_unreferenced}, \"Data\":\"$content_unreferenced\"}, {\"Filename\":\"foo/unreferenced.dat\",\"Size\":${#content_unreferenced}, \"Data\":\"$content_unreferenced\"}] }, { \"CommitDate\":\"$(get_date -1d)\", \"ParentBranches\":[\"main\"], \"Files\":[ {\"Filename\":\"stashedfile.dat\",\"Size\":${#content_inrepo}, \"Data\":\"$content_inrepo\"}, {\"Filename\":\"foo/stashedfile.dat\",\"Size\":${#content_inrepo}, \"Data\":\"$content_inrepo\"}] } ]" | lfstest-testutils addcommits git push origin main assert_local_object "$oid_oldandpushed" "${#content_oldandpushed}" assert_local_object "$oid_unreferenced" "${#content_unreferenced}" assert_local_object "$oid_retain1" "${#content_retain1}" # now modify the files, and add them to the index printf '%s' "$content_indexstashed" > stashedfile.dat printf '%s' "$content_indexstashedandexcluded" > foo/stashedfile.dat git add stashedfile.dat foo/stashedfile.dat # now modify the files again, and stash them printf '%s' "$content_stashed" > stashedfile.dat printf '%s' "$content_stashedandexcluded" > foo/stashedfile.dat git stash # Switch to a branch, modify files in the index and working tree, stash them, # and delete the branch. git checkout branch_to_delete printf '%s' "$content_indexstashedbranch" > unreferenced.dat printf '%s' "$content_indexstashedandexcludedbranch" > foo/unreferenced.dat git add unreferenced.dat foo/unreferenced.dat printf '%s' "$content_stashedbranch" > unreferenced.dat printf '%s' "$content_stashedandexcludedbranch" > foo/unreferenced.dat git stash git checkout main git branch -D branch_to_delete # Prove that the stashed data was stored in LFS (should call clean filter) assert_local_object "$oid_indexstashed" "${#content_indexstashed}" assert_local_object "$oid_indexstashedandexcluded" "${#content_indexstashedandexcluded}" assert_local_object "$oid_stashed" "${#content_stashed}" assert_local_object "$oid_stashedandexcluded" "${#content_stashedandexcluded}" assert_local_object "$oid_indexstashedbranch" "${#content_indexstashedbranch}" assert_local_object "$oid_indexstashedandexcludedbranch" "${#content_indexstashedandexcludedbranch}" assert_local_object "$oid_stashedbranch" "${#content_stashedbranch}" assert_local_object "$oid_stashedandexcludedbranch" "${#content_stashedandexcludedbranch}" # We need to prevent MSYS from rewriting /foo into a Windows path. 
MSYS_NO_PATHCONV=1 git config "lfs.fetchexclude" "/foo" # force color codes in git diff meta-information git config color.diff always # Prune data, should NOT delete stashed file or stashed changes to index git lfs prune refute_local_object "$oid_oldandpushed" "${#content_oldandpushed}" refute_local_object "$oid_unreferenced" "${#content_unreferenced}" assert_local_object "$oid_retain1" "${#content_retain1}" assert_local_object "$oid_indexstashed" "${#content_indexstashed}" assert_local_object "$oid_indexstashedandexcluded" "${#content_indexstashedandexcluded}" assert_local_object "$oid_stashed" "${#content_stashed}" assert_local_object "$oid_stashedandexcluded" "${#content_stashedandexcluded}" assert_local_object "$oid_indexstashedbranch" "${#content_indexstashedbranch}" assert_local_object "$oid_indexstashedandexcludedbranch" "${#content_indexstashedandexcludedbranch}" assert_local_object "$oid_stashedbranch" "${#content_stashedbranch}" assert_local_object "$oid_stashedandexcludedbranch" "${#content_stashedandexcludedbranch}" ) end_test begin_test "prune keep stashed untracked files" ( set -e reponame="prune_keep_stashed_untracked" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # Commit .gitattributes first so it is not removed by "git stash -u" and then # not restored when we checkout the branch and try to modify LFS objects. git add .gitattributes git commit -m attribs # generate content we'll use content_oldandpushed="To delete: pushed and too old" oid_oldandpushed=$(calc_oid "$content_oldandpushed") content_unreferenced="To delete: unreferenced" oid_unreferenced=$(calc_oid "$content_unreferenced") content_retain1="Retained content 1" oid_retain1=$(calc_oid "$content_retain1") content_inrepo="This is the original committed data" oid_inrepo=$(calc_oid "$content_inrepo") content_indexstashed="This data will be stashed from the index and should not be deleted" oid_indexstashed=$(calc_oid "$content_indexstashed") content_indexstashedandexcluded="This data will be stashed from the index and should not be deleted despite being excluded" oid_indexstashedandexcluded=$(calc_oid "$content_indexstashedandexcluded") content_stashed="This data will be stashed and should not be deleted" oid_stashed=$(calc_oid "$content_stashed") content_stashedandexcluded="This data will be stashed and should not be deleted despite being excluded" oid_stashedandexcluded=$(calc_oid "$content_stashedandexcluded") content_untrackedstashed="This UNTRACKED FILE data will be stashed and should not be deleted" oid_untrackedstashed=$(calc_oid "$content_untrackedstashed") content_untrackedstashedandexcluded="This UNTRACKED FILE data will be stashed and should not be deleted despite being excluded" oid_untrackedstashedandexcluded=$(calc_oid "$content_untrackedstashedandexcluded") content_indexstashedbranch="This data will be stashed on a branch from the index and should not be deleted" oid_indexstashedbranch=$(calc_oid "$content_indexstashedbranch") content_indexstashedandexcludedbranch="This data will be stashed on a branch from the index and should not be deleted despite being excluded" oid_indexstashedandexcludedbranch=$(calc_oid "$content_indexstashedandexcludedbranch") content_stashedbranch="This data will be stashed on a branch and should not be deleted" oid_stashedbranch=$(calc_oid "$content_stashedbranch") content_stashedandexcludedbranch="This data will be stashed on a branch and should not be 
deleted despite being excluded" oid_stashedandexcludedbranch=$(calc_oid "$content_stashedandexcludedbranch") content_untrackedstashedbranch="This UNTRACKED FILE data will be stashed on a branch and should not be deleted" oid_untrackedstashedbranch=$(calc_oid "$content_untrackedstashedbranch") content_untrackedstashedandexcludedbranch="This UNTRACKED FILE data will be stashed on a branch and should not be deleted despite being excluded" oid_untrackedstashedandexcludedbranch=$(calc_oid "$content_untrackedstashedandexcludedbranch") # We need to test with older commits to ensure they get pruned as expected echo "[ { \"CommitDate\":\"$(get_date -20d)\", \"Files\":[ {\"Filename\":\"old.dat\",\"Size\":${#content_oldandpushed}, \"Data\":\"$content_oldandpushed\"}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"old.dat\",\"Size\":${#content_retain1}, \"Data\":\"$content_retain1\"}] }, { \"CommitDate\":\"$(get_date -4d)\", \"NewBranch\":\"branch_to_delete\", \"Files\":[ {\"Filename\":\"unreferenced.dat\",\"Size\":${#content_unreferenced}, \"Data\":\"$content_unreferenced\"}, {\"Filename\":\"foo/unreferenced.dat\",\"Size\":${#content_unreferenced}, \"Data\":\"$content_unreferenced\"}] }, { \"CommitDate\":\"$(get_date -1d)\", \"ParentBranches\":[\"main\"], \"Files\":[ {\"Filename\":\"stashedfile.dat\",\"Size\":${#content_inrepo}, \"Data\":\"$content_inrepo\"}, {\"Filename\":\"foo/stashedfile.dat\",\"Size\":${#content_inrepo}, \"Data\":\"$content_inrepo\"}] } ]" | lfstest-testutils addcommits git push origin main assert_local_object "$oid_oldandpushed" "${#content_oldandpushed}" assert_local_object "$oid_unreferenced" "${#content_unreferenced}" assert_local_object "$oid_retain1" "${#content_retain1}" # now modify the files, and add them to the index printf '%s' "$content_indexstashed" > stashedfile.dat printf '%s' "$content_indexstashedandexcluded" > foo/stashedfile.dat git add stashedfile.dat foo/stashedfile.dat # now modify the files again, and stash them printf '%s' "$content_stashed" > stashedfile.dat printf '%s' "$content_stashedandexcluded" > foo/stashedfile.dat # Also create untracked files printf '%s' "$content_untrackedstashed" > untrackedfile.dat printf '%s' "$content_untrackedstashedandexcluded" > foo/untrackedfile.dat # stash, including untracked git stash -u # Switch to a branch, modify files in the index and working tree and create # untracked files, stash them, and delete the branch. 
git checkout branch_to_delete printf '%s' "$content_indexstashedbranch" > unreferenced.dat printf '%s' "$content_indexstashedandexcludedbranch" > foo/unreferenced.dat git add unreferenced.dat foo/unreferenced.dat printf '%s' "$content_stashedbranch" > unreferenced.dat printf '%s' "$content_stashedandexcludedbranch" > foo/unreferenced.dat printf '%s' "$content_untrackedstashedbranch" > untrackedfile.dat printf '%s' "$content_untrackedstashedandexcludedbranch" > foo/untrackedfile.dat git stash -u git checkout main git branch -D branch_to_delete # Prove that ALL stashed data was stored in LFS (should call clean filter) assert_local_object "$oid_indexstashed" "${#content_indexstashed}" assert_local_object "$oid_indexstashedandexcluded" "${#content_indexstashedandexcluded}" assert_local_object "$oid_stashed" "${#content_stashed}" assert_local_object "$oid_stashedandexcluded" "${#content_stashedandexcluded}" assert_local_object "$oid_untrackedstashed" "${#content_untrackedstashed}" assert_local_object "$oid_untrackedstashedandexcluded" "${#content_untrackedstashedandexcluded}" assert_local_object "$oid_indexstashedbranch" "${#content_indexstashedbranch}" assert_local_object "$oid_indexstashedandexcludedbranch" "${#content_indexstashedandexcludedbranch}" assert_local_object "$oid_stashedbranch" "${#content_stashedbranch}" assert_local_object "$oid_stashedandexcludedbranch" "${#content_stashedandexcludedbranch}" assert_local_object "$oid_untrackedstashedbranch" "${#content_untrackedstashedbranch}" assert_local_object "$oid_untrackedstashedandexcludedbranch" "${#content_untrackedstashedandexcludedbranch}" # We need to prevent MSYS from rewriting /foo into a Windows path. MSYS_NO_PATHCONV=1 git config "lfs.fetchexclude" "/foo" # force color codes in git diff meta-information git config color.diff always # Prune data, should NOT delete stashed file or stashed changes to index git lfs prune refute_local_object "$oid_oldandpushed" "${#content_oldandpushed}" refute_local_object "$oid_unreferenced" "${#content_unreferenced}" assert_local_object "$oid_retain1" "${#content_retain1}" assert_local_object "$oid_indexstashed" "${#content_indexstashed}" assert_local_object "$oid_indexstashedandexcluded" "${#content_indexstashedandexcluded}" assert_local_object "$oid_stashed" "${#content_stashed}" assert_local_object "$oid_stashedandexcluded" "${#content_stashedandexcluded}" assert_local_object "$oid_untrackedstashed" "${#content_untrackedstashed}" assert_local_object "$oid_untrackedstashedandexcluded" "${#content_untrackedstashedandexcluded}" assert_local_object "$oid_indexstashedbranch" "${#content_indexstashedbranch}" assert_local_object "$oid_indexstashedandexcludedbranch" "${#content_indexstashedandexcludedbranch}" assert_local_object "$oid_stashedbranch" "${#content_stashedbranch}" assert_local_object "$oid_stashedandexcludedbranch" "${#content_stashedandexcludedbranch}" assert_local_object "$oid_untrackedstashedbranch" "${#content_untrackedstashedbranch}" assert_local_object "$oid_untrackedstashedandexcludedbranch" "${#content_untrackedstashedandexcludedbranch}" ) end_test begin_test "prune recent changes with --recent" ( set -e reponame="prune_recent_arg" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" # generate content we'll use content_inrepo="this is the original committed data" oid_inrepo=$(calc_oid "$content_inrepo") content_new="this data will be recent" oid_new=$(calc_oid "$content_new") content_stashed="This data will be stashed and 
should not be deleted" oid_stashed=$(calc_oid "$content_stashed") echo "[ { \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_inrepo}, \"Data\":\"$content_inrepo\"}] } ]" | lfstest-testutils addcommits # now modify the file, and commit it printf '%s' "$content_new" > file.dat git add . git commit -m 'Update file.dat' # now modify the file, and stash it printf '%s' "$content_stashed" > file.dat git stash git config lfs.fetchrecentrefsdays 5 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 3 assert_local_object "$oid_new" "${#content_new}" assert_local_object "$oid_inrepo" "${#content_inrepo}" assert_local_object "$oid_stashed" "${#content_stashed}" # prune data, should not delete. git lfs prune --recent assert_local_object "$oid_new" "${#content_new}" assert_local_object "$oid_inrepo" "${#content_inrepo}" assert_local_object "$oid_stashed" "${#content_stashed}" git push origin HEAD # prune data. git lfs prune --recent assert_local_object "$oid_new" "${#content_new}" refute_local_object "$oid_inrepo" "${#content_inrepo}" assert_local_object "$oid_stashed" "${#content_stashed}" ) end_test begin_test "prune keep files in index" ( set -e reponame="prune_index" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" # generate content we'll use content_inrepo="this is the original committed data" oid_inrepo=$(calc_oid "$content_inrepo") content_new="this data will be indexed" oid_new=$(calc_oid "$content_new") content_untracked="This data will be untracked and added to the index and should not be deleted" oid_untracked=$(calc_oid "$content_untracked") echo "[ { \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_inrepo}, \"Data\":\"$content_inrepo\"}] } ]" | lfstest-testutils addcommits # now modify the file, and add it to the index printf '%s' "$content_new" > file.dat git add . # now add the file, and add it to the index printf '%s' "$content_untracked" > untracked.dat git add . assert_local_object "$oid_new" "${#content_new}" assert_local_object "$oid_inrepo" "${#content_inrepo}" assert_local_object "$oid_untracked" "${#content_untracked}" # prune data, should not delete. git lfs prune assert_local_object "$oid_new" "${#content_new}" assert_local_object "$oid_inrepo" "${#content_inrepo}" assert_local_object "$oid_untracked" "${#content_untracked}" git push origin HEAD # force prune data should not delete in index git lfs prune --force assert_local_object "$oid_new" "${#content_new}" refute_local_object "$oid_inrepo" "${#content_inrepo}" assert_local_object "$oid_untracked" "${#content_untracked}" ) end_test begin_test "prune --force" ( set -e reponame="prune_force" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" # generate content we'll use content_inrepo="this is the original committed data" oid_inrepo=$(calc_oid "$content_inrepo") content_new="this data will be recent" oid_new=$(calc_oid "$content_new") content_stashed="This data will be stashed and should not be deleted" oid_stashed=$(calc_oid "$content_stashed") echo "[ { \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_inrepo}, \"Data\":\"$content_inrepo\"}] } ]" | lfstest-testutils addcommits # now modify the file, and commit it printf '%s' "$content_new" > file.dat git add . 
git commit -m 'Update file.dat' # now modify the file, and stash it printf '%s' "$content_stashed" > file.dat git stash git config lfs.fetchrecentrefsdays 5 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 3 assert_local_object "$oid_new" "${#content_new}" assert_local_object "$oid_inrepo" "${#content_inrepo}" assert_local_object "$oid_stashed" "${#content_stashed}" # prune data, should not delete. git lfs prune --force assert_local_object "$oid_new" "${#content_new}" assert_local_object "$oid_inrepo" "${#content_inrepo}" assert_local_object "$oid_stashed" "${#content_stashed}" git push origin HEAD # prune data. git lfs prune --force refute_local_object "$oid_new" "${#content_new}" refute_local_object "$oid_inrepo" "${#content_inrepo}" assert_local_object "$oid_stashed" "${#content_stashed}" ) end_test begin_test "prune does not fail on empty files" ( set -e reponame="prune-empty-file" setup_remote_repo "remote-$reponame" clone_repo "remote-$reponame" "clone-$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log touch empty.dat git add .gitattributes empty.dat git commit -m 'Add empty' git push origin main git lfs prune --force ) end_test begin_test "prune does not invoke external diff programs" ( set -e reponame="prune-external-diff" setup_remote_repo "remote-$reponame" clone_repo "remote-$reponame" "clone-$reponame" git config diff.word.textconv 'false' echo "*.dot diff=word" >.git/info/attributes for n in $(seq 1000); do (echo "$n" > "$n.dot") done git add . git commit -am "initial" git lfs prune ) end_test begin_test "prune doesn't hang on long lines in diff" ( set -e reponame="prune_longlines" git init "$reponame" cd "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log echo "test data" >test.dat git add .gitattributes test.dat git commit -m tracked git lfs untrack "*.dat" 2>&1 | tee untrack.log grep "Untracking \"\*.dat\"" untrack.log # Exceed the default buffer size that would be used by bufio.Scanner dd if=/dev/zero bs=1024 count=128 | tr '\0' 'A' >test.dat git add .gitattributes test.dat git commit -m untracked git lfs prune ) end_test begin_test "prune doesn't hang on long lines in stash diff" ( set -e reponame="prune_longlines" git init "$reponame" cd "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log echo "test data" >test.dat git add .gitattributes test.dat git commit -m tracked git lfs untrack "*.dat" 2>&1 | tee untrack.log grep "Untracking \"\*.dat\"" untrack.log # Exceed the default buffer size that would be used by bufio.Scanner dd if=/dev/zero bs=1024 count=128 | tr '\0' 'A' >test.dat git stash git lfs prune ) end_test git-lfs-3.6.1/t/t-pull.sh000077500000000000000000000253711472372047300151260ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "pull" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" clone clone_repo "$reponame" repo git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") contents2="A" contents2_oid=$(calc_oid "$contents2") contents3="dir" contents3_oid=$(calc_oid "$contents3") mkdir dir echo "*.log" > .gitignore printf "%s" "$contents" > a.dat printf "%s" "$contents2" > á.dat printf "%s" "$contents3" > dir/dir.dat git add . 
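# The commit below should record five files: a.dat, á.dat, dir/dir.dat,
# .gitattributes, and .gitignore, which is what the "5 files changed" check
# against the commit log verifies.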
git commit -m "add files" 2>&1 | tee commit.log grep "main (root-commit)" commit.log grep "5 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log ls -al [ "a" = "$(cat a.dat)" ] [ "A" = "$(cat "á.dat")" ] [ "dir" = "$(cat "dir/dir.dat")" ] assert_pointer "main" "a.dat" "$contents_oid" 1 assert_pointer "main" "á.dat" "$contents2_oid" 1 assert_pointer "main" "dir/dir.dat" "$contents3_oid" 3 refute_server_object "$reponame" "$contents_oid" refute_server_object "$reponame" "$contents2_oid" refute_server_object "$reponame" "$contents33oid" echo "initial push" git push origin main 2>&1 | tee push.log grep "Uploading LFS objects: 100% (3/3), 5 B" push.log grep "main -> main" push.log assert_server_object "$reponame" "$contents_oid" assert_server_object "$reponame" "$contents2_oid" assert_server_object "$reponame" "$contents3_oid" # change to the clone's working directory cd ../clone echo "normal pull" git config branch.main.remote origin git config branch.main.merge refs/heads/main git pull 2>&1 [ "a" = "$(cat a.dat)" ] [ "A" = "$(cat "á.dat")" ] assert_local_object "$contents_oid" 1 assert_local_object "$contents2_oid" 1 assert_clean_status echo "lfs pull" rm -r a.dat á.dat dir # removing files makes the status dirty rm -rf .git/lfs/objects git lfs pull ls -al [ "a" = "$(cat a.dat)" ] [ "A" = "$(cat "á.dat")" ] assert_local_object "$contents_oid" 1 assert_local_object "$contents2_oid" 1 git lfs fsck echo "lfs pull with remote" rm -r a.dat á.dat dir rm -rf .git/lfs/objects git lfs pull origin [ "a" = "$(cat a.dat)" ] [ "A" = "$(cat "á.dat")" ] assert_local_object "$contents_oid" 1 assert_local_object "$contents2_oid" 1 assert_clean_status git lfs fsck echo "lfs pull with local storage" rm a.dat á.dat git lfs pull [ "a" = "$(cat a.dat)" ] [ "A" = "$(cat "á.dat")" ] assert_clean_status echo "lfs pull with include/exclude filters in gitconfig" rm -rf .git/lfs/objects git config "lfs.fetchinclude" "a*" git lfs pull assert_local_object "$contents_oid" 1 assert_clean_status rm -rf .git/lfs/objects git config --unset "lfs.fetchinclude" git config "lfs.fetchexclude" "a*" git lfs pull refute_local_object "$contents_oid" assert_clean_status echo "lfs pull with include/exclude filters in command line" git config --unset "lfs.fetchexclude" rm -rf .git/lfs/objects git lfs pull --include="a*" assert_local_object "$contents_oid" 1 assert_clean_status rm -rf .git/lfs/objects git lfs pull --exclude="a*" refute_local_object "$contents_oid" assert_clean_status echo "resetting to test status" git reset --hard assert_clean_status echo "lfs pull clean status" git lfs pull assert_clean_status echo "lfs pull with -I" git lfs pull -I "*.dat" assert_clean_status echo "lfs pull with empty file" touch empty.dat git add empty.dat git commit -m 'empty' git lfs pull [ -z "$(cat empty.dat)" ] assert_clean_status echo "lfs pull in subdir" cd dir git lfs pull assert_clean_status echo "lfs pull in subdir with -I" git lfs pull -I "*.dat" assert_clean_status ) end_test begin_test "pull without clean filter" ( set -e GIT_LFS_SKIP_SMUDGE=1 git clone $GITSERVER/t-pull no-clean cd no-clean git lfs uninstall git config --list > config.txt grep "filter.lfs.clean" config.txt && { echo "clean filter still configured:" cat config.txt exit 1 } contents="a" contents_oid=$(calc_oid "$contents") # LFS object not downloaded, pointer in working directory grep "$contents_oid" a.dat || { echo "a.dat not $contents_oid" ls -al cat a.dat exit 1 } assert_local_object "$contents_oid" git 
lfs pull | tee pull.txt if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected pull to succeed ..." exit 1 fi grep "Git LFS is not installed" pull.txt echo "pulled!" # LFS object downloaded, pointer unchanged grep "$contents_oid" a.dat || { echo "a.dat not $contents_oid" ls -al cat a.dat exit 1 } assert_local_object "$contents_oid" 1 ) end_test begin_test "pull with raw remote url" ( set -e mkdir raw cd raw git init git lfs install --local --skip-smudge git remote add origin $GITSERVER/t-pull git pull origin main contents="a" contents_oid=$(calc_oid "$contents") # LFS object not downloaded, pointer in working directory refute_local_object "$contents_oid" grep "$contents_oid" a.dat git lfs pull "$GITSERVER/t-pull" echo "pulled!" # LFS object downloaded and in working directory assert_local_object "$contents_oid" 1 [ "0" = "$(grep -c "$contents_oid" a.dat)" ] [ "a" = "$(cat a.dat)" ] ) end_test begin_test "pull with multiple remotes" ( set -e mkdir multiple cd multiple git init git lfs install --local --skip-smudge git remote add origin "$GITSERVER/t-pull" git remote add bad-remote "invalid-url" git pull origin main contents="a" contents_oid=$(calc_oid "$contents") # LFS object not downloaded, pointer in working directory refute_local_object "$contents_oid" grep "$contents_oid" a.dat # pull should default to origin instead of bad-remote git lfs pull echo "pulled!" # LFS object downloaded and in working directory assert_local_object "$contents_oid" 1 [ "0" = "$(grep -c "$contents_oid" a.dat)" ] [ "a" = "$(cat a.dat)" ] ) end_test begin_test "pull with invalid insteadof" ( set -e mkdir insteadof cd insteadof git init git lfs install --local --skip-smudge git remote add origin "$GITSERVER/t-pull" git pull origin main # set insteadOf to rewrite the href of downloading LFS object. git config url."$GITSERVER/storage/invalid".insteadOf "$GITSERVER/storage/" # Enable href rewriting explicitly. git config lfs.transfer.enablehrefrewrite true set +e git lfs pull > pull.log 2>&1 res=$? set -e [ "$res" = "2" ] # check rewritten href is used to download LFS object. grep "LFS: Repository or object not found: $GITSERVER/storage/invalid" pull.log # lfs-pull succeed after unsetting enableHrefRewrite config git config --unset lfs.transfer.enablehrefrewrite git lfs pull ) end_test begin_test "pull with merge conflict" ( set -e git init pull-merge-conflict cd pull-merge-conflict git lfs track "*.bin" git add . git commit -m 'gitattributes' printf abc > abc.bin git add . git commit -m 'abc' git checkout -b def printf def > abc.bin git add . git commit -m 'def' git checkout main printf ghi > abc.bin git add . git commit -m 'ghi' # This will exit nonzero because of the merge conflict. GIT_LFS_SKIP_SMUDGE=1 git merge def || true git lfs pull > pull.log 2>&1 [ ! 
-s pull.log ] ) end_test begin_test "pull: with missing object" ( set -e # this clone is setup in the first test in this file cd clone rm -rf .git/lfs/objects contents_oid=$(calc_oid "a") reponame="$(basename "$0" ".sh")" delete_server_object "$reponame" "$contents_oid" refute_server_object "$reponame" "$contents_oid" # should return non-zero, but should also download all the other valid files too git config branch.main.remote origin git config branch.main.merge refs/heads/main git lfs pull 2>&1 | tee pull.log pull_exit="${PIPESTATUS[0]}" [ "$pull_exit" != "0" ] grep "$contents_oid" pull.log contents2_oid=$(calc_oid "A") assert_local_object "$contents2_oid" 1 refute_local_object "$contents_oid" ) end_test begin_test "pull: outside git repository" ( set +e git lfs pull 2>&1 > pull.log res=$? set -e if [ "$res" = "0" ]; then echo "Passes because $GIT_LFS_TEST_DIR is unset." exit 0 fi [ "$res" = "128" ] grep "Not in a Git repository" pull.log ) end_test begin_test "pull with empty file doesn't modify mtime" ( set -e git init pull-empty-file cd pull-empty-file git lfs track "*.bin" git add . git commit -m 'gitattributes' printf abc > abc.bin git add . git commit -m 'abc' touch foo.bin lfstest-nanomtime foo.bin >foo.mtime # This isn't necessary, but it takes a few cycles to make sure that our # timestamp changes. git add foo.bin git commit -m 'foo' git lfs pull lfstest-nanomtime foo.bin >foo.mtime2 diff -u foo.mtime foo.mtime2 ) end_test begin_test "pull with partial clone and sparse checkout and index" ( set -e # Only test with Git version 2.42.0 as it introduced support for the # "objecttype" format option to the "git ls-files" command, which our # code requires. ensure_git_version_isnt "$VERSION_LOWER" "2.42.0" reponame="pull-sparse" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" contents1="a" contents1_oid=$(calc_oid "$contents1") contents2="b" contents2_oid=$(calc_oid "$contents2") contents3="c" contents3_oid=$(calc_oid "$contents3") mkdir in-dir out-dir printf "%s" "$contents1" >a.dat printf "%s" "$contents2" >in-dir/b.dat printf "%s" "$contents3" >out-dir/c.dat git add . git commit -m "add files" git push origin main assert_server_object "$reponame" "$contents1_oid" assert_server_object "$reponame" "$contents2_oid" assert_server_object "$reponame" "$contents3_oid" # Create a partial clone with a cone-mode sparse checkout of one directory # and a sparse index, which is important because otherwise the "git ls-files" # command ignores the --sparse option and lists all Git LFS files. cd .. git clone --filter=tree:0 --depth=1 --no-checkout \ "$GITSERVER/$reponame" "${reponame}-partial" cd "${reponame}-partial" git sparse-checkout init --cone --sparse-index git sparse-checkout set "in-dir" git checkout main [ -d "in-dir" ] [ ! -e "out-dir" ] assert_local_object "$contents1_oid" 1 assert_local_object "$contents2_oid" 1 refute_local_object "$contents3_oid" # Git LFS objects associated with files outside of the sparse cone # should not be pulled. git lfs pull 2>&1 | tee pull.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected pull to succeed ..." exit 1 fi grep -q "Downloading LFS objects" pull.log && exit 1 refute_local_object "$contents3_oid" ) end_test git-lfs-3.6.1/t/t-push-bad-dns.sh000077500000000000000000000013541472372047300164320ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" ensure_git_version_isnt $VERSION_LOWER "2.3.0" begin_test "push: upload to bad dns" ( set -e reponame="$(basename "$0" ".sh")-bad-dns" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" printf "hi" > good.dat git add .gitattributes good.dat git commit -m "welp" port="$(echo "http://127.0.0.1:63378" | cut -f 3 -d ":")" git config lfs.url "http://git-lfs-bad-dns:$port" set +e GIT_TERMINAL_PROMPT=0 git push origin main 2>&1 | tee push.log res="${PIPESTATUS[0]}" set -e refute_server_object "$reponame" "$(calc_oid "hi")" if [ "$res" = "0" ]; then cat push.log echo "push successful?" exit 1 fi ) end_test git-lfs-3.6.1/t/t-push-failures-local.sh000077500000000000000000000122541472372047300200250ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "push with missing objects (lfs.allowincompletepush true)" ( set -e reponame="push-with-missing-objects" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" present="present" present_oid="$(calc_oid "$present")" printf "%s" "$present" > present.dat missing="missing" missing_oid="$(calc_oid "$missing")" printf "%s" "$missing" > missing.dat git add missing.dat present.dat git commit -m "add objects" git rm missing.dat git commit -m "remove missing" # :fire: the "missing" object missing_oid_part_1="$(echo "$missing_oid" | cut -b 1-2)" missing_oid_part_2="$(echo "$missing_oid" | cut -b 3-4)" missing_oid_path=".git/lfs/objects/$missing_oid_part_1/$missing_oid_part_2/$missing_oid" rm "$missing_oid_path" git config lfs.allowincompletepush true git push origin main 2>&1 | tee push.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git push origin main\` to succeed ..." exit 1 fi grep "LFS upload missing objects" push.log grep " (missing) missing.dat ($missing_oid)" push.log assert_server_object "$reponame" "$present_oid" refute_server_object "$reponame" "$missing_oid" ) end_test begin_test "push reject missing objects (lfs.allowincompletepush false)" ( set -e reponame="push-reject-missing-objects" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" present="present" present_oid="$(calc_oid "$present")" printf "%s" "$present" > present.dat missing="missing" missing_oid="$(calc_oid "$missing")" printf "%s" "$missing" > missing.dat git add missing.dat present.dat git commit -m "add objects" git rm missing.dat git commit -m "remove missing" # :fire: the "missing" object missing_oid_part_1="$(echo "$missing_oid" | cut -b 1-2)" missing_oid_part_2="$(echo "$missing_oid" | cut -b 3-4)" missing_oid_path=".git/lfs/objects/$missing_oid_part_1/$missing_oid_part_2/$missing_oid" rm "$missing_oid_path" git config lfs.allowincompletepush false git push origin main 2>&1 | tee push.log if [ "1" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git push origin main\` to succeed ..." 
exit 1 fi grep 'Unable to find source' push.log refute_server_object "$reponame" "$present_oid" refute_server_object "$reponame" "$missing_oid" ) end_test begin_test "push reject missing objects (lfs.allowincompletepush default)" ( set -e reponame="push-missing-objects" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" missing="missing" missing_oid="$(calc_oid "$missing")" missing_len="$(printf "%s" "$missing" | wc -c | awk '{ print $1 }')" printf "%s" "$missing" > missing.dat git add missing.dat git commit -m "add missing.dat" present="present" present_oid="$(calc_oid "$present")" present_len="$(printf "%s" "$present" | wc -c | awk '{ print $1 }')" printf "%s" "$present" > present.dat git add present.dat git commit -m "add present.dat" assert_local_object "$missing_oid" "$missing_len" assert_local_object "$present_oid" "$present_len" delete_local_object "$missing_oid" refute_local_object "$missing_oid" assert_local_object "$present_oid" "$present_len" git push origin main 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected 'git push origin main' to exit with non-zero code" exit 1 fi grep "LFS upload failed:" push.log grep " (missing) missing.dat ($missing_oid)" push.log refute_server_object "$reponame" "$missing_oid" assert_server_object "$reponame" "$present_oid" ) end_test begin_test "push reject corrupt objects (lfs.allowincompletepush default)" ( set -e reponame="push-corrupt-objects" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" corrupt="corrupt" corrupt_oid="$(calc_oid "$corrupt")" corrupt_len="$(printf "%s" "$corrupt" | wc -c | awk '{ print $1 }')" printf "%s" "$corrupt" > corrupt.dat git add corrupt.dat git commit -m "add corrupt.dat" present="present" present_oid="$(calc_oid "$present")" present_len="$(printf "%s" "$present" | wc -c | awk '{ print $1 }')" printf "%s" "$present" > present.dat git add present.dat git commit -m "add present.dat" assert_local_object "$corrupt_oid" "$corrupt_len" assert_local_object "$present_oid" "$present_len" corrupt_local_object "$corrupt_oid" refute_local_object "$corrupt_oid" "$corrupt_len" assert_local_object "$present_oid" "$present_len" git push origin main 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected 'git push origin main' to exit with non-zero code" exit 1 fi grep "LFS upload failed:" push.log grep " (corrupt) corrupt.dat ($corrupt_oid)" push.log refute_server_object "$reponame" "$corrupt_oid" assert_server_object "$reponame" "$present_oid" ) end_test git-lfs-3.6.1/t/t-push-failures-remote.sh000077500000000000000000000042511472372047300202240ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" # push_fail_test performs a test expecting a `git lfs push` to fail given the # contents of a particular file contained within that push. The Git server used # during tests has certain special cases that are triggered by finding specific # keywords within a file (as given by the first argument). # # An optional second argument, "msg", can be included to assert that the # message "msg" appears in the output of a `git lfs push`.
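#
# For illustration, the tests below invoke this helper as, e.g.:
#
#   push_fail_test "status-storage-503" "LFS is temporarily unavailable"
#
# which commits a file containing that keyword and expects the subsequent
# push to fail with the given message.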
push_fail_test() { local contents="$1" local msg="$2" set -e local reponame="$(basename "$0" ".sh")-$contents" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" printf "hi" > good.dat printf "%s" "$contents" > bad.dat git add .gitattributes good.dat bad.dat git commit -m "welp" set +e git push origin main 2>&1 | tee push.log res="${PIPESTATUS[0]}" set -e if [ ! -z "$msg" ]; then grep "$msg" push.log fi refute_server_object "$reponame" "$(calc_oid "$contents")" if [ "$res" = "0" ]; then echo "push successful?" exit 1 fi } begin_test "push: upload file with storage 403" ( set -e push_fail_test "status-storage-403" ) end_test begin_test "push: upload file with storage 404" ( set -e push_fail_test "status-storage-404" ) end_test begin_test "push: upload file with storage 410" ( set -e push_fail_test "status-storage-410" ) end_test begin_test "push: upload file with storage 500" ( set -e push_fail_test "status-storage-500" ) end_test begin_test "push: upload file with storage 503" ( set -e push_fail_test "status-storage-503" "LFS is temporarily unavailable" ) end_test begin_test "push: upload file with api 403" ( set -e push_fail_test "status-batch-403" ) end_test begin_test "push: upload file with api 404" ( set -e push_fail_test "status-batch-404" ) end_test begin_test "push: upload file with api 410" ( set -e push_fail_test "status-batch-410" ) end_test begin_test "push: upload file with api 422" ( set -e push_fail_test "status-batch-422" ) end_test begin_test "push: upload file with api 500" ( set -e push_fail_test "status-batch-500" ) end_test git-lfs-3.6.1/t/t-push-file-with-branch-name.sh000077500000000000000000000006771472372047300211720ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "push a file with the same name as a branch" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "main" echo "main" > main git add .gitattributes main git commit -m "add main" git lfs push --all origin main 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), [0-9] B" push.log ) end_test git-lfs-3.6.1/t/t-push.sh000077500000000000000000000716121472372047300151300ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" # sets up the repos for the first few push tests. The passed argument is the # name of the repo to setup. 
The resulting repo will have a local file tracked # with LFS and committed, but not yet pushed to the remote push_repo_setup() { reponame="$1" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame").locksverify" false git lfs track "*.dat" echo "push a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" } begin_test "push with good ref" ( set -e push_repo_setup "push-main-branch-required" git lfs push origin main ) end_test begin_test "push with tracked ref" ( set -e push_repo_setup "push-tracked-branch-required" git config push.default upstream git config branch.main.merge refs/heads/tracked git lfs push origin main ) end_test begin_test "push with invalid ref" ( set -e push_repo_setup "push-invalid-branch-required" git lfs push origin jibberish >push.log 2>&1 && exit 1 grep "Invalid ref argument" push.log ) end_test begin_test "push with bad ref" ( set -e push_repo_setup "push-other-branch-required" git lfs push origin main 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo "expected command to fail" exit 1 fi grep 'batch response: Expected ref "refs/heads/other", got "refs/heads/main"' push.log ) end_test begin_test "push with nothing" ( set -e push_repo_setup "push-nothing" git lfs push origin 2>&1 | tee push.log grep "At least one ref must be supplied without --all" push.log ) end_test begin_test "push with given remote, configured pushRemote" ( set -e push_repo_setup "push-given-and-config" git remote add bad-remote "invalid-url" git config branch.main.pushRemote bad-remote git lfs push --all origin ) end_test begin_test "push via stdin with extra arguments" ( set -e push_repo_setup "push-stdin-extra-args" echo "main" | git lfs push origin --stdin --dry-run "another-ref" \ 2>&1 | tee push.log grep "Further command line arguments are ignored with --stdin" push.log ) end_test begin_test "push" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame").locksverify" true git lfs track "*.dat" echo "push a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git lfs push --dry-run origin main 2>&1 | tee push.log grep "push 4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340 => a.dat" push.log [ $(grep -c "^push " push.log) -eq 1 ] git lfs push origin main 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 7 B" push.log git checkout -b push-b echo "push b" > b.dat git add b.dat git commit -m "add b.dat" git lfs push --dry-run origin push-b 2>&1 | tee push.log grep "push 4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340 => a.dat" push.log grep "push 82be50ad35070a4ef3467a0a650c52d5b637035e7ad02c36652e59d01ba282b7 => b.dat" push.log [ $(grep -c "^push " < push.log) -eq 2 ] printf "push-b\n\n" | git lfs push --dry-run origin --stdin 2>&1 | tee push.log grep "push 4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340 => a.dat" push.log grep "push 82be50ad35070a4ef3467a0a650c52d5b637035e7ad02c36652e59d01ba282b7 => b.dat" push.log [ $(grep -c "^push " < push.log) -eq 2 ] # simulate remote ref mkdir -p .git/refs/remotes/origin git rev-parse HEAD > .git/refs/remotes/origin/HEAD git lfs push --dry-run origin push-b 2>&1 | tee push.log [ $(grep -c "^push " push.log) -eq 0 ] rm -rf .git/refs/remotes git lfs push origin push-b 2>&1 | tee push.log grep "Uploading LFS objects: 100% (2/2), 14 B" push.log ) end_test # helper used by the next few push --all tests to
set up their repos push_all_setup() { suffix="$1" reponame="$(basename "$0" ".sh")-all-$suffix" content1="initial" content2="update" content3="branch" content4="tagged" content5="main" extracontent="extra" oid1=$(calc_oid "$content1") oid2=$(calc_oid "$content2") oid3=$(calc_oid "$content3") oid4=$(calc_oid "$content4") oid5=$(calc_oid "$content5") extraoid=$(calc_oid "$extracontent") clone_repo "$reponame" "push-all-$suffix" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame").locksverify" true git lfs track "*.dat" echo "[ { \"CommitDate\":\"$(get_date -6m)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content1},\"Data\":\"$content1\"} ] }, { \"CommitDate\":\"$(get_date -5m)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content2},\"Data\":\"$content2\"} ] }, { \"CommitDate\":\"$(get_date -4m)\", \"NewBranch\":\"branch\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content3},\"Data\":\"$content3\"} ] }, { \"CommitDate\":\"$(get_date -4m)\", \"ParentBranches\":[\"main\"], \"Tags\":[\"tag\"], \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content4},\"Data\":\"$content4\"} ] }, { \"CommitDate\":\"$(get_date -2m)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content5},\"Data\":\"$content5\"}, {\"Filename\":\"file2.dat\",\"Size\":${#extracontent},\"Data\":\"$extracontent\"} ] } ]" | lfstest-testutils addcommits git rm file2.dat git commit -m "remove file2.dat" # simulate remote ref mkdir -p .git/refs/remotes/origin git rev-parse HEAD > .git/refs/remotes/origin/HEAD setup_alternate_remote "$reponame-$suffix" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame-$suffix").locksverify" true } begin_test "push --all (no ref args)" ( set -e push_all_setup "everything" git lfs push --dry-run --all origin 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid3 => file1.dat" push.log grep "push $oid4 => file1.dat" push.log grep "push $oid5 => file1.dat" push.log grep "push $extraoid => file2.dat" push.log [ $(grep -c "^push " < push.log) -eq 6 ] git push --all origin 2>&1 | tee push.log [ $(grep -c "Uploading LFS objects: 100% (6/6)" push.log) -eq 1 ] assert_server_object "$reponame-$suffix" "$oid1" assert_server_object "$reponame-$suffix" "$oid2" assert_server_object "$reponame-$suffix" "$oid3" assert_server_object "$reponame-$suffix" "$oid4" assert_server_object "$reponame-$suffix" "$oid5" assert_server_object "$reponame-$suffix" "$extraoid" echo "push while missing old objects locally" setup_alternate_remote "$reponame-$suffix-2" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame-$suffix-2").locksverify" true git lfs push --object-id origin $oid1 assert_server_object "$reponame-$suffix-2" "$oid1" refute_server_object "$reponame-$suffix-2" "$oid2" refute_server_object "$reponame-$suffix-2" "$oid3" refute_server_object "$reponame-$suffix-2" "$oid4" refute_server_object "$reponame-$suffix-2" "$oid5" refute_server_object "$reponame-$suffix-2" "$extraoid" rm ".git/lfs/objects/${oid1:0:2}/${oid1:2:2}/$oid1" echo "dry run missing local object that exists on server" git lfs push --dry-run --all origin 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid3 => file1.dat" push.log grep "push $oid4 => file1.dat" push.log grep "push $oid5 => file1.dat" push.log grep "push $extraoid => file2.dat" push.log [ $(grep -c "^push " push.log) -eq 6 ] git push --all origin 2>&1 | tee push.log grep "Uploading LFS objects: 100% (6/6)" push.log 
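# Note on the "rm" above: Git LFS stores a local object under
# .git/lfs/objects/, fanned out by the first two pairs of hex digits of its
# SHA-256 OID, so (as a sketch) an object with OID "4c48d2a6..." lives at
# .git/lfs/objects/4c/48/4c48d2a6... -- which is what the
# "${oid1:0:2}/${oid1:2:2}/$oid1" expansion reconstructs.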
assert_server_object "$reponame-$suffix-2" "$oid2" assert_server_object "$reponame-$suffix-2" "$oid3" assert_server_object "$reponame-$suffix-2" "$oid4" assert_server_object "$reponame-$suffix-2" "$oid5" assert_server_object "$reponame-$suffix-2" "$extraoid" ) end_test begin_test "push --all (1 ref arg)" ( set -e push_all_setup "ref" git lfs push --dry-run --all origin branch 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid3 => file1.dat" push.log [ $(grep -c "^push " < push.log) -eq 3 ] git lfs push --all origin branch 2>&1 | tee push.log [ $(grep -c "Uploading LFS objects: 100% (3/3)" push.log) -eq 1 ] assert_server_object "$reponame-$suffix" "$oid1" assert_server_object "$reponame-$suffix" "$oid2" assert_server_object "$reponame-$suffix" "$oid3" refute_server_object "$reponame-$suffix" "$oid4" # in main and the tag refute_server_object "$reponame-$suffix" "$oid5" refute_server_object "$reponame-$suffix" "$extraoid" echo "push while missing old objects locally" setup_alternate_remote "$reponame-$suffix-2" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame-$suffix-2").locksverify" true git lfs push --object-id origin $oid1 assert_server_object "$reponame-$suffix-2" "$oid1" refute_server_object "$reponame-$suffix-2" "$oid2" refute_server_object "$reponame-$suffix-2" "$oid3" refute_server_object "$reponame-$suffix-2" "$oid4" refute_server_object "$reponame-$suffix-2" "$oid5" refute_server_object "$reponame-$suffix-2" "$extraoid" rm ".git/lfs/objects/${oid1:0:2}/${oid1:2:2}/$oid1" # dry run doesn't change git lfs push --dry-run --all origin branch 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid3 => file1.dat" push.log [ $(grep -c "^push " push.log) -eq 3 ] git push origin branch 2>&1 | tee push.log [ $(grep -c "Uploading LFS objects: 100% (3/3)" push.log) -eq 1 ] assert_server_object "$reponame-$suffix-2" "$oid2" assert_server_object "$reponame-$suffix-2" "$oid3" refute_server_object "$reponame-$suffix-2" "$oid4" refute_server_object "$reponame-$suffix-2" "$oid5" refute_server_object "$reponame-$suffix-2" "$extraoid" ) end_test begin_test "push --all (multiple ref args)" ( set -e push_all_setup "multiple-refs" git lfs push --dry-run --all origin branch tag 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid3 => file1.dat" push.log grep "push $oid4 => file1.dat" push.log [ $(grep -c "^push " push.log) -eq 4 ] printf "branch\ntag" | git lfs push --dry-run --all origin --stdin 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid3 => file1.dat" push.log grep "push $oid4 => file1.dat" push.log [ $(grep -c "^push " push.log) -eq 4 ] git lfs push --all origin branch tag 2>&1 | tee push.log [ $(grep -c "Uploading LFS objects: 100% (4/4)" push.log) -eq 1 ] assert_server_object "$reponame-$suffix" "$oid1" assert_server_object "$reponame-$suffix" "$oid2" assert_server_object "$reponame-$suffix" "$oid3" assert_server_object "$reponame-$suffix" "$oid4" refute_server_object "$reponame-$suffix" "$oid5" # only in main refute_server_object "$reponame-$suffix" "$extraoid" echo "push while missing old objects locally" setup_alternate_remote "$reponame-$suffix-2" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame-$suffix-2").locksverify" true git lfs push --object-id origin $oid1 assert_server_object "$reponame-$suffix-2" "$oid1" 
refute_server_object "$reponame-$suffix-2" "$oid2" refute_server_object "$reponame-$suffix-2" "$oid3" refute_server_object "$reponame-$suffix-2" "$oid4" refute_server_object "$reponame-$suffix-2" "$oid5" refute_server_object "$reponame-$suffix-2" "$extraoid" rm ".git/lfs/objects/${oid1:0:2}/${oid1:2:2}/$oid1" # dry run doesn't change git lfs push --dry-run --all origin branch tag 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid3 => file1.dat" push.log grep "push $oid4 => file1.dat" push.log [ $(grep -c "^push " push.log) -eq 4 ] git push origin branch refs/tags/tag 2>&1 | tee push.log [ $(grep -c "Uploading LFS objects: 100% (4/4)" push.log) -eq 1 ] assert_server_object "$reponame-$suffix-2" "$oid2" assert_server_object "$reponame-$suffix-2" "$oid3" assert_server_object "$reponame-$suffix-2" "$oid4" refute_server_object "$reponame-$suffix-2" "$oid5" refute_server_object "$reponame-$suffix-2" "$extraoid" ) end_test begin_test "push --all (ref with deleted files)" ( set -e push_all_setup "ref-with-deleted" git lfs push --dry-run --all origin main 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid4 => file1.dat" push.log grep "push $oid5 => file1.dat" push.log grep "push $extraoid => file2.dat" push.log [ $(grep -c "^push " push.log) -eq 5 ] git lfs push --all origin main 2>&1 | tee push.log [ $(grep -c "Uploading LFS objects: 100% (5/5)" push.log) -eq 1 ] assert_server_object "$reponame-$suffix" "$oid1" assert_server_object "$reponame-$suffix" "$oid2" refute_server_object "$reponame-$suffix" "$oid3" # only in the branch assert_server_object "$reponame-$suffix" "$oid4" assert_server_object "$reponame-$suffix" "$oid5" assert_server_object "$reponame-$suffix" "$extraoid" echo "push while missing old objects locally" setup_alternate_remote "$reponame-$suffix-2" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame-$suffix-2").locksverify" true git lfs push --object-id origin $oid1 assert_server_object "$reponame-$suffix-2" "$oid1" refute_server_object "$reponame-$suffix-2" "$oid2" refute_server_object "$reponame-$suffix-2" "$oid3" refute_server_object "$reponame-$suffix-2" "$oid4" refute_server_object "$reponame-$suffix-2" "$oid5" refute_server_object "$reponame-$suffix-2" "$extraoid" rm ".git/lfs/objects/${oid1:0:2}/${oid1:2:2}/$oid1" # dry run doesn't change git lfs push --dry-run --all origin main 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid4 => file1.dat" push.log grep "push $oid5 => file1.dat" push.log grep "push $extraoid => file2.dat" push.log [ $(grep -c "^push " push.log) -eq 5 ] git push origin main 2>&1 | tee push.log [ $(grep -c "Uploading LFS objects: 100% (5/5)" push.log) -eq 1 ] assert_server_object "$reponame-$suffix-2" "$oid2" refute_server_object "$reponame-$suffix-2" "$oid3" assert_server_object "$reponame-$suffix-2" "$oid4" assert_server_object "$reponame-$suffix-2" "$oid5" assert_server_object "$reponame-$suffix-2" "$extraoid" ) end_test begin_test "push object id(s)" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo2 git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame").locksverify" true git lfs track "*.dat" echo "push a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git lfs push --object-id origin --dry-run 2>&1 | tee push.log grep "At least one object ID must be supplied with --object-id" push.log 
git lfs push --object-id origin \ 4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340 \ 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 7 B" push.log echo "push b" > b.dat git add b.dat git commit -m "add b.dat" git lfs push --object-id origin \ 4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340 \ 82be50ad35070a4ef3467a0a650c52d5b637035e7ad02c36652e59d01ba282b7 \ 2>&1 | tee push.log grep "Uploading LFS objects: 100% (2/2), 14 B" push.log ) end_test begin_test "push object id(s) via stdin" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo3 git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame").locksverify" true git lfs track "*.dat" echo "push a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git lfs push --object-id origin --stdin --dry-run 2>&1 | tee push.log grep "At least one object ID must be supplied with --object-id" push.log && exit 1 echo "4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340" | \ git lfs push --object-id origin --stdin --dry-run "c0ffee" \ 2>&1 | tee push.log grep "Further command line arguments are ignored with --stdin" push.log echo "4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340" | \ git lfs push --object-id origin --stdin --dry-run \ 2>&1 | tee push.log grep "push 4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340 =>" push.log echo "push b" > b.dat git add b.dat git commit -m "add b.dat" printf "4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340\n82be50ad35070a4ef3467a0a650c52d5b637035e7ad02c36652e59d01ba282b7\n\n" | \ git lfs push --object-id origin --stdin --dry-run \ 2>&1 | tee push.log grep "push 4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340 =>" push.log grep "push 82be50ad35070a4ef3467a0a650c52d5b637035e7ad02c36652e59d01ba282b7 =>" push.log ) end_test begin_test "push modified files" ( set -e reponame="$(basename "$0" ".sh")-modified" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" # generate content we'll use content1="filecontent1" content2="filecontent2" content3="filecontent3" content4="filecontent4" content5="filecontent5" oid1=$(calc_oid "$content1") oid2=$(calc_oid "$content2") oid3=$(calc_oid "$content3") oid4=$(calc_oid "$content4") oid5=$(calc_oid "$content5") echo "[ { \"CommitDate\":\"$(get_date -6m)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content1}, \"Data\":\"$content1\"}] }, { \"CommitDate\":\"$(get_date -3m)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content2}, \"Data\":\"$content2\"}] }, { \"CommitDate\":\"$(get_date -1m)\", \"NewBranch\":\"other_branch\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content5}, \"Data\":\"$content5\"}] }, { \"CommitDate\":\"$(get_date -1m)\", \"ParentBranches\":[\"main\"], \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content3}, \"Data\":\"$content3\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content4}, \"Data\":\"$content4\"}] } ]" | lfstest-testutils addcommits git lfs push origin main git lfs push origin other_branch assert_server_object "$reponame" "$oid1" assert_server_object "$reponame" "$oid2" assert_server_object "$reponame" "$oid3" assert_server_object "$reponame" "$oid4" assert_server_object "$reponame" "$oid5" ) end_test begin_test "push with invalid remote" ( set -e cd repo git lfs push not-a-remote 2>&1 | tee push.log grep "Invalid remote name" push.log ) end_test begin_test "push ambiguous branch name" ( set -e reponame="$(basename
"$0" ".sh")-ambiguous-branch" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log NUMFILES=5 # generate content we'll use for ((a=0; a < NUMFILES ; a++)) do content[$a]="filecontent$a" oid[$a]=$(calc_oid "${content[$a]}") done echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[0]}, \"Data\":\"${content[0]}\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content[1]}, \"Data\":\"${content[1]}\"}] }, { \"NewBranch\":\"ambiguous\", \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":${#content[2]}, \"Data\":\"${content[2]}\"}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[3]}, \"Data\":\"${content[3]}\"}] }, { \"ParentBranches\":[\"main\"], \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[4]}, \"Data\":\"${content[4]}\"}] } ]" | lfstest-testutils addcommits # create tag with same name as branch git tag ambiguous # lfs push main, should work git lfs push origin main # push ambiguous, does not fail since lfs scans git with sha, not ref name git lfs push origin ambiguous ) end_test begin_test "push (retry with expired actions)" ( set -e reponame="push_retry_expired_action" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" contents="return-expired-action" contents_oid="$(calc_oid "$contents")" contents_size="$(printf "%s" "$contents" | wc -c | awk '{ print $1 }')" printf "%s" "$contents" > a.dat git add .gitattributes a.dat git commit -m "add a.dat, .gitattributes" 2>&1 | tee commit.log grep "main (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log GIT_TRACE=1 git push origin main 2>&1 | tee push.log expected="enqueue retry #1 after 0.25s for \"$contents_oid\" (size: $contents_size): LFS: action \"upload\" expires at" grep "$expected" push.log grep "Uploading LFS objects: 100% (1/1), 21 B" push.log ) end_test begin_test "push to raw remote url" ( set -e setup_remote_repo "push-raw" mkdir push-raw cd push-raw git init git lfs track "*.dat" contents="raw" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > raw.dat git add raw.dat .gitattributes git commit -m "add" 2>&1 | tee commit.log grep "main (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 raw.dat" commit.log grep "create mode 100644 .gitattributes" commit.log refute_server_object push-raw "$contents_oid" git lfs push $GITSERVER/push-raw main assert_server_object push-raw "$contents_oid" ) end_test begin_test "push (with invalid object size)" ( set -e reponame="push-invalid-object-size" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" contents="return-invalid-size" printf "%s" "$contents" > a.dat git add a.dat .gitattributes git commit -m "add a.dat, .gitattributes" 2>&1 | tee commit.log grep "main (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log set +e git push origin main 2>&1 2> push.log res="$?" 
set -e grep "invalid size (got: -1)" push.log [ "0" -eq "$(grep -c "panic" push.log)" ] [ "0" -ne "$res" ] refute_server_object "$reponame" "$(calc_oid "$contents")" ) end_test begin_test "push with deprecated _links" ( set -e reponame="$(basename "$0" ".sh")-deprecated" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="send-deprecated-links" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a.dat" git push origin main assert_server_object "$reponame" "$contents_oid" ) end_test begin_test "push with invalid pushInsteadof" ( set -e push_repo_setup "push-invalid-pushinsteadof" # set pushInsteadOf to rewrite the href of uploading LFS object. git config url."$GITSERVER/storage/invalid".pushInsteadOf "$GITSERVER/storage/" # Enable href rewriting explicitly. git config lfs.transfer.enablehrefrewrite true set +e git lfs push origin main > push.log 2>&1 res=$? set -e [ "$res" = "2" ] # check rewritten href is used to upload LFS object. grep "LFS: Authorization error: $GITSERVER/storage/invalid" push.log # lfs-push succeed after unsetting enableHrefRewrite config git config --unset lfs.transfer.enablehrefrewrite git lfs push origin main ) end_test begin_test 'push with data the server already has' ( set -e reponame="push-server-data" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="abc123" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a.dat" git push origin main assert_server_object "$reponame" "$contents_oid" git checkout -b side # Use a different file name for the second file; otherwise, this test # unexpectedly passes with the old code, since we fail to notice that the # object we run through the clean filter is not the object we wanted. contents2="def456" contents2_oid="$(calc_oid "$contents2")" printf "%s" "$contents2" > b.dat git add b.dat git commit -m "add b.dat" # We remove the original object. The server already has this. delete_local_object "$contents_oid" # We use the URL so that we cannot take advantage of the existing "origin/*" # refs that we know the server must have. We will traverse the entire history # for this push, and we should not fail because the server already has the # object we deleted above. git push "$(git config remote.origin.url)" side assert_server_object "$reponame" "$contents2_oid" ) end_test begin_test 'push with multiple refs and data the server already has' ( set -e reponame="push-multi-ref-server-data" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="abc123" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a.dat" git push origin main assert_server_object "$reponame" "$contents_oid" contents2="def456" contents2_oid="$(calc_oid "$contents2")" printf "%s" "$contents2" > b.dat git add b.dat git commit -m "add b.dat" # Create a tag. Normally this would cause the entire history to be traversed # since it's a new ref, but we no longer do that since we're pushing multiple # refs. git tag -m v1.0.0 -a v1.0.0 # We remove the original object. The server already has this. 
delete_local_object "$contents_oid" # We use the URL so that we cannot take advantage of the existing "origin/*" # refs that we know the server must have. GIT_TRACE=1 GIT_TRANSFER_TRACE=1 GIT_CURL_VERBOSE=1 \ git push "$(git config remote.origin.url)" main v1.0.0 2>&1 | tee push.log # We should not find a batch request for the object which is in the earlier # version of main, since we know the remote side has it. [ "$(grep -c "$contents_oid" push.log)" = 0 ] # Yet we should have pushed the new object successfully. assert_server_object "$reponame" "$contents2_oid" ) end_test begin_test 'push with multiple tag refs' ( set -e reponame="push-multi-ref-tags" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="abc123" contents_oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a.dat" git tag v1.0.0 git push origin main v1.0.0 assert_server_object "$reponame" "$contents_oid" contents2="def456" contents2_oid="$(calc_oid "$contents2")" printf "%s" "$contents2" > b.dat git add b.dat git commit -m "add b.dat" git tag v1.0.1 git lfs push origin v1.0.1 assert_server_object "$reponame" "$contents2_oid" ) end_test begin_test "push custom reference" ( set -e reponame="push-custom-reference" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" # generate content we'll use content="filecontent" oid=$(calc_oid "$content") echo "[ { \"CommitDate\":\"$(get_date -6m)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content}, \"Data\":\"$content\"}] } ]" | lfstest-testutils addcommits # Create and try pushing a reference in a nonstandard namespace, that is, # outside of refs/heads, refs/tags, and refs/remotes. git update-ref refs/custom/remote/heads/main refs/heads/main git lfs push origin refs/custom/remote/heads/main assert_server_object "$reponame" "$oid" ) end_test begin_test "push --object-id (invalid value)" ( set -e push_all_setup "push-invalid-oid" git lfs push --object-id origin '' 2>&1 | tee push.log git lfs push --object-id origin "${oid1:0:3}" 2>&1 | tee -a push.log [ "$(grep -c 'too short object ID' push.log)" -eq 2 ] ) end_test begin_test "storage upload with compression" ( set -e reponame="storage-compress" setup_remote_repo "$reponame" clone_repo "$reponame" storage-compress contents="storage-compress" oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit -m "initial commit" GIT_CURL_VERBOSE=1 git push origin main | tee push.log assert_server_object "$reponame" "$oid" pushd .. git \ -c "filter.lfs.process=" \ -c "filter.lfs.smudge=cat" \ -c "filter.lfs.required=false" \ clone "$GITSERVER/$reponame" "$reponame-assert" cd "$reponame-assert" git config credential.helper lfstest GIT_TRACE=1 git lfs pull origin main 2>&1 | tee pull.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git lfs pull origin main\` to succeed ..." exit 1 fi grep "decompressed gzipped response" pull.log assert_local_object "$oid" "${#contents}" popd ) end_test git-lfs-3.6.1/t/t-reference-clone.sh000077500000000000000000000040321472372047300171750ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" assert_same_inode() { local repo1=$1 local repo2=$2 local oid=$3 if ! 
uname -s | grep -qE 'CYGWIN|MSYS|MINGW'; then cfg1=$(cd "$repo1"; git lfs env | grep LocalMediaDir) f1="${cfg1:14}/${oid:0:2}/${oid:2:2}/$oid" inode1=$(ls -i $f1 | cut -f1 -d\ ) cfg2=$(cd "$repo2"; git lfs env | grep LocalMediaDir) f2="${cfg2:14}/${oid:0:2}/${oid:2:2}/$oid" inode2=$(ls -i $f2 | cut -f1 -d\ ) [ "$inode1" == "$inode2" ] fi } begin_test "clone with reference" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" ref_repo=clone_reference_repo ref_repo_dir=$TRASHDIR/$ref_repo clone_repo "$reponame" "$ref_repo" git lfs track "*.dat" contents="a" oid=$(calc_oid "$contents") printf "%s" "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 git push origin main delete_server_object "$reponame" "$oid" repo=test_repo repo_dir=$TRASHDIR/$repo git clone --reference "$ref_repo_dir/.git" \ "$GITSERVER/$reponame" "$repo_dir" cd "$TRASHDIR/$repo" assert_pointer "main" "a.dat" "$oid" 1 assert_same_inode "$repo_dir" "$ref_repo_dir" "$oid" ) end_test begin_test "fetch from clone reference" ( set -e reponame="$(basename "$0" ".sh")2" setup_remote_repo "$reponame" ref_repo=clone_reference_repo2 ref_repo_dir=$TRASHDIR/$ref_repo clone_repo "$reponame" "$ref_repo" repo=test_repo2 repo_dir=$TRASHDIR/$repo git clone --reference "$ref_repo_dir/.git" \ "$GITSERVER/$reponame" "$repo_dir" 2> clone.log cd "$ref_repo_dir" git lfs track "*.dat" contents="a" oid=$(calc_oid "$contents") printf "%s" "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 git push origin main delete_server_object "$reponame" "$oid" cd "$repo_dir" GIT_LFS_SKIP_SMUDGE=1 git pull origin main git lfs pull assert_pointer "main" "a.dat" "$oid" 1 assert_same_inode "$TRASHDIR/$repo" "$TRASHDIR/$ref_repo" "$oid" ) end_test git-lfs-3.6.1/t/t-repo-format.sh000077500000000000000000000012471472372047300164010ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "repository format version" ( set -e reponame="lfs-repo-version" git init $reponame cd $reponame [ -z "$(git config --local lfs.repositoryFormatVersion)" ] git lfs track '*.dat' [ "$(git config --local lfs.repositoryFormatVersion)" = "0" ] git config --local lfs.repositoryFormatVersion 1 git lfs track '*.bin' >output 2>&1 && exit 1 cat output grep "Unknown repository format version: 1" output git config --local --unset lfs.repositoryFormatVersion # Verify that global settings are ignored. git config --global lfs.repositoryFormatVersion 1 git lfs track '*.bin' ) end_test git-lfs-3.6.1/t/t-resume-http-range.sh000077500000000000000000000073501472372047300175160ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "resume-http-range" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" $reponame git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # this string announces to server that we want a test that # interrupts the transfer when started from 0 to cause resume contents="status-batch-resume-206" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log git push origin main assert_server_object "$reponame" "$contents_oid" # delete local copy then fetch it back # server will abort the transfer mid way (so will error) when not resuming # then we can restart it rm -rf .git/lfs/objects git lfs fetch 2>&1 | tee fetchinterrupted.log refute_local_object "$contents_oid" # now fetch again, this should try to resume and server should send remainder # this time (it does not cut short when Range is requested) GIT_TRACE=1 git lfs fetch 2>&1 | tee fetchresume.log grep "xfer: server accepted resume" fetchresume.log assert_local_object "$contents_oid" "${#contents}" ) end_test begin_test "resume-http-range-fallback" ( set -e reponame="resume-http-range-fallback" setup_remote_repo "$reponame" clone_repo "$reponame" $reponame git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # this string announces to server that we want it to abort the download part # way, but reject the Range: header and fall back on re-downloading instead contents="batch-resume-fail-fallback" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log git push origin main assert_server_object "$reponame" "$contents_oid" # delete local copy then fetch it back # server will abort the transfer mid way (so will error) when not resuming # then we can restart it rm -rf .git/lfs/objects git lfs fetch 2>&1 | tee fetchinterrupted.log refute_local_object "$contents_oid" # now fetch again, this should try to resume but server should reject the Range # header, which should cause client to re-download GIT_TRACE=1 git lfs fetch 2>&1 | tee fetchresumefallback.log grep "xfer: server rejected resume" fetchresumefallback.log # re-download should still have worked assert_local_object "$contents_oid" "${#contents}" ) end_test begin_test "resume-http-range-retry" ( set -e reponame="resume-http-range-retry" setup_remote_repo "$reponame" clone_repo "$reponame" $reponame git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # This string announces to server that we want a test that strictly handles # Range headers, rejecting any where the latter part of the range is smaller # than the former part. contents="status-batch-retry" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log git push origin main assert_server_object "$reponame" "$contents_oid" # Delete local copy then fetch it back. rm -rf .git/lfs/objects refute_local_object "$contents_oid" # Create a partial corrupt object. mkdir .git/lfs/incomplete printf "%s" "${contents/st/aa}" >".git/lfs/incomplete/$contents_oid.tmp" # The first download may fail with an error; run a second time to make sure # that we detect the corrupt file and retry. 
GIT_TRACE=1 git lfs fetch 2>&1 | tee fetchresume.log GIT_TRACE=1 git lfs fetch 2>&1 | tee fetchresume.log assert_local_object "$contents_oid" "${#contents}" ) end_test git-lfs-3.6.1/t/t-resume-tus.sh000077500000000000000000000043471472372047300162610ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "tus-upload-uninterrupted" ( set -e # this repo name is the indicator to the server to use tus reponame="test-tus-upload" setup_remote_repo "$reponame" clone_repo "$reponame" $reponame git config lfs.tustransfers true git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="send-verify-action" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin main 2>&1 | tee pushtus.log grep "xfer: tus.io uploading" pushtus.log assert_server_object "$reponame" "$contents_oid" ) end_test begin_test "tus-upload-interrupted-resume" ( set -e # this repo name is the indicator to the server to use tus, AND to # interrupt the upload part way reponame="test-tus-upload-interrupt" setup_remote_repo "$reponame" clone_repo "$reponame" $reponame git config lfs.tustransfers true git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents_verify="send-verify-action" contents_verify_oid="$(calc_oid "$contents_verify")" # this is the content of the file whose upload the server will interrupt # part way through, forcing the client to resume it on retry contents="234587134187634598o634857619384765b747qcvtuedvoaicwtvseudtvcoqi7280r7qvow4i7r8c46pr9q6v9pri6ioq2r8" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > a.dat printf "%s" "$contents_verify" > verify.dat git add a.dat verify.dat git add .gitattributes git commit -m "add a.dat, verify.dat" 2>&1 | tee commit.log GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin main 2>&1 | tee pushtus_resume.log # first attempt will start from the beginning grep "xfer: tus.io uploading" pushtus_resume.log grep "HTTP: 500" pushtus_resume.log # that will have failed but retry on 500 will resume it grep "xfer: tus.io resuming" pushtus_resume.log grep "HTTP: 204" pushtus_resume.log # should have completed in the end assert_server_object "$reponame" "$contents_oid" assert_server_object "$reponame" "$contents_verify_oid" ) end_test git-lfs-3.6.1/t/t-smudge.sh000077500000000000000000000201651472372047300154320ustar00rootroot00000000000000#!/usr/bin/env bash .
"$(dirname "$0")/testlib.sh" begin_test "smudge" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" echo "smudge a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" # smudge works even though it hasn't been pushed, by reading from .git/lfs/objects output="$(pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9 | git lfs smudge)" [ "smudge a" = "$output" ] git push origin main # download it from the git lfs server rm -rf .git/lfs/objects output="$(pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9 | git lfs smudge)" [ "smudge a" = "$output" ] ) end_test begin_test "smudge with temp file" ( set -e cd repo rm -rf .git/lfs/objects mkdir -p .git/lfs/tmp/objects touch .git/lfs/tmp/objects/fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254-1 pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9 | GIT_TRACE=5 git lfs smudge | tee smudge.log [ "smudge a" = "$(cat smudge.log)" ] || { rm -rf .git/lfs/tmp git lfs logs last exit 1 } ) end_test begin_test "smudge with invalid pointer" ( set -e cd repo [ "wat" = "$(echo "wat" | git lfs smudge)" ] [ "not a git-lfs file" = "$(echo "not a git-lfs file" | git lfs smudge)" ] [ "version " = "$(echo "version " | git lfs smudge)" ] # force use of a spool file with non-pointer input longer than max buffer spool="$(lfstest-genrandom --base64 2048)" [ "$spool" = "$(echo "$spool" | git lfs smudge)" ] ) end_test begin_test "smudge include/exclude" ( set -e reponame="$(basename "$0" ".sh")-includeexclude" setup_remote_repo "$reponame" clone_repo "$reponame" includeexclude git lfs track "*.dat" echo "smudge a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" pointer="$(pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9)" # smudge works even though it hasn't been pushed, by reading from .git/lfs/objects [ "smudge a" = "$(echo "$pointer" | git lfs smudge)" ] git push origin main # this WOULD download except we're going to prevent it with include/exclude rm -rf .git/lfs/objects git config "lfs.fetchexclude" "a*" [ "$pointer" = "$(echo "$pointer" | git lfs smudge a.dat)" ] mkdir -p foo/bar echo "smudge a" > foo/a.dat echo "smudge a" > foo/bar/a.dat git add foo git commit -m 'add foo' git push origin main # The Git LFS objects for a.dat and foo/bar/a.dat would both download except # we're going to prevent them from doing so with include/exclude. rm -rf .git/lfs/objects # We also need to prevent MSYS from rewriting /foo into a Windows path. MSYS_NO_PATHCONV=1 git config "lfs.fetchinclude" "/foo" MSYS_NO_PATHCONV=1 git config "lfs.fetchexclude" "/foo/bar" [ "$pointer" = "$(echo "$pointer" | git lfs smudge a.dat)" ] [ "smudge a" = "$(echo "$pointer" | git lfs smudge foo/a.dat)" ] [ "$pointer" = "$(echo "$pointer" | git lfs smudge foo/bar/a.dat)" ] ) end_test begin_test "smudge with skip" ( set -e reponame="$(basename "$0" ".sh")-skip" setup_remote_repo "$reponame" clone_repo "$reponame" "skip" git lfs track "*.dat" echo "smudge a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" pointer="$(pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9)" [ "smudge a" = "$(echo "$pointer" | git lfs smudge)" ] git push origin main # Must clear the cache because smudge will use # cached objects even with --skip/GIT_LFS_SKIP_SMUDGE # (--skip applies to whether or not it downloads). 
rm -rf .git/lfs/objects [ "$pointer" = "$(echo "$pointer" | GIT_LFS_SKIP_SMUDGE=1 git lfs smudge)" ] echo "test clone with env" export GIT_LFS_SKIP_SMUDGE=1 env | grep LFS_SKIP clone_repo "$reponame" "skip-clone-env" [ "$pointer" = "$(cat a.dat)" ] git lfs pull [ "smudge a" = "$(cat a.dat)" ] echo "test clone without env" unset GIT_LFS_SKIP_SMUDGE clone_repo "$reponame" "no-skip" [ "smudge a" = "$(cat a.dat)" ] echo "test clone with init --skip-smudge" git lfs install --skip-smudge clone_repo "$reponame" "skip-clone-init" [ "$pointer" = "$(cat a.dat)" ] git lfs install --force ) end_test begin_test "smudge clone with include/exclude" ( set -e reponame="smudge_include_exclude" setup_remote_repo "$reponame" clone_repo "$reponame" "repo_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log grep "main (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat a.dat)" ] assert_local_object "$contents_oid" 1 git push origin main 2>&1 | tee push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log grep "main -> main" push.log assert_server_object "$reponame" "$contents_oid" clone="$TRASHDIR/clone_$reponame" git -c lfs.fetchexclude="a*" clone "$GITSERVER/$reponame" "$clone" pushd "$clone" # Should have succeeded but not downloaded refute_local_object "$contents_oid" popd rm -rf "$clone" contents2="b" contents2_oid=$(calc_oid "$contents2") contents3="c" contents3_oid=$(calc_oid "$contents3") mkdir -p foo/bar printf "%s" "$contents2" > foo/b.dat printf "%s" "$contents3" > foo/bar/c.dat git add foo git commit -m 'add foo' assert_local_object "$contents2_oid" 1 assert_local_object "$contents3_oid" 1 git push origin main assert_server_object "$reponame" "$contents2_oid" assert_server_object "$reponame" "$contents3_oid" # The Git LFS objects for a.dat and foo/bar/a.dat would both download except # we're going to prevent them from doing so with include/exclude. # We also need to prevent MSYS from rewriting /foo into a Windows path. 
MSYS_NO_PATHCONV=1 git config --global "lfs.fetchinclude" "/foo" MSYS_NO_PATHCONV=1 git config --global "lfs.fetchexclude" "/foo/bar" git clone "$GITSERVER/$reponame" "$clone" pushd "$clone" refute_local_object "$contents_oid" assert_local_object "$contents2_oid" 1 refute_local_object "$contents3_oid" popd ) end_test begin_test "smudge skip download failure" ( set -e reponame="$(basename "$0" ".sh")-skipdownloadfail" setup_remote_repo "$reponame" clone_repo "$reponame" skipdownloadfail git lfs track "*.dat" echo "smudge a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" pointer="$(pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9)" # smudge works even though it hasn't been pushed, by reading from .git/lfs/objects [ "smudge a" = "$(echo "$pointer" | git lfs smudge)" ] git push origin main # make it try to download but we're going to make it fail rm -rf .git/lfs/objects git remote set-url origin httpnope://nope.com/nope # this should fail set +e echo "$pointer" | git lfs smudge a.dat; test ${PIPESTATUS[1]} -ne 0 set -e git config lfs.skipdownloaderrors true echo "$pointer" | git lfs smudge a.dat # check content too [ "$pointer" = "$(echo "$pointer" | git lfs smudge a.dat)" ] # now try env var git config --unset lfs.skipdownloaderrors echo "$pointer" | GIT_LFS_SKIP_DOWNLOAD_ERRORS=1 git lfs smudge a.dat ) end_test begin_test "smudge no ref, non-origin" ( set -e reponame="$(basename "$0" ".sh")-no-ref-non-origin" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame-1" git lfs track "*.dat" echo "smudge a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git push origin main main=$(git rev-parse main) cd .. git init "$reponame" cd "$reponame" # We intentionally pick a name that is not origin to exercise the remote # selection code path. Since there is only one remote, we should use it # regardless of its name git config remote.random.url "$GITSERVER/$reponame" git fetch "$GITSERVER/$reponame" git checkout "$main" [ "smudge a" = "$(cat a.dat)" ] ) end_test git-lfs-3.6.1/t/t-ssh.sh000077500000000000000000000030321472372047300147350ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "ssh with proxy command in lfs.url (default variant)" ( set -e reponame="batch-ssh-proxy-default" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" sshurl="${GITSERVER/http:\/\//ssh://-oProxyCommand=ssh-proxy-test/}/$reponame" git config lfs.url "$sshurl" contents="test" oid="$(calc_oid "$contents")" git lfs track "*.dat" printf "%s" "$contents" > test.dat git add .gitattributes test.dat git commit -m "initial commit" unset GIT_SSH_VARIANT GIT_TRACE=1 git push origin main 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: push succeeded" exit 1 fi grep 'expected.*git@127.0.0.1' push.log grep "lfs-ssh-echo -- -oProxyCommand" push.log ) end_test begin_test "ssh with proxy command in lfs.url (custom variant)" ( set -e reponame="batch-ssh-proxy-simple" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" sshurl="${GITSERVER/http:\/\//ssh://-oProxyCommand=ssh-proxy-test/}/$reponame" git config lfs.url "$sshurl" contents="test" oid="$(calc_oid "$contents")" git lfs track "*.dat" printf "%s" "$contents" > test.dat git add .gitattributes test.dat git commit -m "initial commit" export GIT_SSH_VARIANT=simple GIT_TRACE=1 git push origin main 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: push succeeded" exit 1 fi grep 'expected.*git@127.0.0.1' push.log grep "lfs-ssh-echo oProxyCommand" push.log ) end_test git-lfs-3.6.1/t/t-standalone-file.sh000077500000000000000000000377561472372047300172310ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" do_upload_download_test () { git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Tracking" git checkout -b test # set up a decent amount of data so that there's work for multiple concurrent adapters echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"verify.dat\",\"Size\":18,\"Data\":\"send-verify-action\"}, {\"Filename\":\"file1.dat\",\"Size\":1024}, {\"Filename\":\"file2.dat\",\"Size\":750}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":1050}, {\"Filename\":\"file3.dat\",\"Size\":660}, {\"Filename\":\"file4.dat\",\"Size\":230}] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":1200}, {\"Filename\":\"file6.dat\",\"Size\":300}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":120}, {\"Filename\":\"file5.dat\",\"Size\":450}, {\"Filename\":\"file7.dat\",\"Size\":520}, {\"Filename\":\"file8.dat\",\"Size\":2048}] } ]" | lfstest-testutils addcommits GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin test 2>&1 | tee pushcustom.log # use PIPESTATUS otherwise we get exit code from tee [ ${PIPESTATUS[0]} = "0" ] ourobjects=$(cd .git && find lfs/objects -type f | sort) theirobjects=$(cd $gitdir && find lfs/objects -type f | sort) # Make sure the lock verification is not attempted. 
grep "locks/verify$" pushcustom.log && false grep "xfer: started custom adapter process" pushcustom.log grep "Uploading LFS objects: 100% (12/12)" pushcustom.log [ "$ourobjects" = "$theirobjects" ] rm -rf .git/lfs/objects GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git lfs fetch --all 2>&1 | tee fetchcustom.log [ ${PIPESTATUS[0]} = "0" ] objectlist=$(find .git/lfs/objects -type f) [ "$(echo "$objectlist" | wc -l)" -eq 12 ] } do_local_path_test () { local reponame="$1" local suffix="$2" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Tracking" git checkout -b test # set up a decent amount of data so that there's work for multiple concurrent adapters echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"verify.dat\",\"Size\":18,\"Data\":\"send-verify-action\"}, {\"Filename\":\"file1.dat\",\"Size\":1024}, {\"Filename\":\"file2.dat\",\"Size\":750}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":1050}, {\"Filename\":\"file3.dat\",\"Size\":660}, {\"Filename\":\"file4.dat\",\"Size\":230}] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":1200}, {\"Filename\":\"file6.dat\",\"Size\":300}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":120}, {\"Filename\":\"file5.dat\",\"Size\":450}, {\"Filename\":\"file7.dat\",\"Size\":520}, {\"Filename\":\"file8.dat\",\"Size\":2048}] } ]" | lfstest-testutils addcommits testdir="$(pwd)" cd "$TRASHDIR" # Check a clone using an absolute Unix-style path. GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git clone "$testdir$suffix" "$reponame-repo1" 2>&1 | tee clonecustom.log (cd "$reponame-repo1" && git lfs fsck) grep "xfer: started custom adapter process" clonecustom.log # Check a clone using a relative path. GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git clone "$reponame-repo1$suffix" "$reponame-repo2" 2>&1 | tee clonecustom.log (cd "$reponame-repo2" && git lfs fsck) grep "xfer: started custom adapter process" clonecustom.log # Check a clone using an absolute native-style path. 
GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git clone "$(native_path "$testdir")$suffix" "$reponame-repo3" 2>&1 | tee clonecustom.log (cd "$reponame-repo3" && git lfs fsck) grep "xfer: started custom adapter process" clonecustom.log } begin_test "standalone-file-upload-download-bare" ( set -e # setup a git repo to be used as a local repo, not remote reponame="standalone-file-upload-download-bare" setup_remote_repo "$reponame" git init --bare "$reponame-2.git" gitdir="$(pwd)/$reponame-2.git" # clone directly, not through lfstest-gitserver clone_repo_url "$REMOTEDIR/$reponame.git" $reponame git remote set-url origin "file://$(urlify "$gitdir")" do_upload_download_test ) end_test begin_test "standalone-file-upload-download-non-bare" ( set -e # setup a git repo to be used as a local repo, not remote reponame="standalone-file-upload-download-non-bare" setup_remote_repo "$reponame" git init "$reponame-2.git" repo2="$(pwd)/$reponame-2.git" gitdir="$(pwd)/$reponame-2.git/.git" # clone directly, not through lfstest-gitserver clone_repo_url "$REMOTEDIR/$reponame.git" $reponame git remote set-url origin "file://$(urlify "$repo2")" do_upload_download_test ) end_test begin_test "standalone-file-download-missing-file" ( set -e # setup a git repo to be used as a local repo, not remote reponame="standalone-file-download-missing-file" setup_remote_repo "$reponame" otherrepo="$(pwd)/$reponame-2.git" git init --bare "$otherrepo" # clone directly, not through lfstest-gitserver clone_repo_url "$REMOTEDIR/$reponame.git" $reponame git remote set-url origin "file://$(urlify "$otherrepo")" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Tracking" git checkout -b test # set up a decent amount of data so that there's work for multiple concurrent adapters echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"verify.dat\",\"Size\":18,\"Data\":\"send-verify-action\"}, {\"Filename\":\"file1.dat\",\"Size\":1024}, {\"Filename\":\"file2.dat\",\"Size\":750}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":1050}, {\"Filename\":\"file3.dat\",\"Size\":660}, {\"Filename\":\"file4.dat\",\"Size\":230}] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":1200}, {\"Filename\":\"file6.dat\",\"Size\":300}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":120}, {\"Filename\":\"file5.dat\",\"Size\":450}, {\"Filename\":\"file7.dat\",\"Size\":520}, {\"Filename\":\"file8.dat\",\"Size\":2048}] } ]" | lfstest-testutils addcommits GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin test 2>&1 | tee pushcustom.log # use PIPESTATUS otherwise we get exit code from tee [ ${PIPESTATUS[0]} = "0" ] # Make sure the lock verification is not attempted. grep "locks/verify$" pushcustom.log && false grep "xfer: started custom adapter process" pushcustom.log grep "Uploading LFS objects: 100% (12/12)" pushcustom.log # Delete an object from the remote side. Any object will do. rm -f $(find "$otherrepo/lfs/objects" -type f | head -n1) rm -rf .git/lfs/objects GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git lfs fetch --all 2>&1 | tee fetchcustom.log # Make sure we failed. [ ${PIPESTATUS[0]} != "0" ] # Make sure we downloaded the rest of the objects. 
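# Twelve objects were pushed above and exactly one was deleted from the
# remote, so a partial fetch should leave eleven objects in the local
# store; the count below relies on "find" printing one path per line.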
objectlist=$(find .git/lfs/objects -type f) [ "$(echo "$objectlist" | wc -l)" -eq 11 ] ) end_test begin_test "standalone-file-clone" ( set -e # setup a git repo to be used as a local repo, not remote reponame="standalone-file-clone" setup_remote_repo "$reponame" # clone directly, not through lfstest-gitserver clone_repo_url "$REMOTEDIR/$reponame.git" $reponame git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Tracking" git checkout -b test # set up a decent amount of data so that there's work for multiple concurrent adapters echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"verify.dat\",\"Size\":18,\"Data\":\"send-verify-action\"}, {\"Filename\":\"file1.dat\",\"Size\":1024}, {\"Filename\":\"file2.dat\",\"Size\":750}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":1050}, {\"Filename\":\"file3.dat\",\"Size\":660}, {\"Filename\":\"file4.dat\",\"Size\":230}] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":1200}, {\"Filename\":\"file6.dat\",\"Size\":300}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":120}, {\"Filename\":\"file5.dat\",\"Size\":450}, {\"Filename\":\"file7.dat\",\"Size\":520}, {\"Filename\":\"file8.dat\",\"Size\":2048}] } ]" | lfstest-testutils addcommits testdir="$(pwd)" cd "$TRASHDIR" GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git clone "file://$(urlify "$testdir")" repo3 2>&1 | tee clonecustom.log grep "xfer: started custom adapter process" clonecustom.log ) end_test begin_test "standalone-file-local-path" ( set -e # setup a git repo to be used as a local repo, not remote reponame="standalone-file-local-path" setup_remote_repo "$reponame" # clone directly, not through lfstest-gitserver clone_repo_url "$REMOTEDIR/$reponame.git" $reponame do_local_path_test "$reponame" "" ) end_test begin_test "standalone-file-local-path-trailing-slash" ( set -e # setup a git repo to be used as a local repo, not remote reponame="standalone-file-local-path-trailing-slash" setup_remote_repo "$reponame" # clone directly, not through lfstest-gitserver clone_repo_url "$REMOTEDIR/$reponame.git/" $reponame do_local_path_test "$reponame" "/" ) end_test begin_test "standalone-file-lfs.url file URL" ( set -e # setup a git repo to be used as a local repo, not remote reponame="standalone-file-lfsurl" setup_remote_repo "$reponame" # clone directly, not through lfstest-gitserver clone_repo_url "$REMOTEDIR/$reponame.git" $reponame otherrepo="$(pwd)/$reponame-2.git" git init --bare "$otherrepo" wrongrepo="$(pwd)/$reponame-3.git" git init --bare "$wrongrepo" git remote set-url origin "file://$(urlify "$wrongrepo")" git config lfs.url "file://$(urlify "$otherrepo")" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Tracking" git checkout -b test # set up a decent amount of data so that there's work for multiple concurrent adapters echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"verify.dat\",\"Size\":18,\"Data\":\"send-verify-action\"}, {\"Filename\":\"file1.dat\",\"Size\":1024}, {\"Filename\":\"file2.dat\",\"Size\":750}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":1050}, {\"Filename\":\"file3.dat\",\"Size\":660}, {\"Filename\":\"file4.dat\",\"Size\":230}] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":1200},
{\"Filename\":\"file6.dat\",\"Size\":300}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":120}, {\"Filename\":\"file5.dat\",\"Size\":450}, {\"Filename\":\"file7.dat\",\"Size\":520}, {\"Filename\":\"file8.dat\",\"Size\":2048}] } ]" | lfstest-testutils addcommits GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin test 2>&1 | tee pushcustom.log # use PIPESTATUS otherwise we get exit code from tee [ ${PIPESTATUS[0]} = "0" ] # Make sure the lock verification is not attempted. grep "locks/verify$" pushcustom.log && false grep "xfer: started custom adapter process" pushcustom.log grep "Uploading LFS objects: 100% (12/12)" pushcustom.log # Make sure we didn't write to the wrong repo. objectlist=$(find "$wrongrepo/lfs/objects" -type f || true) [ -z "$objectlist" ] # Make sure we uploaded the expected number of objects. objectlist=$(find "$otherrepo/lfs/objects" -type f || true) [ "$(echo "$objectlist" | wc -l)" -eq 12 ] ) end_test begin_test "standalone-file-lfs.url http URL" ( set -e reponame="standalone-file-lfsurl-http" setup_remote_repo "$reponame" # clone directly, not through lfstest-gitserver clone_repo "$reponame" "$reponame" wrongrepo="$(pwd)/$reponame-2.git" git init --bare "$wrongrepo" git remote set-url origin "file://$(urlify "$wrongrepo")" git config lfs.url "$(repo_endpoint "$GITSERVER" "$reponame")" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Tracking" git checkout -b test # set up a decent amount of data so that there's work for multiple concurrent adapters echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"verify.dat\",\"Size\":18,\"Data\":\"send-verify-action\"}, {\"Filename\":\"file1.dat\",\"Size\":1024}, {\"Filename\":\"file2.dat\",\"Size\":750}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":1050}, {\"Filename\":\"file3.dat\",\"Size\":660}, {\"Filename\":\"file4.dat\",\"Size\":230}] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":1200}, {\"Filename\":\"file6.dat\",\"Size\":300}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":120}, {\"Filename\":\"file5.dat\",\"Size\":450}, {\"Filename\":\"file7.dat\",\"Size\":520}, {\"Filename\":\"file8.dat\",\"Size\":2048}] } ]" | lfstest-testutils addcommits GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin test 2>&1 | tee push.log # use PIPESTATUS otherwise we get exit code from tee [ ${PIPESTATUS[0]} = "0" ] # We should not use the custom adapter process here. grep "xfer: started custom adapter process" push.log && exit 1 grep -F "$GITSERVER/$reponame" push.log # Make sure we didn't write to the wrong repo. 
objectlist=$(find "$wrongrepo/lfs/objects" -type f || true) [ -z "$objectlist" ] rm -fr .git/lfs/objects GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git lfs fetch --all 2>&1 | tee fetch.log grep "xfer: started custom adapter process" fetch.log && exit 1 grep -F "$GITSERVER/$reponame" fetch.log git lfs fsck ) end_test begin_test "standalone-file error" ( set -e reponame="standalone-file-error" setup_remote_repo "$reponame" # clone directly, not through lfstest-gitserver clone_repo "$reponame" "$reponame" otherrepo="$(pwd)/$reponame-2.git" mkdir "$otherrepo" git remote set-url origin "file://$(urlify "$otherrepo")" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Tracking" git checkout -b test # set up a decent amount of data so that there's work for multiple concurrent adapters echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"verify.dat\",\"Size\":18,\"Data\":\"send-verify-action\"}, {\"Filename\":\"file1.dat\",\"Size\":1024}, {\"Filename\":\"file2.dat\",\"Size\":750}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":1050}, {\"Filename\":\"file3.dat\",\"Size\":660}, {\"Filename\":\"file4.dat\",\"Size\":230}] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":1200}, {\"Filename\":\"file6.dat\",\"Size\":300}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":120}, {\"Filename\":\"file5.dat\",\"Size\":450}, {\"Filename\":\"file7.dat\",\"Size\":520}, {\"Filename\":\"file8.dat\",\"Size\":2048}] } ]" | lfstest-testutils addcommits if git push origin test 2>pusherror.log then exit 1 fi cat pusherror.log grep 'not.*a git repository' pusherror.log ) end_test git-lfs-3.6.1/t/t-status.sh000077500000000000000000000272201472372047300154700ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "status" ( set -e mkdir repo-1 cd repo-1 git init git lfs track "*.dat" file_1="some data" file_1_oid="$(calc_oid "$file_1")" file_1_oid_short="$(echo "$file_1_oid" | head -c 7)" printf "%s" "$file_1" > file1.dat git add file1.dat git commit -m "file1.dat" file_1_new="other data" file_1_new_oid="$(calc_oid "$file_1_new")" file_1_new_oid_short="$(echo "$file_1_new_oid" | head -c 7)" printf "%s" "$file_1_new" > file1.dat file_2="file2 data" file_2_oid="$(calc_oid "$file_2")" file_2_oid_short="$(echo "$file_2_oid" | head -c 7)" printf "%s" "$file_2" > file2.dat git add file2.dat file_3="file3 data" file_3_oid="$(calc_oid "$file_3")" file_3_oid_short="$(echo "$file_3_oid" | head -c 7)" printf "%s" "$file_3" > file3.dat git add file3.dat file_3_new="file3 other data" file_3_new_oid="$(calc_oid "$file_3_new")" file_3_new_oid_short="$(echo "$file_3_new_oid" | head -c 7)" printf "%s" "$file_3_new" > file3.dat expected="On branch main Objects to be committed: file2.dat (LFS: $file_2_oid_short) file3.dat (LFS: $file_3_oid_short) Objects not staged for commit: file1.dat (LFS: $file_1_oid_short -> File: $file_1_new_oid_short) file3.dat (File: $file_3_new_oid_short)" [ "$expected" = "$(git lfs status)" ] ) end_test begin_test "status --porcelain" ( set -e mkdir repo-2 cd repo-2 git init git lfs track "*.dat" echo "some data" > file1.dat git add file1.dat git commit -m "file1.dat" echo "other data" > file1.dat echo "file2 data" > file2.dat git add file2.dat echo "file3 data" > file3.dat git add file3.dat echo "file3 other data" > file3.dat expected=" M file1.dat A file3.dat A file2.dat" [ "$expected" = "$(git lfs status --porcelain)" ] ) end_test begin_test "status --json" ( set -e mkdir repo-3 cd repo-3 git init git lfs track "*.dat" echo "some data" > file1.dat git add file1.dat git commit -m "file1.dat" echo "other data" > file1.dat expected='{"files":{"file1.dat":{"status":"M"}}}' [ "$expected" = "$(git lfs status --json)" ] git add file1.dat git commit -m "file1.dat changed" git mv file1.dat file2.dat expected='{"files":{"file2.dat":{"status":"R","from":"file1.dat"}}}' [ "$expected" = "$(git lfs status --json)" ] git commit -m "file1.dat -> file2.dat" # Ensure status --json does not include non-lfs files echo hi > test1.txt git add test1.txt expected='{"files":{}}' [ "$expected" = "$(git lfs status --json)" ] ) end_test begin_test "status in a sub-directory" ( set -e reponame="status-sub-directory" git init "$reponame" cd "$reponame" git lfs track "*.dat" printf "asdf" > file.dat mkdir -p dir git add .gitattributes file.dat git commit -m "initial commit" printf "ASDF" > file.dat expected="On branch main Objects to be committed: Objects not staged for commit: ../file.dat (LFS: f0e4c2f -> File: 99b3bcf)" [ "$expected" = "$(cd dir && git lfs status)" ] ) end_test begin_test "status: outside git repository" ( set +e git lfs status 2>&1 > status.log res=$? set -e if [ "$res" = "0" ]; then echo "Passes because $GIT_LFS_TEST_DIR is unset." 
exit 0 fi [ "$res" = "128" ] grep "Not in a Git repository" status.log ) end_test begin_test "status - before initial commit" ( set -e git init repo-initial cd repo-initial git lfs track "*.dat" # should not fail when nothing to display (ignore output, will be blank) git lfs status contents="some data" contents_oid="$(calc_oid "$contents")" contents_oid_short="$(echo "$contents_oid" | head -c 7)" printf "%s" "$contents" > file1.dat git add file1.dat expected=" Objects to be committed: file1.dat (LFS: $contents_oid_short) Objects not staged for commit:" [ "$expected" = "$(git lfs status)" ] ) end_test begin_test "status shows multiple files with identical contents" ( set -e reponame="uniq-status" mkdir "$reponame" cd "$reponame" git init git lfs track "*.dat" contents="contents" printf "%s" "$contents" > a.dat printf "%s" "$contents" > b.dat git add --all . git lfs status | tee status.log [ "1" -eq "$(grep -c "a.dat" status.log)" ] [ "1" -eq "$(grep -c "b.dat" status.log)" ] ) end_test begin_test "status shows multiple copies of partially staged files" ( set -e reponame="status-partially-staged" git init "$reponame" cd "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents_1="part 1" contents_1_oid="$(calc_oid "$contents_1")" contents_1_oid_short="$(echo "$contents_1_oid" | head -c 7)" printf "%s" "$contents_1" > a.dat # "$contents_1" changes are staged git add a.dat # "$contents_2" changes are unstaged contents_2="part 2" contents_2_oid="$(calc_oid "$contents_2")" contents_2_oid_short="$(echo "$contents_2_oid" | head -c 7)" printf "%s" "$contents_2" > a.dat expected="On branch main Objects to be committed: a.dat (LFS: $contents_1_oid_short) Objects not staged for commit: a.dat (File: $contents_2_oid_short)" actual="$(git lfs status)" diff -u <(echo "$expected") <(echo "$actual") ) end_test begin_test "status: LFS to LFS change" ( set -e reponame="status-lfs-to-lfs-change" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="contents" contents_oid="$(calc_oid "$contents")" contents_oid_short="$(echo "$contents_oid" | head -c 7)" git lfs track "*.dat" git add .gitattributes git commit -m "track *.dat files" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a.dat" contents_new="$contents +extra" contents_new_oid="$(calc_oid "$contents_new")" contents_new_oid_short="$(echo $contents_new_oid | head -c 7)" printf "%s" "$contents_new" > a.dat git add a.dat expected="On branch main Objects to be committed: a.dat (LFS: $contents_oid_short -> LFS: $contents_new_oid_short) Objects not staged for commit:" actual="$(git lfs status)" [ "$expected" = "$actual" ] ) end_test begin_test "status: Git to LFS change" ( set -e reponame="status-git-to-lfs-change" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="contents" contents_oid="$(calc_oid "$contents")" contents_oid_short="$(echo "$contents_oid" | head -c 7)" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a.dat" git lfs track "*.dat" git add .gitattributes git commit -m "track *.dat files" contents_new="$contents +extra" contents_new_oid="$(calc_oid "$contents_new")" contents_new_oid_short="$(echo $contents_new_oid | head -c 7)" printf "%s" "$contents_new" > a.dat git add a.dat expected="On branch main Objects to be committed: a.dat (Git: $contents_oid_short -> LFS: $contents_new_oid_short) Objects not staged for commit:" actual="$(git lfs status)" [ "$expected" = "$actual" ] ) end_test begin_test "status: Git to LFS conversion" ( set 
-e reponame="status-git-to-lfs-conversion" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="contents" contents_oid="$(calc_oid "$contents")" contents_oid_short="$(echo "$contents_oid" | head -c 7)" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a.dat" git lfs track "*.dat" git add .gitattributes git commit -m "track *.dat" git push origin main pushd "$TRASHDIR" > /dev/null clone_repo "$reponame" "$reponame-2" git add a.dat git lfs status 2>&1 | tee status.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "git lfs status should have succeeded, didn't ..." exit 1 fi expected="On branch main Objects to be pushed to origin/main: Objects to be committed: a.dat (Git: $contents_oid_short -> LFS: $contents_oid_short) Objects not staged for commit:" actual="$(cat status.log)" [ "$expected" = "$actual" ] popd > /dev/null ) end_test begin_test "status (missing objects)" ( set -e reponame="status-missing-objects" git init "$reponame" cd "$reponame" git lfs track "*.dat" printf "a" > a.dat git add .gitattributes a.dat git commit -m "initial commit" # Remove the original object "a.dat" (ensure '--no-filters' is not given). oid="$(git hash-object -t blob -- a.dat)" rm -rf ".git/objects/${oid:0:2}/${oid:2}" # Create an unstaged change against a source file that doesn't exist. printf "b" > a.dat git add a.dat git lfs status \ | grep "a.dat (?: -> LFS: $(calc_oid b | head -c 7))" ) end_test begin_test "status (unpushed objects)" ( set -e reponame="status-unpushed-objects" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" git push -u origin main contents="a" oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a large file" expected="On branch main Objects to be pushed to origin/main: a.dat ($oid) Objects to be committed: Objects not staged for commit:" [ "$expected" = "$(git lfs status)" ] ) end_test begin_test "status (without a working copy)" ( reponame="status-no-working-copy.git" git init --bare "$reponame" cd "$reponame" git lfs status 2>&1 | tee status.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "git lfs status should have failed, didn't ..." exit 1 fi [ "This operation must be run in a work tree." 
= "$(cat status.log)" ] ) end_test begin_test "status (deleted files)" ( set -e reponame="status-deleted-files" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" git push -u origin main contents="a" oid="$(calc_oid "$contents")" oid_short="$(calc_oid "$contents" | head -c 7)" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a large file" git rm a.dat expected="On branch main Objects to be pushed to origin/main: a.dat ($oid) Objects to be committed: a.dat (LFS: $oid_short -> File: deleted) Objects not staged for commit:" [ "$expected" = "$(git lfs status)" ] ) end_test begin_test "status (file to dir)" ( set -e reponame="status-file-to-dir" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" printf 'file' > test git add test git commit -m "add test" obj=$(calc_oid "file" | head -c 7) git rm test mkdir test contents="a" oid="$(calc_oid "$contents")" oid_short="$(calc_oid "$contents" | head -c 7)" printf "%s" "$contents" > test/a.dat git add test git commit -m "add files" git reset HEAD~ git add test expected="On branch main Objects to be committed: test (Git: $obj -> File: deleted) test/a.dat (LFS: $oid_short) Objects not staged for commit:" [ "$expected" = "$(git lfs status)" ] ) end_test begin_test "status: permission change" ( set -e # We're using chmod below. if [ "$IS_WINDOWS" -eq 1 ]; then exit 0 fi reponame="status-permission-change" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="contents" git lfs track "*.dat" git add .gitattributes git commit -m "track *.dat" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a.dat" chmod 400 a.dat # A permission change should not result in any output. git lfs status 2>&1 | tee status.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "git lfs status should have succeeded, didn't ..." exit 1 fi expected="On branch main Objects to be committed: Objects not staged for commit:" actual="$(cat status.log)" [ "$expected" = "$actual" ] ) end_test git-lfs-3.6.1/t/t-submodule-lfsconfig.sh000077500000000000000000000037371472372047300201230ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" lfsname="submodule-config-test-lfs" reponame="submodule-config-test-repo" submodname="submodule-config-test-submodule" begin_test "submodule env with .lfsconfig" ( set -e # setup dummy repo with lfs store # no git data will be pushed, just lfs objects setup_remote_repo "$lfsname" echo $GITSERVER/$lfsname.git/info/lfs # setup submodule setup_remote_repo "$submodname" clone_repo "$submodname" submod mkdir dir git config -f .lfsconfig lfs.url "$GITSERVER/$lfsname.git/info/lfs" git lfs track "*.dat" submodcontent="submodule lfs file" submodoid=$(calc_oid "$submodcontent") printf "%s" "$submodcontent" > dir/test.dat git add .lfsconfig .gitattributes dir git commit -m "create submodule" git push origin main assert_server_object "$lfsname" "$submodoid" # setup repo with submodule setup_remote_repo "$reponame" clone_repo "$reponame" repo git config -f .lfsconfig lfs.url "$GITSERVER/$lfsname.git/info/lfs" git submodule add -b main "$GITSERVER/$submodname" sub git submodule update git lfs track "*.dat" mkdir dir repocontent="repository lfs file" repooid=$(calc_oid "$repocontent") printf "%s" "$repocontent" > dir/test.dat git add .gitattributes .lfsconfig .gitmodules dir sub git commit -m "create repo" git push origin main assert_server_object "$lfsname" "$repooid" echo "repo" git lfs env | tee env.log grep "Endpoint=$GITSERVER/$lfsname.git/info/lfs (auth=basic)$" env.log cd sub echo "./sub" git lfs env | tee env.log grep "Endpoint=$GITSERVER/$lfsname.git/info/lfs (auth=basic)$" env.log cd dir echo "./sub/dir" git lfs env | tee env.log grep "Endpoint=$GITSERVER/$lfsname.git/info/lfs (auth=basic)$" env.log ) end_test begin_test "submodule update --init --remote with .lfsconfig" ( set -e clone_repo "$reponame" clone grep "$repocontent" dir/test.dat git submodule update --init --remote grep "$submodcontent" sub/dir/test.dat ) end_test git-lfs-3.6.1/t/t-submodule-recurse.sh000077500000000000000000000027051472372047300176130ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" reponame="submodule-recurse-test-repo" submodname="submodule-recurse-test-submodule" begin_test "submodule with submodule.recurse = true" ( set -e setup_remote_repo "$reponame" setup_remote_repo "$submodname" clone_repo "$submodname" submodule git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log echo "foo" > file.dat git add .gitattributes file.dat git commit -a -m "add file" git push origin main subcommit1=$(git rev-parse HEAD) echo "bar" > file.dat git add file.dat git commit -a -m "update file" git push origin main subcommit2=$(git rev-parse HEAD) clone_repo "$reponame" repo git submodule add "$GITSERVER/$submodname" submodule git submodule update --init --recursive git -C submodule reset --hard "$subcommit1" git add .gitmodules submodule git commit -m "add submodule" git push origin main git checkout -b feature git -C submodule reset --hard "$subcommit2" git add .gitmodules submodule git commit -m "update submodule" git push origin feature clone_repo "$reponame" repo-no-recurse git submodule update --init --recursive git checkout feature if [[ -d "submodule/lfs/logs" ]] then exit 1 fi clone_repo "$reponame" repo-recurse git config submodule.recurse true git submodule update --init --recursive git checkout feature if [[ -d "submodule/lfs/logs" ]] then exit 1 fi ) end_test git-lfs-3.6.1/t/t-submodule.sh000077500000000000000000000057521472372047300161520ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" reponame="submodule-test-repo" submodname="submodule-test-submodule" begin_test "submodule local git dir" ( set -e setup_remote_repo "$reponame" setup_remote_repo "$submodname" clone_repo "$submodname" submod mkdir dir echo "sub module" > dir/README git add dir/README git commit -a -m "submodule readme" git push origin main clone_repo "$reponame" repo git submodule add "$GITSERVER/$submodname" sub git submodule update git add .gitmodules sub git commit -m "add submodule" git push origin main grep "sub module" sub/dir/README || { echo "submodule not setup correctly?" cat sub/dir/README exit 1 } ) end_test begin_test "submodule env" ( set -e # using the local clone from the above test cd repo git lfs env | tee env.log grep "Endpoint=$GITSERVER/$reponame.git/info/lfs (auth=none)$" env.log grep "LocalWorkingDir=$(canonical_path_escaped "$TRASHDIR/repo$")" env.log grep "LocalGitDir=$(canonical_path_escaped "$TRASHDIR/repo/.git$")" env.log grep "LocalGitStorageDir=$(canonical_path_escaped "$TRASHDIR/repo/.git$")" env.log grep "LocalMediaDir=$(canonical_path_escaped "$TRASHDIR/repo/.git/lfs/objects$")" env.log grep "TempDir=$(canonical_path_escaped "$TRASHDIR/repo/.git/lfs/tmp$")" env.log cd .git echo "./.git" git lfs env | tee env.log cat env.log grep "Endpoint=$GITSERVER/$reponame.git/info/lfs (auth=none)$" env.log grep "LocalWorkingDir=$" env.log grep "LocalGitDir=$(canonical_path_escaped "$TRASHDIR/repo/.git$")" env.log grep "LocalGitStorageDir=$(canonical_path_escaped "$TRASHDIR/repo/.git$")" env.log grep "LocalMediaDir=$(canonical_path_escaped "$TRASHDIR/repo/.git/lfs/objects$")" env.log grep "TempDir=$(canonical_path_escaped "$TRASHDIR/repo/.git/lfs/tmp$")" env.log cd ../sub echo "./sub" git lfs env | tee env.log grep "Endpoint=$GITSERVER/$submodname.git/info/lfs (auth=none)$" env.log grep "LocalWorkingDir=$(canonical_path_escaped "$TRASHDIR/repo/sub$")" env.log grep "LocalGitDir=$(canonical_path_escaped "$TRASHDIR/repo/.git/modules/sub$")" env.log grep "LocalGitStorageDir=$(canonical_path_escaped "$TRASHDIR/repo/.git/modules/sub$")" env.log grep "LocalMediaDir=$(canonical_path_escaped "$TRASHDIR/repo/.git/modules/sub/lfs/objects$")" env.log grep "TempDir=$(canonical_path_escaped "$TRASHDIR/repo/.git/modules/sub/lfs/tmp$")" env.log cd dir echo "./sub/dir" git lfs env | tee env.log grep "Endpoint=$GITSERVER/$submodname.git/info/lfs (auth=none)$" env.log grep "LocalWorkingDir=$(canonical_path_escaped "$TRASHDIR/repo/sub$")" env.log grep "LocalGitDir=$(canonical_path_escaped "$TRASHDIR/repo/.git/modules/sub$")" env.log grep "LocalGitStorageDir=$(canonical_path_escaped "$TRASHDIR/repo/.git/modules/sub$")" env.log grep "LocalMediaDir=$(canonical_path_escaped "$TRASHDIR/repo/.git/modules/sub/lfs/objects$")" env.log grep "TempDir=$(canonical_path_escaped "$TRASHDIR/repo/.git/modules/sub/lfs/tmp$")" env.log ) end_test git-lfs-3.6.1/t/t-tempfile.sh000077500000000000000000000014211472372047300157450ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "cleans only temp files and directories older than an hour" ( set -e reponame="$(basename "$0" ".sh")" git init "$reponame" cd "$reponame" git lfs track '*.bin' echo foo > abc.bin git add abc.bin git commit -m 'Add abc.bin' tmpdir=.git/lfs/tmp mkdir -p "$tmpdir" mkdir "$tmpdir/dir-to-preserve" touch "$tmpdir/to-preserve" touch "$tmpdir/dir-to-preserve/file" # git format-patch datestamp; arbitrary timestamp in the past. 
TZ=UTC touch -t 200109170000.00 "$tmpdir/to-destroy" TZ=UTC touch -t 200109170000.00 "$tmpdir/dir-to-preserve/file" git lfs ls-files >/dev/null [ -f "$tmpdir/to-preserve" ] [ -f "$tmpdir/dir-to-preserve/file" ] [ ! -f "$tmpdir/to-destroy" ] ) end_test git-lfs-3.6.1/t/t-track-attrs.sh000077500000000000000000000017761472372047300164140ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" ensure_git_version_isnt $VERSION_LOWER "2.1.0" begin_test "track (--no-modify-attrs)" ( set -e reponame="track-no-modify-attrs" git init "$reponame" cd "$reponame" echo "contents" > a.dat git add a.dat # Git assumes that identical results from `stat(1)` between the index and # working copy are stat dirty. To prevent this, wait at least one second to # yield different `stat(1)` results. sleep 1 git commit -m "add a.dat" echo "*.dat filter=lfs diff=lfs merge=lfs -text" > .gitattributes git add .gitattributes git commit -m "asdf" [ -z "$(git status --porcelain)" ] git lfs track --no-modify-attrs "*.dat" [ " M a.dat" = "$(git status --porcelain)" ] ) end_test begin_test "track (--dry-run)" ( set -e reponame="track-dry-run" git init "$reponame" cd "$reponame" git lfs track --dry-run "*.dat" echo "contents" > a.dat git add a.dat git commit -m "add a.dat" refute_pointer "main" "a.dat" ) end_test git-lfs-3.6.1/t/t-track-wildcards.sh000077500000000000000000000045111472372047300172210ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "track files using wildcard pattern with leading slash" ( set -e reponame="track-wildcard-leading-slash" mkdir -p "$reponame/dir" cd $reponame git init # Adding files before being tracked by LFS printf "contents" > a.dat printf "contents" > dir/b.dat git add a.dat dir/b.dat git commit -m "initial commit" # Track only in the root git lfs track "/*.dat" grep "/*.dat" .gitattributes git add .gitattributes a.dat dir/b.dat sleep 1 git commit -m "convert to LFS" git lfs ls-files | tee files.log grep "a.dat" files.log grep "dir/b.dat" files.log && exit 1 # Subdirectories ignored # Add files after being tracked by LFS printf "contents" > c.dat printf "contents" > dir/d.dat git add c.dat dir/d.dat sleep 1 git commit -m "more lfs files" git lfs ls-files | tee new_files.log grep "a.dat" new_files.log grep "dir/b.dat" new_files.log && exit 1 grep "c.dat" new_files.log grep "dir/d.dat" new_files.log && exit 1 true ) end_test begin_test "track files using filename pattern with leading slash" ( set -e reponame="track-absolute-leading-slash" mkdir -p "$reponame/dir" cd $reponame git init # Adding files before being tracked by LFS printf "contents" > a.dat printf "contents" > dir/b.dat git add a.dat dir/b.dat sleep 1 git commit -m "initial commit" # These are added by git.GetTrackedFiles git lfs track "/a.dat" | tee track.log grep "Tracking \"/a.dat\"" track.log git lfs track "/dir/b.dat" | tee track.log grep "Tracking \"/dir/b.dat\"" track.log # These are added by Git's `clean` filter git lfs track "/c.dat" | tee track.log grep "Tracking \"/c.dat\"" track.log git lfs track "/dir/d.dat" | tee track.log grep "Tracking \"/dir/d.dat\"" track.log cat .gitattributes git add .gitattributes a.dat dir/b.dat sleep 1 git commit -m "convert to LFS" git lfs ls-files | tee files.log grep "a.dat" files.log grep "dir/b.dat" files.log # Add files after being tracked by LFS printf "contents" > c.dat printf "contents" > dir/d.dat git add c.dat dir/d.dat git commit -m "more lfs files" git lfs ls-files | tee new_files.log grep "a.dat" new_files.log grep 
"dir/b.dat" new_files.log grep "c.dat" new_files.log grep "dir/d.dat" new_files.log ) end_test git-lfs-3.6.1/t/t-track.sh000077500000000000000000000444321472372047300152550ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "track" ( set -e # no need to setup a remote repo, since this test doesn't need to push or pull mkdir track cd track git init echo "############################################################################### # Set default behavior to automatically normalize line endings. ############################################################################### * text=auto #*.cs diff=csharp" > .gitattributes # track *.jpg once git lfs track "*.jpg" | grep "Tracking \"\*.jpg\"" assert_attributes_count "jpg" "filter=lfs" 1 # track *.jpg again git lfs track "*.jpg" | grep "\"*.jpg\" already supported" assert_attributes_count "jpg" "filter=lfs" 1 mkdir -p a/b .git/info echo "*.mov filter=lfs -text" > .git/info/attributes echo "*.gif filter=lfs -text" > a/.gitattributes echo "*.png filter=lfs -text" > a/b/.gitattributes git lfs track | tee track.log grep "Listing tracked patterns" track.log grep "*.mov ($(native_path_escaped ".git/info/attributes"))" track.log grep "*.jpg (.gitattributes)" track.log grep "*.gif ($(native_path_escaped "a/.gitattributes"))" track.log grep "*.png ($(native_path_escaped "a/b/.gitattributes"))" track.log grep "Set default behavior" .gitattributes grep "############" .gitattributes grep "* text=auto" .gitattributes grep "diff=csharp" .gitattributes grep "*.jpg" .gitattributes echo "*.gif -filter -text" >> a/b/.gitattributes echo "*.mov -filter -text" >> a/b/.gitattributes git lfs track | tee track.log tail -n 3 track.log | head -n 1 | grep "Listing excluded patterns" tail -n 3 track.log | grep "*.gif ($(native_path_escaped "a/b/.gitattributes"))" tail -n 3 track.log | grep "*.mov ($(native_path_escaped "a/b/.gitattributes"))" ) end_test begin_test "track --no-excluded" ( set -e reponame="track_no_excluded" mkdir "$reponame" cd "$reponame" git init mkdir -p a/b .git/info echo "*.mov filter=lfs -text" > .git/info/attributes echo "*.gif filter=lfs -text" > a/.gitattributes echo "*.png filter=lfs -text" > a/b/.gitattributes echo "*.gif -filter -text" >> a/b/.gitattributes echo "*.mov -filter=lfs -text" >> a/b/.gitattributes git lfs track --no-excluded | tee track.log grep "Listing excluded patterns" track.log && exit 1 true ) end_test begin_test "track --verbose" ( set -e reponame="track_verbose_logs" mkdir "$reponame" cd "$reponame" git init touch foo.dat git add foo.dat git lfs track --verbose "foo.dat" 2>&1 > track.log grep "Touching \"foo.dat\"" track.log ) end_test begin_test "track --dry-run" ( set -e reponame="track_dry_run" mkdir "$reponame" cd "$reponame" git init touch foo.dat git add foo.dat git lfs track --dry-run "foo.dat" 2>&1 > track.log grep "Tracking \"foo.dat\"" track.log grep "Touching \"foo.dat\"" track.log git status --porcelain 2>&1 > status.log grep "A foo.dat" status.log ) end_test begin_test "track directory" ( set -e mkdir dir cd dir git init if [ "$IS_WINDOWS" -eq 1 ] then git lfs track "foo bar\\*" | tee track.txt else git lfs track "foo bar/*" | tee track.txt fi [ "foo[[:space:]]bar/* filter=lfs diff=lfs merge=lfs -text" = "$(cat .gitattributes)" ] [ "Tracking \"foo bar/*\"" = "$(cat track.txt)" ] mkdir "foo bar" echo "a" > "foo bar/a" echo "b" > "foo bar/b" git add foo\ bar git commit -am "add foo bar" assert_pointer "main" "foo bar/a" 
"87428fc522803d31065e7bce3cf03fe475096631e5e07bbd7a0fde60c4cf25c7" 2 assert_pointer "main" "foo bar/b" "0263829989b6fd954f72baaf2fc64bc2e2f01d692d4de72986ea808f6e99813f" 2 ) end_test begin_test "track without trailing linebreak" ( set -e mkdir no-linebreak cd no-linebreak git init printf "*.mov filter=lfs -text" > .gitattributes [ "*.mov filter=lfs -text" = "$(cat .gitattributes)" ] git lfs track "*.gif" expected="*.mov filter=lfs -text$(cat_end) *.gif filter=lfs diff=lfs merge=lfs -text$(cat_end)" [ "$expected" = "$(cat -e .gitattributes)" ] ) end_test begin_test "track with existing crlf" ( set -e mkdir existing-crlf cd existing-crlf git init git config core.autocrlf true git lfs track "*.mov" git lfs track "*.gif" expected="*.mov filter=lfs diff=lfs merge=lfs -text^M$ *.gif filter=lfs diff=lfs merge=lfs -text^M$" [ "$expected" = "$(cat -e .gitattributes)" ] git config core.autocrlf false git lfs track "*.jpg" expected="*.mov filter=lfs diff=lfs merge=lfs -text^M$ *.gif filter=lfs diff=lfs merge=lfs -text^M$ *.jpg filter=lfs diff=lfs merge=lfs -text^M$" [ "$expected" = "$(cat -e .gitattributes)" ] ) end_test begin_test "track with autocrlf=true" ( set -e mkdir autocrlf-true cd autocrlf-true git init git config core.autocrlf true printf "*.mov filter=lfs -text" > .gitattributes [ "*.mov filter=lfs -text" = "$(cat .gitattributes)" ] git lfs track "*.gif" expected="*.mov filter=lfs -text^M$ *.gif filter=lfs diff=lfs merge=lfs -text^M$" [ "$expected" = "$(cat -e .gitattributes)" ] ) end_test begin_test "track with autocrlf=input" ( set -e mkdir autocrlf-input cd autocrlf-input git init git config core.autocrlf input printf "*.mov filter=lfs -text" > .gitattributes [ "*.mov filter=lfs -text" = "$(cat .gitattributes)" ] git lfs track "*.gif" if [ $IS_WINDOWS -eq 1 ] then cat -e .gitattributes | grep '\^M\$' else cat -e .gitattributes | grep -v '\^M' fi ) end_test begin_test "track outside git repo" ( set -e git lfs track "*.foo" || { # this fails if it's run outside of a git repo using GIT_LFS_TEST_DIR # git itself returns an exit status of 128 # $ git show # fatal: Not a git repository (or any of the parent directories): .git # $ echo "$?" # 128 [ "$?" = "128" ] exit 0 } if [ -n "$GIT_LFS_TEST_DIR" ]; then echo "GIT_LFS_TEST_DIR should be set outside of any Git repository" exit 1 fi git init track-outside cd track-outside git lfs track "*.file" git lfs track "../*.foo" || { # git itself returns an exit status of 128 # $ git add ../test.foo # fatal: ../test.foo: '../test.foo' is outside repository # $ echo "$?" # 128 [ "$?" = "128" ] exit 0 } exit 1 ) end_test begin_test "track representation" ( set -e git init track-representation cd track-representation git lfs track "*.jpg" mkdir a git lfs track "a/test.file" cd a out3=$(git lfs track "test.file") if [ "$out3" != "\"test.file\" already supported" ]; then echo "Track didn't recognize duplicate path" cat .gitattributes exit 1 fi git lfs track "file.bin" cd .. out4=$(git lfs track "a/file.bin") if [ "$out4" != "\"a/file.bin\" already supported" ]; then echo "Track didn't recognize duplicate path" cat .gitattributes exit 1 fi ) end_test begin_test "track absolute" ( # MinGW bash intercepts '/images' and passes 'C:/Program Files/Git/images' as arg! 
if [[ $(uname) == *"MINGW"* ]]; then echo "Skipping track absolute on Windows" exit 0 fi set -e git init track-absolute cd track-absolute git lfs track "/images" cat .gitattributes grep "^/images" .gitattributes ) end_test begin_test "track in gitDir" ( set -e git init track-in-dot-git cd track-in-dot-git echo "some content" > test.file cd .git git lfs track "../test.file" || { # this fails if it's run inside a .git directory # git itself returns an exit status of 128 # $ git add ../test.file # fatal: This operation must be run in a work tree # $ echo "$?" # 128 [ "$?" = "128" ] exit 0 } # fail if track passed exit 1 ) end_test begin_test "track in symlinked dir" ( set -e git init track-symlinkdst ln -s track-symlinkdst track-symlinksrc cd track-symlinksrc git lfs track "*.png" grep "^*.png" .gitattributes || { echo ".gitattributes doesn't contain the expected relative path *.png:" cat .gitattributes exit 1 } ) end_test begin_test "track blocklisted files by name" ( set -e repo="track_blocklisted_by_name" mkdir "$repo" cd "$repo" git init touch .gitattributes git add .gitattributes git commit -m 'Initial commit' git lfs track .gitattributes 2>&1 > track.log && exit 1 grep "Pattern '.gitattributes' matches forbidden file '.gitattributes'" track.log [ -z "$(git status --porcelain | grep -v '^??')" ] ) end_test begin_test "track blocklisted files with glob" ( set -e repo="track_blocklisted_glob" mkdir "$repo" cd "$repo" git init touch .gitattributes git add .gitattributes git commit -m 'Initial commit' git lfs track ".git*" 2>&1 > track.log && exit 1 grep "Pattern '.git\*' matches forbidden file" track.log [ -z "$(git status --porcelain | grep -v '^??')" ] git lfs track "*" 2>&1 > track.log && exit 1 grep "Pattern '\*' matches forbidden file" track.log [ -z "$(git status --porcelain | grep -v '^??')" ] ) end_test begin_test "track lockable" ( set -e repo="track_lockable" mkdir "$repo" cd "$repo" git init # track *.jpg once, lockable git lfs track --lockable "*.jpg" | grep "Tracking \"\*.jpg\"" assert_attributes_count "jpg" "lockable" 1 # track *.jpg again, don't change anything. 
Should retain lockable git lfs track "*.jpg" | grep "\"*.jpg\" already supported" assert_attributes_count "jpg" "lockable" 1 # track *.png once, not lockable yet git lfs track "*.png" | grep "Tracking \"\*.png\"" assert_attributes_count "png" "filter=lfs" 1 assert_attributes_count "png" "lockable" 0 # track png again, enable lockable, should replace git lfs track --lockable "*.png" | grep "Tracking \"\*.png\"" assert_attributes_count "png" "filter=lfs" 1 assert_attributes_count "png" "lockable" 1 # track png again, disable lockable, should replace git lfs track --not-lockable "*.png" | grep "Tracking \"\*.png\"" assert_attributes_count "png" "filter=lfs" 1 assert_attributes_count "png" "lockable" 0 # check output reflects lockable out=$(git lfs track) echo "$out" | grep "Listing tracked patterns" echo "$out" | grep "*.jpg \[lockable\] (.gitattributes)" echo "$out" | grep "*.png (.gitattributes)" ) end_test begin_test "track lockable read-only/read-write" ( set -e repo="track_lockable_ro_rw" mkdir "$repo" cd "$repo" git init echo "blah blah" > test.bin echo "foo bar" > test.dat mkdir subfolder echo "sub blah blah" > subfolder/test.bin echo "sub foo bar" > subfolder/test.dat git add *.bin *.dat subfolder # should start writeable assert_file_writeable test.bin assert_file_writeable test.dat assert_file_writeable subfolder/test.bin assert_file_writeable subfolder/test.dat # track *.bin, not lockable yet git lfs track "*.bin" | grep "Tracking \"\*.bin\"" # track *.dat, lockable immediately git lfs track --lockable "*.dat" | grep "Tracking \"\*.dat\"" # bin should remain writeable, dat should have been made read-only assert_file_writeable test.bin refute_file_writeable test.dat assert_file_writeable subfolder/test.bin refute_file_writeable subfolder/test.dat git add .gitattributes test.bin test.dat git commit -m "First commit" # bin should still be writeable assert_file_writeable test.bin assert_file_writeable subfolder/test.bin # now make bin lockable git lfs track --lockable "*.bin" | grep "Tracking \"\*.bin\"" # bin should now be read-only refute_file_writeable test.bin refute_file_writeable subfolder/test.bin # remove lockable again git lfs track --not-lockable "*.bin" | grep "Tracking \"\*.bin\"" # bin should now be writeable again assert_file_writeable test.bin assert_file_writeable subfolder/test.bin ) end_test begin_test "track escaped pattern" ( set -e reponame="track-escaped-pattern" git init "$reponame" cd "$reponame" git lfs track " " | grep "Tracking \" \"" assert_attributes_count "[[:space:]]" "filter=lfs" 1 git lfs track "#" | grep "Tracking \"#\"" assert_attributes_count "\\#" "filter=lfs" 1 ) end_test begin_test "track (symlinked repository)" ( set -e reponame="tracked-symlinked-repository" git init "$reponame" cd "$reponame" touch a.dat pushd .. > /dev/null dir="tracked-symlinked-repository-tmp" mkdir -p "$dir" ln -s "../$reponame" "./$dir" cd "$dir/$reponame" [ "Tracking \"a.dat\"" = "$(git lfs track "a.dat")" ] [ "\"a.dat\" already supported" = "$(git lfs track "a.dat")" ] popd > /dev/null ) end_test begin_test "track (\$GIT_LFS_TRACK_NO_INSTALL_HOOKS)" ( set -e reponame="track-no-setup-hooks" git init "$reponame" cd "$reponame" [ ! -f .git/hooks/pre-push ] [ ! -f .git/hooks/post-checkout ] [ ! -f .git/hooks/post-commit ] [ ! -f .git/hooks/post-merge ] GIT_LFS_TRACK_NO_INSTALL_HOOKS=1 git lfs track [ ! -f .git/hooks/pre-push ] [ ! -f .git/hooks/post-checkout ] [ ! -f .git/hooks/post-commit ] [ ! 
-f .git/hooks/post-merge ] ) end_test begin_test "track (with comments)" ( set -e reponame="track-with=comments" git init "$reponame" cd "$reponame" echo "*.jpg filter=lfs diff=lfs merge=lfs -text" >> .gitattributes echo "# *.png filter=lfs diff=lfs merge=lfs -text" >> .gitattributes echo "*.pdf filter=lfs diff=lfs merge=lfs -text" >> .gitattributes git add .gitattributes git commit -m "initial commit" git lfs track 2>&1 | tee track.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "expected \`git lfs track\` command to exit cleanly, didn't" exit 1 fi [ "1" -eq "$(grep -c "\.jpg" track.log)" ] [ "1" -eq "$(grep -c "\.pdf" track.log)" ] [ "0" -eq "$(grep -c "\.png" track.log)" ] ) end_test begin_test "track (with current-directory prefix)" ( set -e reponame="track-with-current-directory-prefix" git init "$reponame" cd "$reponame" git lfs track "./a.dat" printf "a" > a.dat git add .gitattributes a.dat git commit -m "initial commit" grep -e "^a.dat" .gitattributes ) end_test begin_test "track (global gitattributes)" ( set -e reponame="track-global-gitattributes" git init "$reponame" cd "$reponame" global="$(cd .. && pwd)/gitattributes-global" echo "*.dat filter=lfs diff=lfs merge=lfs -text" > "$global" git config --local core.attributesfile "$global" git lfs track 2>&1 | tee track.log grep "*.dat" track.log ) end_test begin_test "track (system gitattributes)" ( set -e gitversion=$(git version | cut -d" " -f3) set +e compare_version "$gitversion" 2.42.0 result=$? set -e # We no longer read the PREFIX variable as of Git 2.42.0. [ "$result" -ne "$VERSION_LOWER" ] && exit 0 reponame="track-system-gitattributes" git init "$reponame" cd "$reponame" pushd "$TRASHDIR" > /dev/null mkdir -p "prefix/${reponame}/etc" cd "prefix/${reponame}/etc" echo "*.dat filter=lfs diff=lfs merge=lfs -text" > gitattributes popd > /dev/null PREFIX="${TRASHDIR}/prefix/${reponame}" git lfs track 2>&1 | tee track.log grep "*.dat" track.log ) end_test begin_test "track: escaped pattern in .gitattributes" ( set -e reponame="track-escaped" git init "$reponame" cd "$reponame" filename="file with spaces.#" echo "I need escaping" > "$filename" [ "Tracking \"$filename\"" = "$(git lfs track "$filename")" ] [ "\"$filename\" already supported" = "$(git lfs track "$filename")" ] #changing flags should track the file again [ "Tracking \"$filename\"" = "$(git lfs track -l "$filename")" ] if [ 1 -ne "$(wc -l .gitattributes | awk '{ print $1 }')" ]; then echo >&2 "changing flag for an existing tracked file shouldn't add another line" exit 1 fi [ "Tracking \"foo/bar/$filename\"" = "$(git lfs track "foo/bar/$filename")" ] [ "\"foo/bar/$filename\" already supported" = "$(git lfs track "foo/bar/$filename")" ] ) end_test begin_test "track: escaped glob pattern in .gitattributes" ( set -e reponame="track-escaped-glob" git init "$reponame" cd "$reponame" filename='[foo]bar.txt' contents='I need escaping' contents_oid=$(calc_oid "$contents") git lfs track --filename "$filename" git lfs track --filename "$filename" | grep 'already supported' git add . cat .gitattributes printf "%s" "$contents" > "$filename" git add . git commit -m 'Add unusually named file' # If Git understood our escaping, we'll have a pointer. Otherwise, we won't. assert_pointer "main" "$filename" "$contents_oid" 15 ) end_test begin_test "track: escaped glob pattern with spaces in .gitattributes" ( set -e # None of these characters are valid in the Win32 subsystem. 
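# The filename below contains "*" and "?", which, along with characters
# such as ':', '<', '>' and '|', cannot appear in Win32 filenames, so the
# test is skipped entirely on Windows.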
[ "$IS_WINDOWS" -eq 1 ] && exit 0 reponame="track-escaped-glob-spaces" git init "$reponame" cd "$reponame" # Note that the \n is literally just that; it is not a newline. filename='*[foo] \n bar?.txt' contents='I need escaping' contents_oid=$(calc_oid "$contents") git lfs track --filename "$filename" >output 2>&1 # This error would occur if `git ls-files` didn't handle the backslash # properly. grep 'Error marking' output && exit 1 rm -f output git add . cat .gitattributes printf "%s" "$contents" > "$filename" git add . git commit -m 'Add unusually named file' # If Git understood our escaping, we'll have a pointer. Otherwise, we won't. assert_pointer "main" "$filename" "$contents_oid" 15 ) end_test begin_test "track: verbose logging" ( set -e reponame="track-verbose-logging" git init "$reponame" cd "$reponame" filename='[foo]bar.bin' contents='I need escaping' contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > "$filename" printf 'Hello, world!\n' > a.txt git add a.txt "$filename" git commit -m 'some files' git lfs track -v "*.txt" 2>&1 | tee output grep "Found 1 files previously added to Git matching pattern:" output git lfs track -v --filename "$filename" 2>&1 | tee output grep "Found 1 files previously added to Git matching pattern:" output ) end_test begin_test "--json output" ( set -e reponame="track-json" git init "$reponame" cd "$reponame" git lfs track '*.dat' git lfs track --lockable '*.bin' echo 'a.dat !filter' >>.gitattributes git lfs track --json > actual cat >expected <<-EOF { "patterns": [ { "pattern": "*.dat", "source": ".gitattributes", "lockable": false, "tracked": true }, { "pattern": "*.bin", "source": ".gitattributes", "lockable": true, "tracked": true }, { "pattern": "a.dat", "source": ".gitattributes", "lockable": false, "tracked": false } ] } EOF diff -u actual expected ) end_test git-lfs-3.6.1/t/t-umask.sh000077500000000000000000000057411472372047300152710ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" if [ $IS_WINDOWS -eq 1 ]; then echo "skip $0: Windows lacks POSIX permissions" exit fi clean_setup () { mkdir "$1" cd "$1" git init } perms_for () { local file=$(echo "$1" | sed "s!^\(..\)\(..\)!.git/lfs/objects/\1/\2/\1\2!") local perms=$(ls -l "$file" | awk '{print $1}') # Trim extended attributes: echo ${perms:0:10} } assert_dir_perms () { local perms="$1" [ "$(find .git/lfs -type d -ls | grep -vE "$perms")" = "" ] } begin_test "honors umask" ( set -e clean_setup "simple" umask 027 echo "whatever" | git lfs clean | tee clean.log [ "$(perms_for cd293be6cea034bd45a0352775a219ef5dc7825ce55d1f7dae9762d80ce64411)" = "-rw-r-----" ] umask 007 echo "random" | git lfs clean | tee clean.log [ "$(perms_for 87c1b129fbadd7b6e9abc0a9ef7695436d767aece042bec198a97e949fcbe14c)" = "-rw-rw----" ] ) end_test begin_test "honors umask for directories" ( set -e reponame="simple-directories" setup_remote_repo_with_file "$reponame" "a.dat" umask 027 clone_repo "$reponame" "$reponame-a" echo "whatever" | git lfs clean assert_dir_perms "drwxr-[xs]---" umask 007 clone_repo "$reponame" "$reponame-b" echo "whatever" | git lfs clean assert_dir_perms "drwxrw[xs]---" ) end_test # This is tested more comprehensively in the unit tests. 
begin_test "honors core.sharedrepository" ( set -e clean_setup "shared-repo" umask 027 git config core.sharedRepository 0660 echo "whatever" | git lfs clean | tee clean.log [ "$(perms_for cd293be6cea034bd45a0352775a219ef5dc7825ce55d1f7dae9762d80ce64411)" = "-rw-rw----" ] git config core.sharedRepository everybody echo "random" | git lfs clean | tee clean.log [ "$(perms_for 87c1b129fbadd7b6e9abc0a9ef7695436d767aece042bec198a97e949fcbe14c)" = "-rw-rw-r--" ] git config core.sharedRepository false echo "something else" | git lfs clean | tee clean.log [ "$(perms_for a1621be95040239ee14362c16e20510ddc20f527d772d823b2a1679b33f5cd74)" = "-rw-r-----" ] umask 007 echo "who cares" | git lfs clean | tee clean.log [ "$(perms_for 261ded5f01a8ca18d9fb1958e8f58c53fa77648cc88a6d67c93d241a91133f3e)" = "-rw-rw----" ] ) end_test begin_test "honors core.sharedrepository for directories" ( set -e reponame="shared-repo-directories" setup_remote_repo_with_file "$reponame" "a.dat" umask 027 git config --global core.sharedRepository 0660 clone_repo "$reponame" "$reponame-a" echo "whatever" | git lfs clean assert_dir_perms "drwxrw[xs]---" git config --global core.sharedRepository everybody clone_repo "$reponame" "$reponame-b" echo "whatever" | git lfs clean assert_dir_perms "drwxrw[xs]r-x" git config --global core.sharedRepository false clone_repo "$reponame" "$reponame-c" echo "whatever" | git lfs clean assert_dir_perms "drwxr-[xs]---" umask 007 clone_repo "$reponame" "$reponame-d" echo "whatever" | git lfs clean assert_dir_perms "drwxrw[xs]---" git config --global --unset-all core.sharedRepository ) end_test git-lfs-3.6.1/t/t-uninstall-worktree-unsupported.sh000077500000000000000000000011341472372047300224000ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" # These tests rely on behavior found in Git versions less than 2.20.0 to # perform themselves, specifically: # - lack of worktreeConfig extension support ensure_git_version_isnt $VERSION_HIGHER "2.20.0" begin_test "uninstall --worktree with unsupported worktreeConfig extension" ( set -e reponame="$(basename "$0" ".sh")-unsupported" mkdir "$reponame" cd "$reponame" set +e git lfs uninstall --worktree 2>err.log res=$? set -e cat err.log grep -i "error" err.log grep -- "--worktree" err.log [ "0" != "$res" ] ) end_test git-lfs-3.6.1/t/t-uninstall-worktree.sh000077500000000000000000000157731472372047300200300ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" # These tests rely on behavior found in Git versions higher than 2.20.0 to # perform themselves, specifically: # - worktreeConfig extension support ensure_git_version_isnt $VERSION_LOWER "2.20.0" begin_test "uninstall --worktree outside repository" ( set -e # If run inside the git-lfs source dir this will update its .git/config & cause issues if [ "$GIT_LFS_TEST_DIR" == "" ]; then echo "Skipping uninstall --worktree because GIT_LFS_TEST_DIR is not set" exit 0 fi has_test_dir || exit 0 set +e git lfs uninstall --worktree >out.log res=$? set -e [ "Not in a Git repository." 
= "$(cat out.log)" ] [ "0" != "$res" ] ) end_test begin_test "uninstall --worktree with single working tree" ( set -e # old values that should be ignored by `uninstall --worktree` git config --global filter.lfs.smudge "global smudge" git config --global filter.lfs.clean "global clean" git config --global filter.lfs.process "global filter" reponame="$(basename "$0" ".sh")-single-tree" mkdir "$reponame" cd "$reponame" git init git lfs install --worktree # local configs are correct [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ] [ "git-lfs smudge -- %f" = "$(git config --local filter.lfs.smudge)" ] [ "git-lfs smudge -- %f" = "$(git config --worktree filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] [ "git-lfs clean -- %f" = "$(git config --local filter.lfs.clean)" ] [ "git-lfs clean -- %f" = "$(git config --worktree filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ] [ "git-lfs filter-process" = "$(git config --local filter.lfs.process)" ] [ "git-lfs filter-process" = "$(git config --worktree filter.lfs.process)" ] # global configs [ "global smudge" = "$(git config --global filter.lfs.smudge)" ] [ "global clean" = "$(git config --global filter.lfs.clean)" ] [ "global filter" = "$(git config --global filter.lfs.process)" ] git lfs uninstall --worktree 2>&1 | tee uninstall.log if [ ${PIPESTATUS[0]} -ne 0 ]; then echo >&2 "fatal: expected 'git lfs uninstall --worktree' to succeed" exit 1 fi grep -v "Global Git LFS configuration has been removed." uninstall.log # global configs [ "global smudge" = "$(git config filter.lfs.smudge)" ] [ "global smudge" = "$(git config --global filter.lfs.smudge)" ] [ "global clean" = "$(git config filter.lfs.clean)" ] [ "global clean" = "$(git config --global filter.lfs.clean)" ] [ "global filter" = "$(git config filter.lfs.process)" ] [ "global filter" = "$(git config --global filter.lfs.process)" ] # local configs are empty [ "" = "$(git config --local filter.lfs.smudge)" ] [ "" = "$(git config --worktree filter.lfs.smudge)" ] [ "" = "$(git config --local filter.lfs.clean)" ] [ "" = "$(git config --worktree filter.lfs.clean)" ] [ "" = "$(git config --local filter.lfs.process)" ] [ "" = "$(git config --worktree filter.lfs.process)" ] ) end_test begin_test "uninstall --worktree with multiple working trees" ( set -e reponame="$(basename "$0" ".sh")-multi-tree" mkdir "$reponame" cd "$reponame" git init # old values that should be ignored by `uninstall --worktree` git config --global filter.lfs.smudge "global smudge" git config --global filter.lfs.clean "global clean" git config --global filter.lfs.process "global filter" git config --local filter.lfs.smudge "local smudge" git config --local filter.lfs.clean "local clean" git config --local filter.lfs.process "local filter" touch a.txt git add a.txt git commit -m "initial commit" git config core.repositoryformatversion 1 git config extensions.worktreeConfig true treename="../$reponame-wt" git worktree add "$treename" cd "$treename" git lfs install --worktree # worktree configs are correct [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ] [ "git-lfs smudge -- %f" = "$(git config --worktree filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] [ "git-lfs clean -- %f" = "$(git config --worktree filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ] [ "git-lfs filter-process" = "$(git config --worktree filter.lfs.process)" ] # local configs are correct [ "local 
smudge" = "$(git config --local filter.lfs.smudge)" ] [ "local clean" = "$(git config --local filter.lfs.clean)" ] [ "local filter" = "$(git config --local filter.lfs.process)" ] # global configs [ "global smudge" = "$(git config --global filter.lfs.smudge)" ] [ "global clean" = "$(git config --global filter.lfs.clean)" ] [ "global filter" = "$(git config --global filter.lfs.process)" ] git lfs uninstall --worktree 2>&1 | tee uninstall.log if [ ${PIPESTATUS[0]} -ne 0 ]; then echo >&2 "fatal: expected 'git lfs uninstall --worktree' to succeed" exit 1 fi grep -v "Global Git LFS configuration has been removed." uninstall.log # global configs [ "global smudge" = "$(git config --global filter.lfs.smudge)" ] [ "global clean" = "$(git config --global filter.lfs.clean)" ] [ "global filter" = "$(git config --global filter.lfs.process)" ] # local configs [ "local smudge" = "$(git config filter.lfs.smudge)" ] [ "local smudge" = "$(git config --local filter.lfs.smudge)" ] [ "local clean" = "$(git config filter.lfs.clean)" ] [ "local clean" = "$(git config --local filter.lfs.clean)" ] [ "local filter" = "$(git config filter.lfs.process)" ] [ "local filter" = "$(git config --local filter.lfs.process)" ] # worktree configs are empty [ "" = "$(git config --worktree filter.lfs.smudge)" ] [ "" = "$(git config --worktree filter.lfs.clean)" ] [ "" = "$(git config --worktree filter.lfs.process)" ] ) end_test begin_test "uninstall --worktree without worktreeConfig extension" ( set -e reponame="$(basename "$0" ".sh")-multi-tree-no-config" mkdir "$reponame" cd "$reponame" git init touch a.txt git add a.txt git commit -m "initial commit" treename="../$reponame-wt" git worktree add "$treename" cd "$treename" set +e git lfs uninstall --worktree >out.log res=$? set -e cat out.log grep -E "error running.*git.*config" out.log [ "$res" -eq 0 ] ) end_test begin_test "uninstall --worktree with conflicting scope" ( set -e reponame="$(basename "$0" ".sh")-scope-conflict" mkdir "$reponame" cd "$reponame" git init set +e git lfs uninstall --local --worktree 2>err.log res=$? set -e [ "Only one of the --local, --system, --worktree, and --file options can be specified." = "$(cat err.log)" ] [ "0" != "$res" ] set +e git lfs uninstall --worktree --system 2>err.log res=$? set -e [ "Only one of the --local, --system, --worktree, and --file options can be specified." = "$(cat err.log)" ] [ "0" != "$res" ] set +e git lfs uninstall --worktree --file test-file 2>err.log res=$? set -e [ "Only one of the --local, --system, --worktree, and --file options can be specified." = "$(cat err.log)" ] [ "0" != "$res" ] ) end_test git-lfs-3.6.1/t/t-uninstall.sh000077500000000000000000000213401472372047300161530ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "uninstall outside repository" ( set -e mkdir uninstall-test cd uninstall-test smudge="$(git config filter.lfs.smudge)" clean="$(git config filter.lfs.clean)" filter="$(git config filter.lfs.process)" printf "%s" "$smudge" | grep "git-lfs smudge" printf "%s" "$clean" | grep "git-lfs clean" printf "%s" "$filter" | grep "git-lfs filter-process" # uninstall multiple times to trigger https://github.com/git-lfs/git-lfs/issues/529 git lfs uninstall [ ! 
-e "lfs" ] for opt in "" "--skip-repo" do git lfs install git lfs uninstall $opt | tee uninstall.log grep "configuration has been removed" uninstall.log [ "" = "$(git config --global filter.lfs.smudge)" ] [ "" = "$(git config --global filter.lfs.clean)" ] [ "" = "$(git config --global filter.lfs.process)" ] cat $HOME/.gitconfig [ "$(grep 'filter "lfs"' $HOME/.gitconfig -c)" = "0" ] done ) end_test begin_test "uninstall outside repository without access to .git/lfs" ( set -e mkdir uninstall-no-lfs cd uninstall-no-lfs mkdir .git touch .git/lfs touch lfs [ "" != "$(git config --global filter.lfs.smudge)" ] [ "" != "$(git config --global filter.lfs.clean)" ] [ "" != "$(git config --global filter.lfs.process)" ] git lfs uninstall [ "" = "$(git config --global filter.lfs.smudge)" ] [ "" = "$(git config --global filter.lfs.clean)" ] [ "" = "$(git config --global filter.lfs.process)" ] ) begin_test "uninstall inside repository with --skip-repo" ( set -e reponame="$(basename "$0" ".sh")-skip-repo" mkdir "$reponame" cd "$reponame" git init git lfs install [ -f .git/hooks/pre-push ] grep "git-lfs" .git/hooks/pre-push [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ] git lfs uninstall --skip-repo [ -f .git/hooks/pre-push ] [ "" = "$(git config filter.lfs.smudge)" ] [ "" = "$(git config filter.lfs.clean)" ] [ "" = "$(git config filter.lfs.process)" ] ) end_test begin_test "uninstall inside repository with default pre-push hook" ( set -e reponame="$(basename "$0" ".sh")-hook" mkdir "$reponame" cd "$reponame" git init git lfs install [ -f .git/hooks/pre-push ] grep "git-lfs" .git/hooks/pre-push [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ] git lfs uninstall [ -f .git/hooks/pre-push ] && { echo "expected .git/hooks/pre-push to be deleted" exit 1 } [ "" = "$(git config filter.lfs.smudge)" ] [ "" = "$(git config filter.lfs.clean)" ] [ "" = "$(git config filter.lfs.process)" ] ) end_test begin_test "uninstall inside repository without lfs pre-push hook" ( set -e reponame="$(basename "$0" ".sh")-no-hook" mkdir "$reponame" cd "$reponame" git init git lfs install echo "something something git-lfs" > .git/hooks/pre-push [ -f .git/hooks/pre-push ] [ "something something git-lfs" = "$(cat .git/hooks/pre-push)" ] [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ] git lfs uninstall [ -f .git/hooks/pre-push ] [ "" = "$(git config filter.lfs.smudge)" ] [ "" = "$(git config filter.lfs.clean)" ] [ "" = "$(git config filter.lfs.process)" ] ) end_test begin_test "uninstall hooks inside repository" ( set -e reponame="$(basename "$0" ".sh")-only-hook" mkdir "$reponame" cd "$reponame" git init git lfs install [ -f .git/hooks/pre-push ] grep "git-lfs" .git/hooks/pre-push [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ] git lfs uninstall hooks [ -f .git/hooks/pre-push ] && { echo "expected .git/hooks/pre-push to be deleted" exit 1 } [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] [ "git-lfs 
filter-process" = "$(git config filter.lfs.process)" ] ) end_test begin_test "uninstall --local outside repository" ( set -e # If run inside the git-lfs source dir this will update its .git/config & cause issues if [ "$GIT_LFS_TEST_DIR" == "" ]; then echo "Skipping uninstall --local because GIT_LFS_TEST_DIR is not set" exit 0 fi has_test_dir || exit 0 set +e git lfs uninstall --local >out.log res=$? set -e [ "Not in a Git repository." = "$(cat out.log)" ] [ "0" != "$res" ] ) end_test begin_test "uninstall --local with conflicting scope" ( set -e reponame="$(basename "$0" ".sh")-scope-conflict" mkdir "$reponame" cd "$reponame" git init set +e git lfs uninstall --local --system 2>err.log res=$? set -e [ "Only one of the --local, --system, --worktree, and --file options can be specified." = "$(cat err.log)" ] [ "0" != "$res" ] ) end_test begin_test "uninstall --local" ( set -e # old values that should be ignored by `uninstall --local` git config --global filter.lfs.smudge "global smudge" git config --global filter.lfs.clean "global clean" git config --global filter.lfs.process "global filter" reponame="$(basename "$0" ".sh")-local" mkdir "$reponame" cd "$reponame" git init git lfs install --local # local configs are correct [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ] [ "git-lfs smudge -- %f" = "$(git config --local filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] [ "git-lfs clean -- %f" = "$(git config --local filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ] [ "git-lfs filter-process" = "$(git config --local filter.lfs.process)" ] # global configs [ "global smudge" = "$(git config --global filter.lfs.smudge)" ] [ "global clean" = "$(git config --global filter.lfs.clean)" ] [ "global filter" = "$(git config --global filter.lfs.process)" ] git lfs uninstall --local 2>&1 | tee uninstall.log if [ ${PIPESTATUS[0]} -ne 0 ]; then echo >&2 "fatal: expected 'git lfs uninstall --local' to succeed" exit 1 fi grep -v "Global Git LFS configuration has been removed." 
uninstall.log # global configs [ "global smudge" = "$(git config filter.lfs.smudge)" ] [ "global smudge" = "$(git config --global filter.lfs.smudge)" ] [ "global clean" = "$(git config filter.lfs.clean)" ] [ "global clean" = "$(git config --global filter.lfs.clean)" ] [ "global filter" = "$(git config filter.lfs.process)" ] [ "global filter" = "$(git config --global filter.lfs.process)" ] # local configs are empty [ "" = "$(git config --local filter.lfs.smudge)" ] [ "" = "$(git config --local filter.lfs.clean)" ] [ "" = "$(git config --local filter.lfs.process)" ] ) end_test begin_test "uninstall --file" ( set -e # old values that should be ignored by `uninstall --local` git config --global filter.lfs.smudge "global smudge" git config --global filter.lfs.clean "global clean" git config --global filter.lfs.process "global filter" reponame="$(basename "$0" ".sh")-file" mkdir "$reponame" cd "$reponame" git init git lfs install --file=test-file # local configs are correct [ "git-lfs smudge -- %f" = "$(git config --file test-file filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config --file test-file filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config --file test-file filter.lfs.process)" ] # global configs [ "global smudge" = "$(git config --global filter.lfs.smudge)" ] [ "global clean" = "$(git config --global filter.lfs.clean)" ] [ "global filter" = "$(git config --global filter.lfs.process)" ] git lfs uninstall --file=test-file 2>&1 | tee uninstall.log if [ ${PIPESTATUS[0]} -ne 0 ]; then echo >&2 "fatal: expected 'git lfs uninstall --file=test-file' to succeed" exit 1 fi grep -v "Global Git LFS configuration has been removed." uninstall.log # global configs [ "global smudge" = "$(git config filter.lfs.smudge)" ] [ "global smudge" = "$(git config --global filter.lfs.smudge)" ] [ "global clean" = "$(git config filter.lfs.clean)" ] [ "global clean" = "$(git config --global filter.lfs.clean)" ] [ "global filter" = "$(git config filter.lfs.process)" ] [ "global filter" = "$(git config --global filter.lfs.process)" ] # local configs are empty [ "" = "$(git config --file test-file filter.lfs.smudge)" ] [ "" = "$(git config --file test-file filter.lfs.clean)" ] [ "" = "$(git config --file test-file filter.lfs.process)" ] ) end_test git-lfs-3.6.1/t/t-unlock.sh000077500000000000000000000311531472372047300154400ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" setup_repo () { setup_remote_repo_with_file "$1" "$2" git lfs track --lockable "*.dat" git add -u git commit -m 'Mark files lockable' } begin_test "unlocking a lock by path without a ref required" ( set -e reponame="unlock-by-path-main-branch-not-required" setup_repo "$reponame" "c.dat" git lfs lock --json "c.dat" | tee lock.log id=$(assert_lock lock.log c.dat) assert_server_lock "$reponame" "$id" "refs/heads/main" git lfs unlock "c.dat" refute_server_lock "$reponame" "$id" "refs/heads/main" ) end_test begin_test "unlocking a lock by path with good ref" ( set -e reponame="unlock-by-path-main-branch-required" setup_repo "$reponame" "c.dat" git lfs lock --json "c.dat" | tee lock.log id=$(assert_lock lock.log c.dat) assert_server_lock "$reponame" "$id" "refs/heads/main" git lfs unlock "c.dat" refute_server_lock "$reponame" "$id" "refs/heads/main" ) end_test begin_test "unlocking a lock by id with good ref" ( set -e reponame="unlock-by-id-main-branch-required" setup_repo "$reponame" "c.dat" git lfs lock --json "c.dat" | tee lock.log id=$(assert_lock lock.log c.dat) assert_server_lock "$reponame" "$id" "refs/heads/main" git lfs unlock --id="$id" refute_server_lock "$reponame" "$id" "refs/heads/main" ) end_test begin_test "unlocking a lock by path with tracked ref" ( set -e reponame="unlock-by-path-tracked-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "c" > c.dat git add .gitattributes c.dat git commit -m "add c.dat" git config push.default upstream git config branch.main.merge refs/heads/tracked git config branch.main.remote origin git push origin main git lfs lock --json "c.dat" | tee lock.log id=$(assert_lock lock.log c.dat) assert_server_lock "$reponame" "$id" "refs/heads/tracked" git lfs unlock "c.dat" refute_server_lock "$reponame" "$id" "refs/heads/tracked" ) end_test begin_test "unlocking a lock by id with tracked ref" ( set -e reponame="unlock-by-id-tracked-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "c" > c.dat git add .gitattributes c.dat git commit -m "add c.dat" git config push.default upstream git config branch.main.merge refs/heads/tracked git config branch.main.remote origin git push origin main git lfs lock --json "c.dat" | tee lock.log id=$(assert_lock lock.log c.dat) assert_server_lock "$reponame" "$id" "refs/heads/tracked" git lfs unlock --id="$id" refute_server_lock "$reponame" "$id" "refs/heads/tracked" ) end_test begin_test "unlocking a lock by path with bad ref without a ref required" ( set -e reponame="unlock-by-path-other-branch-not-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "c" > c.dat git add .gitattributes c.dat git commit -m "add c.dat" git push origin main:other git checkout -b other git lfs lock --json "c.dat" | tee lock.log id=$(assert_lock lock.log c.dat) assert_server_lock "$reponame" "$id" "refs/heads/other" git checkout main git lfs unlock "c.dat" refute_server_lock "$reponame" "$id" "refs/heads/other" ) end_test begin_test "unlocking a lock by path with bad ref" ( set -e reponame="unlock-by-path-other-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "c" > c.dat git add .gitattributes c.dat git commit -m "add c.dat" git push origin main:other git checkout -b other git lfs lock --json "c.dat" | tee lock.log id=$(assert_lock lock.log c.dat) assert_server_lock "$reponame" "$id" 
"refs/heads/other" git checkout main git lfs unlock "c.dat" 2>&1 | tee unlock.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected 'git lfs lock \'a.dat\'' to fail" exit 1 fi assert_server_lock "$reponame" "$id" "refs/heads/other" grep 'Expected ref "refs/heads/other", got "refs/heads/main"' unlock.log ) end_test begin_test "unlocking a lock by id with bad ref" ( set -e reponame="unlock-by-id-other-branch-required" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "c" > c.dat git add .gitattributes c.dat git commit -m "add c.dat" git push origin main:other git checkout -b other git lfs lock --json "c.dat" | tee lock.log id=$(assert_lock lock.log c.dat) assert_server_lock "$reponame" "$id" "refs/heads/other" git checkout main git lfs unlock --id="$id" 2>&1 | tee unlock.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected 'git lfs lock \'a.dat\'' to fail" exit 1 fi assert_server_lock "$reponame" "$id" "refs/heads/other" grep 'Expected ref "refs/heads/other", got "refs/heads/main"' unlock.log ) end_test begin_test "unlock multiple files" ( set -e reponame="unlock-multiple-files" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "a" > a.dat echo "b" > b.dat git add .gitattributes a.dat b.dat git commit -m "add dat files" git push origin main:other git lfs lock a.dat git lfs lock b.dat git lfs unlock *.dat >log 2>&1 grep "Exactly one of --id or a set of paths must be provided" log && exit 1 true ) end_test begin_test "unlock multiple files (JSON)" ( set -e reponame="unlock-multiple-files-json" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "a" > a.dat echo "b" > b.dat git add .gitattributes a.dat b.dat git commit -m "add dat files" git push origin main:other git lfs lock a.dat git lfs lock b.dat git lfs unlock --json *.dat | tee lock.json grep -F '[{"path":"a.dat","unlocked":true},{"path":"b.dat","unlocked":true}]' lock.json ) end_test begin_test "unlocking a file makes it readonly" ( set -e reponame="unlock_set_readonly" setup_repo "$reponame" "c.dat" git lfs lock --json "c.dat" assert_file_writeable c.dat git lfs unlock "c.dat" refute_file_writeable c.dat ) end_test begin_test "unlocking a file ignores readonly" ( set -e reponame="unlock_set_readonly_ignore" setup_repo "$reponame" "c.dat" git lfs lock --json "c.dat" assert_file_writeable c.dat git -c lfs.setlockablereadonly=false lfs unlock "c.dat" assert_file_writeable c.dat ) end_test begin_test "unlocking lock removed file" ( set -e reponame="unlock-removed-file" setup_repo "$reponame" "a.dat" git lfs lock --json "a.dat" | tee lock.log id=$(assert_lock lock.log a.dat) assert_server_lock "$reponame" "$id" git rm a.dat git commit -m "a.dat" rm *.log *.json # ensure clean git status git status git lfs unlock --force "a.dat" 2>&1 | tee unlock.log refute_server_lock "$reponame" "$id" ) end_test begin_test "unlocking nonexistent file" ( set -e reponame="unlock-nonexistent-file" setup_repo "$reponame" "a.dat" git lfs lock --json "b.dat" | tee lock.log id=$(assert_lock lock.log b.dat) assert_server_lock "$reponame" "$id" git lfs unlock --force "b.dat" 2>&1 | tee unlock.log refute_server_lock "$reponame" "$id" ) end_test begin_test "unlocking unlockable file" ( set -e reponame="unlock-unlockable-file" # Try with lockable patterns. 
setup_repo "$reponame" "a.dat" touch README.md git add README.md git commit -m 'Add README' git lfs lock --json "README.md" | tee lock.log id=$(assert_lock lock.log README.md) assert_server_lock "$reponame" "$id" assert_file_writeable "README.md" git lfs unlock --force "README.md" 2>&1 | tee unlock.log refute_server_lock "$reponame" "$id" assert_file_writeable "README.md" cd "$TRASHDIR" # Try without any lockable patterns. setup_remote_repo_with_file "$reponame-2" "a.dat" touch README.md git add README.md git commit -m 'Add README' git lfs lock --json "README.md" | tee lock.log id=$(assert_lock lock.log README.md) assert_server_lock "$reponame-2" "$id" assert_file_writeable "README.md" git lfs unlock --force "README.md" 2>&1 | tee unlock.log refute_server_lock "$reponame-2" "$id" assert_file_writeable "README.md" ) end_test begin_test "unlocking a lock (--json)" ( set -e reponame="unlock_by_path_json" setup_repo "$reponame" "c_json.dat" git lfs lock --json "c_json.dat" | tee lock.log id=$(assert_lock lock.log c_json.dat) assert_server_lock "$reponame" "$id" git lfs unlock --json "c_json.dat" 2>&1 | tee unlock.log grep "\"unlocked\":true" unlock.log refute_server_lock "$reponame" "$id" ) end_test begin_test "unlocking a lock by id" ( set -e reponame="unlock_by_id" setup_repo "$reponame" "d.dat" git lfs lock --json "d.dat" | tee lock.log assert_file_writeable d.dat id=$(assert_lock lock.log d.dat) assert_server_lock "$reponame" "$id" git lfs unlock --id="$id" refute_file_writeable d.dat ) end_test begin_test "unlocking a lock by id (--json)" ( set -e reponame="unlock_by_id_json" setup_repo "$reponame" "c_json.dat" git lfs lock --json "c_json.dat" | tee lock.log id=$(assert_lock lock.log c_json.dat) assert_server_lock "$reponame" "$id" git lfs unlock --json --id="$id" 2>&1 | tee unlock.log grep "\"unlocked\":true" unlock.log refute_server_lock "$reponame" "$id" ) end_test begin_test "unlocking a lock without sufficient info" ( set -e reponame="unlock_ambiguous" setup_repo "$reponame" "e.dat" git lfs lock --json "e.dat" | tee lock.log id=$(assert_lock lock.log e.dat) assert_server_lock "$reponame" "$id" git lfs unlock 2>&1 | tee unlock.log grep "Exactly one of --id or a set of paths must be provided" unlock.log assert_server_lock "$reponame" "$id" ) end_test begin_test "unlocking a lock while uncommitted" ( set -e reponame="unlock_modified" setup_repo "$reponame" "mod.dat" git lfs lock --json "mod.dat" | tee lock.log id=$(assert_lock lock.log mod.dat) assert_server_lock "$reponame" "$id" echo "\nSomething" >> mod.dat git lfs unlock "mod.dat" 2>&1 | tee unlock.log [ ${PIPESTATUS[0]} -ne "0" ] grep "Cannot unlock file with uncommitted changes" unlock.log assert_server_lock "$reponame" "$id" # should allow after discard git checkout mod.dat git lfs unlock "mod.dat" 2>&1 | tee unlock.log refute_server_lock "$reponame" "$id" ) end_test begin_test "unlocking a lock with ambiguous arguments" ( set -e reponame="unlock_ambiguous_args" setup_repo "$reponame" "a.dat" git lfs lock --json "a.dat" | tee lock.log id=$(assert_lock lock.log a.dat) assert_server_lock "$reponame" "$id" git lfs unlock --id "$id" a.dat 2>&1 | tee unlock.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "expected ambiguous \`git lfs unlock\` command to exit, didn't" exit 1 fi grep "Exactly one of --id or a set of paths must be provided" unlock.log assert_server_lock "$reponame" "$id" ) end_test begin_test "unlocking a lock while uncommitted with --force" ( set -e reponame="unlock_modified_force" setup_repo "$reponame" 
"modforce.dat" git lfs lock --json "modforce.dat" | tee lock.log id=$(assert_lock lock.log modforce.dat) assert_server_lock "$reponame" "$id" echo "\nSomething" >> modforce.dat # should allow with --force git lfs unlock --force "modforce.dat" 2>&1 | tee unlock.log grep "warning: unlocking with uncommitted changes" unlock.log refute_server_lock "$reponame" "$id" ) end_test begin_test "unlocking a lock while untracked" ( set -e reponame="unlock_untracked" setup_repo "$reponame" "notrelevant.dat" git lfs track "*.dat" # Create file but don't add it to git # Shouldn't be able to unlock it echo "something" > untracked.dat git lfs lock --json "untracked.dat" | tee lock.log id=$(assert_lock lock.log untracked.dat) assert_server_lock "$reponame" "$id" git lfs unlock "untracked.dat" 2>&1 | tee unlock.log [ ${PIPESTATUS[0]} -ne "0" ] grep "Cannot unlock file with uncommitted changes" unlock.log assert_server_lock "$reponame" "$id" # should allow after add/commit git add untracked.dat git commit -m "Added untracked" git lfs unlock "untracked.dat" 2>&1 | tee unlock.log refute_server_lock "$reponame" "$id" ) end_test begin_test "unlock with git-lfs-transfer" ( set -e setup_pure_ssh reponame="unlock-git-lfs-transfer" setup_remote_repo_with_file "$reponame" "f.dat" clone_repo "$reponame" "$reponame" sshurl=$(ssh_remote "$reponame") git config lfs.url "$sshurl" GIT_TRACE_PACKET=1 git lfs lock --json "f.dat" | tee lock.log id=$(assert_lock lock.log f.dat) assert_server_lock_ssh "$reponame" "$id" "refs/heads/main" git lfs unlock --id "$id" refute_server_lock_ssh "$reponame" "$id" "refs/heads/main" ) end_test git-lfs-3.6.1/t/t-untrack.sh000077500000000000000000000077471472372047300156300ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "untrack" ( set -e # no need to setup a remote repo, since this test doesn't need to push or pull reponame="untrack" git init $reponame cd $reponame # track *.jpg once git lfs track "*.jpg" | grep "Tracking \"\*.jpg\"" echo "* annex.backend=SHA512E" >> .gitattributes git lfs untrack "*.jpg" expected="* annex.backend=SHA512E" [ "$expected" = "$(cat .gitattributes)" ] ) end_test begin_test "untrack outside git repo" ( set -e reponame="outside" mkdir $reponame cd $reponame git lfs untrack "*.foo" || { # this fails if it's run outside of a git repo using GIT_LFS_TEST_DIR # git itself returns an exit status of 128 # $ git show # fatal: Not a git repository (or any of the parent directories): .git # $ echo "$?" # 128 [ "$?" = "128" ] exit 0 } if [ -n "$GIT_LFS_TEST_DIR" ]; then echo "GIT_LFS_TEST_DIR should be set outside of any Git repository" exit 1 fi ) end_test begin_test "untrack removes escape sequences" ( set -e reponame="untrack-remove-escape-sequence" git init "$reponame" cd "$reponame" git lfs track " " | grep "Tracking \" \"" assert_attributes_count "[[:space:]]" "filter=lfs" 1 git lfs untrack " " | grep "Untracking \" \"" assert_attributes_count "[[:space:]]" "filter=lfs" 0 git lfs track "#" | grep "Tracking \"#\"" assert_attributes_count "\\#" "filter=lfs" 1 git lfs untrack "#" | grep "Untracking \"#\"" assert_attributes_count "\\#" "filter=lfs" 0 ) end_test begin_test "untrack removes prefixed patterns (legacy)" ( set -e reponame="untrack-removes-prefix-patterns-legacy" git init "$reponame" cd "$reponame" echo "./a.dat filter=lfs diff=lfs merge=lfs" > .gitattributes printf "a" > a.dat git add .gitattributes a.dat git commit -m "initial commit" git lfs untrack "./a.dat" if [ ! 
-z "$(cat .gitattributes)" ]; then echo >&2 "fatal: expected 'git lfs untrack' to clear .gitattributes" exit 1 fi git checkout -- .gitattributes git lfs untrack "a.dat" if [ ! -z "$(cat .gitattributes)" ]; then echo >&2 "fatal: expected 'git lfs untrack' to clear .gitattributes" exit 1 fi ) end_test begin_test "untrack removes prefixed patterns (modern)" ( set -e reponame="untrack-removes-prefix-patterns-modern" git init "$reponame" cd "$reponame" echo "a.dat filter=lfs diff=lfs merge=lfs" > .gitattributes printf "a" > a.dat git add .gitattributes a.dat git commit -m "initial commit" git lfs untrack "./a.dat" if [ ! -z "$(cat .gitattributes)" ]; then echo >&2 "fatal: expected 'git lfs untrack' to clear .gitattributes" exit 1 fi git checkout -- .gitattributes git lfs untrack "a.dat" if [ ! -z "$(cat .gitattributes)" ]; then echo >&2 "fatal: expected 'git lfs untrack' to clear .gitattributes" exit 1 fi ) end_test begin_test "untrack removes escaped pattern in .gitattributes" ( set -e reponame="untrack-escaped" git init "$reponame" cd "$reponame" filename="file with spaces.#" # emulate multiple instances of the same file in gitattributes echo 'file[[:space:]]with[[:space:]]spaces.\# filter=lfs diff=lfs merge=lfs -text' >> .gitattributes echo 'file[[:space:]]with[[:space:]]spaces.\# filter=lfs diff=lfs merge=lfs -text' >> .gitattributes echo 'file[[:space:]]with[[:space:]]spaces.\# filter=lfs diff=lfs merge=lfs -text' >> .gitattributes git lfs untrack "$filename" if [ ! -z "$(cat .gitattributes)" ]; then echo >&2 "fatal: expected 'git lfs untrack' to clear .gitattributes even if the file name was escaped" exit 1 fi ) end_test begin_test "untrack works with GIT_WORK_TREE" ( set -e reponame="untrack-work-tree" export GIT_WORK_TREE="$reponame" GIT_DIR="$reponame-git" mkdir "$GIT_WORK_TREE" "$GIT_DIR" git init git lfs track '*.bin' grep -F '*.bin filter=lfs diff=lfs merge=lfs -text' "$reponame/.gitattributes" git lfs untrack '*.bin' [ ! -s "$reponame/.gitattributes" ] ) end_test git-lfs-3.6.1/t/t-unusual-filenames.sh000077500000000000000000000010631472372047300175770ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" reponame="$(basename "$0" ".sh")" # Leading dashes may be misinterpreted as flags if commands don't use "--" # before paths. name1='-dash.dat' contents1='leading dash' begin_test "push unusually named files" ( set -e setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" echo "$content1" > "$name1" git add -- .gitattributes *.dat git commit -m "add files" git push origin main | tee push.log grep "Uploading LFS objects: 100% (1/1), 1 B" push.log ) end_test git-lfs-3.6.1/t/t-update.sh000077500000000000000000000306701472372047300154320ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "update" ( set -e pre_push_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\\n%s\\n\\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'pre-push' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs pre-push \"\$@\"" post_checkout_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\\n%s\\n\\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. 
If you no longer wish to use Git LFS, remove this hook by deleting the 'post-checkout' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs post-checkout \"\$@\"" post_commit_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\\n%s\\n\\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-commit' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs post-commit \"\$@\"" post_merge_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\\n%s\\n\\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-merge' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs post-merge \"\$@\"" mkdir without-pre-push cd without-pre-push git init [ "Updated Git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] [ "$post_checkout_hook" = "$(cat .git/hooks/post-checkout)" ] [ "$post_commit_hook" = "$(cat .git/hooks/post-commit)" ] [ "$post_merge_hook" = "$(cat .git/hooks/post-merge)" ] # run it again [ "Updated Git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] [ "$post_checkout_hook" = "$(cat .git/hooks/post-checkout)" ] [ "$post_commit_hook" = "$(cat .git/hooks/post-commit)" ] [ "$post_merge_hook" = "$(cat .git/hooks/post-merge)" ] # replace old hook 1 echo "#!/bin/sh git lfs push --stdin \$*" > .git/hooks/pre-push [ "Updated Git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] # replace old hook 2 echo "#!/bin/sh git lfs push --stdin \"\$@\"" > .git/hooks/pre-push [ "Updated Git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] # replace old hook 3 echo "#!/bin/sh git lfs pre-push \"\$@\"" > .git/hooks/pre-push [ "Updated Git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] # replace blank hook rm .git/hooks/pre-push touch .git/hooks/pre-push touch .git/hooks/post-checkout touch .git/hooks/post-merge [ "Updated Git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] [ "$post_checkout_hook" = "$(cat .git/hooks/post-checkout)" ] [ "$post_commit_hook" = "$(cat .git/hooks/post-commit)" ] [ "$post_merge_hook" = "$(cat .git/hooks/post-merge)" ] # replace old hook 4 echo "#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository has been set up with Git LFS but Git LFS is not installed.\\n\"; exit 0; } git lfs pre-push \"\$@\"" > .git/hooks/pre-push [ "Updated Git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] # replace old hook 5 echo "#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository has been set up with Git LFS but Git LFS is not installed.\\n\"; exit 2; } git lfs pre-push \"\$@\"" > .git/hooks/pre-push [ "Updated Git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] # replace old hook 6 echo "#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. 
If you no longer wish to use Git LFS, remove this hook by deleting '.git/hooks/pre-push'.\\n\"; exit 2; } git lfs pre-push \"\$@\"" > .git/hooks/pre-push [ "Updated Git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] # replace old hook 7 echo "#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'pre-push' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\\n\"; exit 2; } git lfs pre-push \"\$@\"" > .git/hooks/pre-push [ "Updated Git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] # don't replace unexpected hook echo "test" > .git/hooks/pre-push echo "test" > .git/hooks/post-checkout echo "test" > .git/hooks/post-commit echo "test" > .git/hooks/post-merge expected="Hook already exists: pre-push test To resolve this, either: 1: run \`git lfs update --manual\` for instructions on how to merge hooks. 2: run \`git lfs update --force\` to overwrite your hook." [ "$expected" = "$(git lfs update 2>&1)" ] [ "test" = "$(cat .git/hooks/pre-push)" ] [ "test" = "$(cat .git/hooks/post-checkout)" ] [ "test" = "$(cat .git/hooks/post-commit)" ] [ "test" = "$(cat .git/hooks/post-merge)" ] # Make sure returns non-zero set +e git lfs update if [ $? -eq 0 ] then exit 1 fi set -e # test manual steps expected="Add the following to '.git/hooks/pre-push': #!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\n%s\n\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'pre-push' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs pre-push \"\$@\" Add the following to '.git/hooks/post-checkout': #!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\n%s\n\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-checkout' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs post-checkout \"\$@\" Add the following to '.git/hooks/post-commit': #!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\n%s\n\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-commit' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs post-commit \"\$@\" Add the following to '.git/hooks/post-merge': #!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\n%s\n\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-merge' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs post-merge \"\$@\"" [ "$expected" = "$(git lfs update --manual 2>&1)" ] [ "test" = "$(cat .git/hooks/pre-push)" ] [ "test" = "$(cat .git/hooks/post-checkout)" ] [ "test" = "$(cat .git/hooks/post-commit)" ] [ "test" = "$(cat .git/hooks/post-merge)" ] # force replace unexpected hook [ "Updated Git hooks." 
= "$(git lfs update --force)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] [ "$post_checkout_hook" = "$(cat .git/hooks/post-checkout)" ] [ "$post_commit_hook" = "$(cat .git/hooks/post-commit)" ] [ "$post_merge_hook" = "$(cat .git/hooks/post-merge)" ] # test manual steps with core.hookspath gitversion=$(git version | cut -d" " -f3) set +e compare_version "$gitversion" 2.9.0 result=$? set -e if [ "$result" -ne "$VERSION_LOWER" ] then mkdir hooks rm -fr .git/hooks git config core.hookspath hooks echo "test" > hooks/pre-push echo "test" > hooks/post-checkout echo "test" > hooks/post-commit echo "test" > hooks/post-merge expected="Add the following to 'hooks/pre-push': #!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\n%s\n\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'pre-push' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs pre-push \"\$@\" Add the following to 'hooks/post-checkout': #!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\n%s\n\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-checkout' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs post-checkout \"\$@\" Add the following to 'hooks/post-commit': #!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\n%s\n\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-commit' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs post-commit \"\$@\" Add the following to 'hooks/post-merge': #!/bin/sh command -v git-lfs >/dev/null 2>&1 || { printf >&2 \"\n%s\n\n\" \"This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-merge' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks').\"; exit 2; } git lfs post-merge \"\$@\"" [ "$expected" = "$(git lfs update --manual 2>&1)" ] [ "test" = "$(cat hooks/pre-push)" ] [ "test" = "$(cat hooks/post-checkout)" ] [ "test" = "$(cat hooks/post-commit)" ] [ "test" = "$(cat hooks/post-merge)" ] # force replace unexpected hook [ "Updated Git hooks." = "$(git lfs update --force)" ] [ "$pre_push_hook" = "$(cat hooks/pre-push)" ] [ "$post_checkout_hook" = "$(cat hooks/post-checkout)" ] [ "$post_commit_hook" = "$(cat hooks/post-commit)" ] [ "$post_merge_hook" = "$(cat hooks/post-merge)" ] test -d .git/hooks && exit 1 fi has_test_dir || exit 0 echo "test with bare repository" cd .. git clone --mirror without-pre-push bare cd bare git lfs env git lfs update ls -al hooks [ "$pre_push_hook" = "$(cat hooks/pre-push)" ] ) end_test begin_test "update with leading spaces" ( set -e reponame="update-leading-spaces" git init "$reponame" cd "$reponame" [ "Updated Git hooks." = "$(git lfs update)" ] # $pre_push_hook contains leading TAB '\t' characters pre_push_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. 
If you no longer wish to use Git LFS, remove this hook by deleting '.git/hooks/pre-push'.\\n\"; exit 2; } git lfs pre-push \"\$@\"" echo -n "$pre_push_hook" > .git/hooks/pre-push [ "Updated Git hooks." = "$(git lfs update)" ] ) end_test begin_test "update lfs.{url}.access" ( set -e mkdir update-access cd update-access git init git config lfs.http://example.com.access private git config lfs.https://example.com.access private git config lfs.https://example2.com.access basic git config lfs.https://example3.com.access other [ "private" = "$(git config lfs.http://example.com.access)" ] [ "private" = "$(git config lfs.https://example.com.access)" ] [ "basic" = "$(git config lfs.https://example2.com.access)" ] [ "other" = "$(git config lfs.https://example3.com.access)" ] expected="Updated Git hooks. Updated http://example.com access from private to basic. Updated https://example.com access from private to basic. Removed invalid https://example3.com access of other." ) end_test begin_test "update: outside git repository" ( if [ -d "hooks" ]; then ls -al echo "hooks dir exists" exit 1 fi set +e git lfs update 2>&1 > check.log res=$? set -e if [ "$res" = "0" ]; then if [ -z "$GIT_LFS_TEST_DIR" ]; then echo "Passes because $GIT_LFS_TEST_DIR is unset." exit 0 fi fi [ "$res" = "128" ] if [ -d "hooks" ]; then ls -al echo "hooks dir exists" exit 1 fi cat check.log grep "Not in a Git repository" check.log ) end_test git-lfs-3.6.1/t/t-upload-redirect.sh000077500000000000000000000012421472372047300172240ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "redirect upload" ( set -e reponame="redirect-storage-upload" setup_remote_repo "$reponame" clone_repo "$reponame" redirect-repo-upload contents="redirect-storage-upload" oid="$(calc_oid "$contents")" printf "%s" "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit -m "initial commit" GIT_TRACE=1 git push origin main 2>&1 | tee push.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git push origin main\` to succeed ..." exit 1 fi grep "api: redirect" push.log assert_server_object "$reponame" "$oid" ) end_test git-lfs-3.6.1/t/t-verify.sh000077500000000000000000000057071472372047300154570ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" begin_test "verify with retries" ( set -e reponame="verify-fail-2-times" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="send-verify-action" contents_oid="$(calc_oid "$contents")" contents_short_oid="$(echo "$contents_oid" | head -c 7)" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a.dat" GIT_TRACE=1 GIT_CURL_VERBOSE=1 git push origin main 2>&1 | tee push.log grep "Authorization: Basic * * * * *" push.log [ "0" -eq "${PIPESTATUS[0]}" ] [ "2" -eq "$(grep -c "verify $contents_short_oid attempt" push.log)" ] ) end_test begin_test "verify with retries (success without retry)" ( set -e reponame="verify-fail-0-times" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="send-verify-action" contents_oid="$(calc_oid "$contents")" contents_short_oid="$(echo "$contents_oid" | head -c 7)" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a.dat" GIT_TRACE=1 GIT_CURL_VERBOSE=1 git push origin main 2>&1 | tee push.log grep "Authorization: Basic * * * * *" push.log [ "0" -eq "${PIPESTATUS[0]}" ] [ "1" -eq "$(grep -c "verify $contents_short_oid attempt" push.log)" ] ) end_test begin_test "verify with retries (insufficient retries)" ( set -e reponame="verify-fail-10-times" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="send-verify-action" contents_oid="$(calc_oid "$contents")" contents_short_oid="$(echo "$contents_oid" | head -c 7)" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a.dat" set +e GIT_TRACE=1 git push origin main 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "verify: expected \"git push\" to fail, didn't ..." exit 1 fi set -e [ "3" -eq "$(grep -c "verify $contents_short_oid attempt" push.log)" ] ) end_test begin_test "verify with retries (bad .gitconfig)" ( set -e reponame="bad-config-verify-fail-2-times" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" # Invalid `lfs.transfer.maxverifies` will default to 3. git config "lfs.transfer.maxverifies" "-1" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="send-verify-action" contents_oid="$(calc_oid "$contents")" contents_short_oid="$(echo "$contents_oid" | head -c 7)" printf "%s" "$contents" > a.dat git add a.dat git commit -m "add a.dat" GIT_TRACE=1 GIT_CURL_VERBOSE=1 git push origin main 2>&1 | tee push.log grep "Authorization: Basic * * * * *" push.log [ "0" -eq "${PIPESTATUS[0]}" ] [ "2" -eq "$(grep -c "verify $contents_short_oid attempt" push.log)" ] ) end_test git-lfs-3.6.1/t/t-version.sh000077500000000000000000000007671472372047300156410ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" begin_test "git lfs --version is a synonym of git lfs version" ( set -e reponame="git-lfs-version-synonymous" mkdir "$reponame" cd "$reponame" git lfs version 2>&1 >version.log git lfs --version 2>&1 >flag.log if [ "$(cat version.log)" != "$(cat flag.log)" ]; then echo >&2 "fatal: expected 'git lfs version' and 'git lfs --version' to" echo >&2 "produce identical output ..." diff -u {version,flag}.log fi ) end_test git-lfs-3.6.1/t/t-worktree.sh000077500000000000000000000100171472372047300160030ustar00rootroot00000000000000#!/usr/bin/env bash . 
"$(dirname "$0")/testlib.sh" ensure_git_version_isnt $VERSION_LOWER "2.5.0" envInitConfig='git config filter.lfs.process = "git-lfs filter-process" git config filter.lfs.smudge = "git-lfs smudge -- %f" git config filter.lfs.clean = "git-lfs clean -- %f"' if [ "$IS_WINDOWS" -eq 1 ]; then export MSYS2_ENV_CONV_EXCL="GIT_LFS_TEST_DIR" fi # The "git lfs env" command should ignore this environment variable # despite the "GIT_" strings in its name and value. export TEST_GIT_EXAMPLE="GIT_EXAMPLE" begin_test "git worktree" ( set -e reponame="worktree-main" mkdir $reponame cd $reponame git init # can't create a worktree until there's 1 commit at least echo "a" > tmp.txt git add tmp.txt git commit -m "Initial commit" expected=$(printf "%s\n%s\n LocalWorkingDir=$(canonical_path_escaped "$TRASHDIR/$reponame") LocalGitDir=$(canonical_path_escaped "$TRASHDIR/$reponame/.git") LocalGitStorageDir=$(canonical_path_escaped "$TRASHDIR/$reponame/.git") LocalMediaDir=$(canonical_path_escaped "$TRASHDIR/$reponame/.git/lfs/objects") LocalReferenceDirs= TempDir=$(canonical_path_escaped "$TRASHDIR/$reponame/.git/lfs/tmp") ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=$(canonical_path_escaped "$TRASHDIR/$reponame/.git/lfs") AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh $(escape_path "$(env | grep "^GIT_")") %s " "$(git lfs version)" "$(git version)" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual" worktreename="worktree-2" git worktree add "$TRASHDIR/$worktreename" cd "$TRASHDIR/$worktreename" # git dir in worktree is like submodules (except path is worktrees) but this # is only for index, temp etc # storage of git objects and lfs objects is in the original .git expected=$(printf "%s\n%s\n LocalWorkingDir=$(canonical_path_escaped "$TRASHDIR/$worktreename") LocalGitDir=$(canonical_path_escaped "$TRASHDIR/$reponame/.git/worktrees/$worktreename") LocalGitStorageDir=$(canonical_path_escaped "$TRASHDIR/$reponame/.git") LocalMediaDir=$(canonical_path_escaped "$TRASHDIR/$reponame/.git/lfs/objects") LocalReferenceDirs= TempDir=$(canonical_path_escaped "$TRASHDIR/$reponame/.git/lfs/tmp") ConcurrentTransfers=8 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneVerifyUnreachableAlways=false PruneRemoteName=origin LfsStorageDir=$(canonical_path_escaped "$TRASHDIR/$reponame/.git/lfs") AccessDownload=none AccessUpload=none DownloadTransfers=basic,lfs-standalone-file,ssh UploadTransfers=basic,lfs-standalone-file,ssh $(escape_path "$(env | grep "^GIT_")") %s " "$(git lfs version)" "$(git version)" "$envInitConfig") actual=$(git lfs env | grep -v "^GIT_EXEC_PATH=") contains_same_elements "$expected" "$actual" ) end_test begin_test "git worktree with hooks" ( set -e reponame="worktree-hooks" mkdir $reponame cd $reponame git init # can't create a worktree until there's 1 commit at least echo "a" > tmp.txt git add tmp.txt git commit -m "Initial commit" worktreename="worktree-2-hook" git worktree add "$TRASHDIR/$worktreename" cd 
"$TRASHDIR/$worktreename" # No hooks so far. [ ! -e "$TRASHDIR/$reponame/.git/worktrees/$worktreename/hooks" ] [ ! -e "$TRASHDIR/$reponame/.git/hooks/pre-push" ] git lfs install # Make sure we installed the hooks in the main repo, not the worktree dir. [ ! -e "$TRASHDIR/$reponame/.git/worktrees/$worktreename/hooks" ] [ -x "$TRASHDIR/$reponame/.git/hooks/pre-push" ] ) end_test git-lfs-3.6.1/t/t-zero-len-file.sh000077500000000000000000000027451472372047300166220ustar00rootroot00000000000000#!/usr/bin/env bash . "$(dirname "$0")/testlib.sh" reponame="$(basename "$0" ".sh")" begin_test "push zero len file" ( set -e setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" touch empty.dat contents="full" contents_oid=$(calc_oid "$contents") printf "%s" "$contents" > full.dat git add .gitattributes *.dat git commit -m "add files" | tee commit.log # cut from commit output # $ git cat-file -p main # tree 2d67d025fb1f9df9fa349412b4b130e982314e92 tree="$(git cat-file -p main | cut -f 2 -d " " | head -n 1)" # cut from tree output # $ git cat-file -p "$tree" # 100644 blob 1e9f8f7cafb6af3a6f6ddf211fa39c45fccea7ab .gitattributes # 100644 blob e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 empty.dat # 100644 blob c5de5ac7dec1c40bafe60d24da9b498937640332 full.dat emptyblob="$(git cat-file -p "$tree" | cut -f 3 -d " " | grep "empty.dat" | cut -f 1 -d$'\t')" # look for lfs pointer in git blob [ "0" = "$(git cat-file -p "$emptyblob" | grep "lfs" -c)" ] assert_pointer "main" "full.dat" "$contents_oid" 4 git push origin main | tee push.log grep "Uploading LFS objects: 100% (1/1), 4 B" push.log ) end_test begin_test "pull zero len file" ( set -e clone_repo "$reponame" clone rm clone.log git status | grep -E "working (directory|tree) clean" ls -al if [ -s "empty.dat" ]; then echo "empty.dat has content:" cat empty.dat exit 1 fi [ "full" = "$(cat full.dat)" ] ) end_test git-lfs-3.6.1/t/testenv.sh000066400000000000000000000114551472372047300153740ustar00rootroot00000000000000#!/usr/bin/env bash # Including in script/integration and every t/t-*.sh file. set -e UNAME=$(uname -s) IS_WINDOWS=0 IS_MAC=0 X="" SHASUM="shasum -a 256" PATH_SEPARATOR="/" if [[ $UNAME == MINGW* || $UNAME == MSYS* || $UNAME == CYGWIN* ]] then IS_WINDOWS=1 X=".exe" # Windows might be MSYS2 which does not have the shasum Perl wrapper # script by default, so use sha256sum directly. MacOS on the other hand # does not have sha256sum, so still use shasum as the default. SHASUM="sha256sum" PATH_SEPARATOR="\\" elif [[ $UNAME == *Darwin* ]] then IS_MAC=1 fi # Convert potentially MinGW bash paths to native Windows paths # Needed to match generic built paths in test scripts to native paths generated from Go native_path() { local arg=$1 if [ $IS_WINDOWS -eq 1 ]; then # Use params form to avoid interpreting any '\' characters printf '%s' "$(cygpath -w $arg)" else printf '%s' "$arg" fi } resolve_symlink() { local arg=$1 if [ $IS_WINDOWS -eq 1 ]; then printf '%s' "$arg" elif [ $IS_MAC -eq 1 ]; then # no readlink -f on Mac local oldwd=$(pwd) local target=$arg cd `dirname $target` target=`basename $target` while [ -L "$target" ] do target=`readlink $target` cd `dirname $target` target=`basename $target` done local resolveddir=`pwd -P` cd "$oldwd" printf '%s' "$resolveddir/$target" else readlink -f "$arg" fi } # The root directory for the git-lfs repository by default. if [ -z "$ROOTDIR" ]; then ROOTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd -P)" fi # Where Git LFS outputs the compiled binaries BINPATH="$ROOTDIR/bin" # Put bin path on PATH PATH="$BINPATH:$PATH" # Always provide a test dir outside our git repo if not specified if [ "$IS_MAC" -eq 1 ]; then TEMPDIR_PREFIX="git-lfs_TEMP" else TEMPDIR_PREFIX="git-lfs_TEMP.XXXXXX" fi if [ -z "$GIT_LFS_TEST_DIR" ]; then GIT_LFS_TEST_DIR="$(mktemp -d -t "$TEMPDIR_PREFIX")" GIT_LFS_TEST_DIR="$(resolve_symlink "$GIT_LFS_TEST_DIR")" # cleanup either after single test or at end of integration (except on fail) RM_GIT_LFS_TEST_DIR="yes" fi # Make these variables available to all test files run in the same shell, # particularly when setup() is run first by itself to start a single # common lfstest-gitserver instance. export GIT_LFS_TEST_DIR RM_GIT_LFS_TEST_DIR # create a temporary work space TMPDIR=$GIT_LFS_TEST_DIR # This is unique to every test file, and cleared after every test run. TRASHDIR="$TMPDIR/$(basename "$0")-$$" # The directory that the test Git server works from. This cleared at the # beginning of every test run. REMOTEDIR="$ROOTDIR/t/remote" # The directory that stores credentials. Credentials are stored in files with # the username:password with filenames identifying the host (port numbers are # ignored). # # # stores the credentials for http://127.0.0.1:* # $CREDSDIR/127.0.0.1 # # # stores the credentials for http://git-server.com # $CREDSDIR/git-server.com # CREDSDIR="$REMOTEDIR/creds/" # This file contains the URL of the test Git server. See the "Test Suite" # section in t/README.md LFS_URL_FILE="$REMOTEDIR/url" # This file contains the SSL URL of the test Git server. See the "Test Suite" # section in t/README.md LFS_SSL_URL_FILE="$REMOTEDIR/sslurl" # This file contains the client cert SSL URL of the test Git server. See the "Test Suite" # section in t/README.md LFS_CLIENT_CERT_URL_FILE="$REMOTEDIR/clientcerturl" # This file contains the self-signed SSL cert of the TLS endpoint of the test Git server. LFS_CERT_FILE="$REMOTEDIR/cert" # This file contains the client certificate of the client cert endpoint of the test Git server. LFS_CLIENT_CERT_FILE="$REMOTEDIR/client.crt" # This file contains the client key of the client cert endpoint of the test Git server. LFS_CLIENT_KEY_FILE="$REMOTEDIR/client.key" # This file contains the encrypted client key of the client cert endpoint of the test Git server. LFS_CLIENT_KEY_FILE_ENCRYPTED="$REMOTEDIR/client.enc.key" # the fake home dir used for the initial setup TESTHOME="$REMOTEDIR/home" # This directory contains the expected output of the "git lfs completion" # command for different shells. COMPLETIONSDIR="$ROOTDIR/t/fixtures/completions" GIT_LFS_FORCE_PROGRESS=1 GIT_CONFIG_NOSYSTEM=1 GIT_TERMINAL_PROMPT=0 GIT_SSH=lfs-ssh-echo GIT_TEMPLATE_DIR="$(native_path "$ROOTDIR/t/fixtures/templates")" LC_ALL=C export CREDSDIR export GIT_LFS_FORCE_PROGRESS export GIT_CONFIG_NOSYSTEM export GIT_SSH export GIT_TEMPLATE_DIR export LC_ALL # Don't fail if run under git rebase -x. unset GIT_DIR unset GIT_WORK_TREE unset GIT_EXEC_PATH unset GIT_CHERRY_PICK_HELP mkdir -p "$TMPDIR" mkdir -p "$TRASHDIR" if [ $IS_WINDOWS -eq 1 ]; then # prevent Windows OpenSSH from opening GUI prompts SSH_ASKPASS="" fi . "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/testhelpers.sh" git-lfs-3.6.1/t/testhelpers.sh000066400000000000000000000555431472372047300162540ustar00rootroot00000000000000#!/usr/bin/env bash # assert_pointer confirms that the pointer in the repository for $path in the # given $ref matches the given $oid and $size. 
# Note that $path is prepended with a space to match against the start
# of the path field in the ls-tree output, so be careful if your test involves
# files with spaces in their paths.
#
# $ assert_pointer "main" "path/to/file" "some-oid" 123
assert_pointer() {
  local ref="$1"
  local path="$2"
  local oid="$3"
  local size="$4"

  gitblob=$(git ls-tree -lrz "$ref" |
    while read -r -d $'\0' x; do
      echo $x
    done |
    grep -F " $path" | cut -f 3 -d " ")

  actual=$(git cat-file -p $gitblob)
  expected=$(pointer $oid $size)

  if [ "$expected" != "$actual" ]; then
    exit 1
  fi
}

# refute_pointer confirms that the file in the repository for $path in the
# given $ref is _not_ a pointer.
# Note that $path is prepended with a space to match against the start
# of the path field in the ls-tree output, so be careful if your test involves
# files with spaces in their paths.
#
# $ refute_pointer "main" "path/to/file"
refute_pointer() {
  local ref="$1"
  local path="$2"

  gitblob=$(git ls-tree -lrz "$ref" |
    while read -r -d $'\0' x; do
      echo $x
    done |
    grep -F " $path" | cut -f 3 -d " ")

  file=$(git cat-file -p $gitblob)
  version="version https://git-lfs.github.com/spec/v[0-9]"
  oid="oid sha256:[0-9a-f]\{64\}"
  size="size [0-9]*"
  regex="$version.*$oid.*$size"

  if echo $file | grep -q "$regex"; then
    exit 1
  fi
}

# assert_local_object confirms that an object file is stored for the given oid
# and has the correct size
# $ assert_local_object "some-oid" size
assert_local_object() {
  local oid="$1"
  local size="$2"
  local cfg=`git lfs env | grep LocalMediaDir`
  local f="${cfg#LocalMediaDir=}/${oid:0:2}/${oid:2:2}/$oid"
  actualsize=$(wc -c <"$f" | tr -d '[[:space:]]')
  if [ "$size" != "$actualsize" ]; then
    exit 1
  fi
}

# refute_local_object confirms that an object file is NOT stored for an oid.
# If "$size" is given as the second argument, assert that the file exists _and_
# that it does _not_ have the expected size
#
# $ refute_local_object "some-oid"
# $ refute_local_object "some-oid" "123"
refute_local_object() {
  local oid="$1"
  local size="$2"
  local cfg=`git lfs env | grep LocalMediaDir`
  local f="${cfg#LocalMediaDir=}/${oid:0:2}/${oid:2:2}/$oid"
  if [ -e $f ]; then
    if [ -z "$size" ]; then
      exit 1
    fi

    actual_size="$(wc -c < "$f" | awk '{ print $1 }')"
    if [ "$size" -eq "$actual_size" ]; then
      echo >&2 "fatal: expected object $oid not to have size: $size"
      exit 1
    fi
  fi
}

# delete_local_object deletes the local storage for an oid
# $ delete_local_object "some-oid"
delete_local_object() {
  local oid="$1"
  local cfg=`git lfs env | grep LocalMediaDir`
  local f="${cfg#LocalMediaDir=}/${oid:0:2}/${oid:2:2}/$oid"
  rm "$f"
}

# corrupt_local_object corrupts the local storage for an oid
# $ corrupt_local_object "some-oid"
corrupt_local_object() {
  local oid="$1"
  local cfg=`git lfs env | grep LocalMediaDir`
  local f="${cfg#LocalMediaDir=}/${oid:0:2}/${oid:2:2}/$oid"
  cp /dev/null "$f"
}

# check that the object does not exist in the git lfs server. HTTP log is
# written to http.log. JSON output is written to http.json.
#
# $ refute_server_object "reponame" "oid"
refute_server_object() {
  local reponame="$1"
  local oid="$2"
  curl -v "$GITSERVER/$reponame.git/info/lfs/objects/batch" \
    -u "user:pass" \
    -o http.json \
    -d "{\"operation\":\"download\",\"objects\":[{\"oid\":\"$oid\"}]}" \
    -H "Accept: application/vnd.git-lfs+json" \
    -H "Content-Type: application/vnd.git-lfs+json" \
    -H "X-Check-Object: 1" \
    -H "X-Ignore-Retries: true" 2>&1 | tee http.log

  [ "0" = "$(grep -c "download" http.json)" ] || {
    cat http.json
    exit 1
  }
}

# Delete an object on the lfs server.
HTTP log is # written to http.log. JSON output is written to http.json. # # $ delete_server_object "reponame" "oid" delete_server_object() { local reponame="$1" local oid="$2" curl -v "$GITSERVER/$reponame.git/info/lfs/objects/$oid" \ -X DELETE \ -u "user:pass" \ -o http.json \ -H "Accept: application/vnd.git-lfs+json" 2>&1 | tee http.log grep "200 OK" http.log } # check that the object does exist in the git lfs server. HTTP log is written # to http.log. JSON output is written to http.json. assert_server_object() { local reponame="$1" local oid="$2" local refspec="$3" curl -v "$GITSERVER/$reponame.git/info/lfs/objects/batch" \ -u "user:pass" \ -o http.json \ -d "{\"operation\":\"download\",\"objects\":[{\"oid\":\"$oid\"}],\"ref\":{\"name\":\"$refspec\"}}" \ -H "Accept: application/vnd.git-lfs+json" \ -H "Content-Type: application/vnd.git-lfs+json" \ -H "X-Check-Object: 1" \ -H "X-Ignore-Retries: true" 2>&1 | tee http.log grep "200 OK" http.log grep "download" http.json || { cat http.json exit 1 } } # assert_remote_object() confirms that an object file with the given OID and # size is stored in the "remote" copy of a repository assert_remote_object() { local reponame="$1" local oid="$2" local size="$3" local destination="$(canonical_path "$REMOTEDIR/$reponame.git")" pushd "$destination" local cfg="$(git lfs env | grep LocalMediaDir)" local f="${cfg#LocalMediaDir=}/${oid:0:2}/${oid:2:2}/$oid" actualsize="$(wc -c <"$f" | tr -d '[[:space:]]')" [ "$size" -eq "$actualsize" ] popd } check_server_lock_ssh() { local reponame="$1" local id="$2" local refspec="$3" local destination="$(canonical_path "$REMOTEDIR/$reponame.git")" ( pktize_text 'version 1' pktize_flush pktize_text 'list-lock' pktize_text "id=$id" pktize_text "refname=$refname" pktize_flush pktize_text 'quit' pktize_flush ) | lfs-ssh-echo git@127.0.0.1 "git-lfs-transfer '$destination' download" 2>&1 } # This asserts the lock path and returns the lock ID by parsing the response of # # git lfs lock --json assert_lock() { local log="$1" local path="$2" if [ $(grep -c "\"path\":\"$path\"" "$log") -eq 0 ]; then echo "path '$path' not found in:" cat "$log" exit 1 fi local jsonid=$(grep -oh "\"id\":\"\w\+\"" "$log") echo "${jsonid:3}" | tr -d \"\: } # assert that a lock with the given ID exists on the test server assert_server_lock() { local reponame="$1" local id="$2" local refspec="$3" curl -v "$GITSERVER/$reponame.git/info/lfs/locks?refspec=$refspec" \ -u "user:pass" \ -o http.json \ -H "Accept:application/vnd.git-lfs+json" 2>&1 | tee http.log grep "200 OK" http.log grep "$id" http.json || { cat http.json exit 1 } } # assert that a lock with the given ID exists on the test server assert_server_lock_ssh() { local reponame="$1" local id="$2" local refspec="$3" check_server_lock_ssh "$reponame" "$id" "$refspec" | tee output.log grep "status 200" output.log grep "$id" output.log || { cat output.log exit 1 } } # refute that a lock with the given ID exists on the test server refute_server_lock() { local reponame="$1" local id="$2" local refspec="$3" curl -v "$GITSERVER/$reponame.git/info/lfs/locks?refspec=$refspec" \ -u "user:pass" \ -o http.json \ -H "Accept:application/vnd.git-lfs+json" 2>&1 | tee http.log grep "200 OK" http.log [ $(grep -c "$id" http.json) -eq 0 ] } # refute that a lock with the given ID exists on the test server refute_server_lock_ssh() { local reponame="$1" local id="$2" local refspec="$3" local destination="$(canonical_path "$REMOTEDIR/$reponame.git")" check_server_lock_ssh "$reponame" "$id" "$refspec" | tee output.log grep 
"status 200" output.log if grep "$id" output.log then cat output.log exit 1 fi } # Assert that .gitattributes contains a given attribute N times assert_attributes_count() { local fileext="$1" local attrib="$2" local count="$3" pattern="\(*.\)\?$fileext\(.*\)$attrib" actual=$(grep -e "$pattern" .gitattributes | wc -l) if [ "$(printf "%d" "$actual")" != "$count" ]; then echo "wrong number of $attrib entries for $fileext" echo "expected: $count actual: $actual" cat .gitattributes exit 1 fi } assert_file_writeable() { ls -l "$1" | grep -e "^-rw" } refute_file_writeable() { ls -l "$1" | grep -e "^-r-" } git_root() { git rev-parse --show-toplevel 2>/dev/null } dot_git_dir() { echo "$(git_root)/.git" } assert_hooks() { local git_root="$1" if [ -z "$git_root" ]; then echo >&2 "fatal: (assert_hooks) not in git repository" exit 1 fi [ -x "$git_root/hooks/post-checkout" ] [ -x "$git_root/hooks/post-commit" ] [ -x "$git_root/hooks/post-merge" ] [ -x "$git_root/hooks/pre-push" ] } assert_clean_status() { status="$(git status)" echo "$status" | grep "working tree clean" || { echo $status git lfs status } } # pointer returns a string Git LFS pointer file. # # $ pointer abc-some-oid 123 # > version ... pointer() { local oid=$1 local size=$2 local version=${3:-https://git-lfs.github.com/spec/v1} printf "version %s oid sha256:%s size %s " "$version" "$oid" "$size" } # wait_for_file simply sleeps until a file exists. # # $ wait_for_file "path/to/upcoming/file" wait_for_file() { local filename="$1" n=0 wait_time=1 while [ $n -lt 17 ]; do if [ -s $filename ]; then return 0 fi sleep $wait_time n=`expr $n + 1` if [ $wait_time -lt 4 ]; then wait_time=`expr $wait_time \* 2` fi done echo "$filename did not appear after 60 seconds." return 1 } # setup_remote_repo initializes a bare Git repository that is accessible through # the test Git server. The `pwd` is set to the repository's directory, in case # further commands need to be run. This server is running for every test in an # integration run, so every test file should setup its own remote repository to # avoid conflicts. # # $ setup_remote_repo "some-name" # setup_remote_repo() { local reponame="$1" echo "set up remote git repository: $reponame" repodir="$REMOTEDIR/$reponame.git" mkdir -p "$repodir" cd "$repodir" git init --bare git config http.receivepack true git config receive.denyCurrentBranch ignore } # creates a bare remote repository for a local clone. Useful to test pushing to # a fresh remote server. # # $ setup_alternate_remote "$reponame-whatever" # $ setup_alternate_remote "$reponame-whatever" "other-remote-name" # setup_alternate_remote() { local newRemoteName=$1 local remote=${2:-origin} wd=`pwd` setup_remote_repo "$newRemoteName" cd $wd git remote rm "$remote" git remote add "$remote" "$GITSERVER/$newRemoteName" } # clone_repo clones a repository from the test Git server to the subdirectory # $dir under $TRASHDIR. setup_remote_repo() needs to be run first. Output is # written to clone.log. clone_repo() { cd "$TRASHDIR" local reponame="$1" local dir="$2" echo "clone local git repository $reponame to $dir" git clone "$GITSERVER/$reponame" "$dir" 2>&1 | tee clone.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then return 1 fi cd "$dir" mv ../clone.log . git config credential.helper lfstest } # clone_repo_url clones a Git repository to the subdirectory $dir under $TRASHDIR. # setup_remote_repo() needs to be run first. Output is written to clone.log. 
clone_repo_url() { cd "$TRASHDIR" local repo="$1" local dir="$2" echo "clone git repository $repo to $dir" git clone "$repo" "$dir" 2>&1 | tee clone.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then return 1 fi cd "$dir" mv ../clone.log . git config credential.helper lfstest } # clone_repo_ssl clones a repository from the test Git server to the subdirectory # $dir under $TRASHDIR, using the SSL endpoint. # setup_remote_repo() needs to be run first. Output is written to clone_ssl.log. clone_repo_ssl() { cd "$TRASHDIR" local reponame="$1" local dir="$2" echo "clone local git repository $reponame to $dir" git clone "$SSLGITSERVER/$reponame" "$dir" 2>&1 | tee clone_ssl.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then return 1 fi cd "$dir" mv ../clone_ssl.log . git config credential.helper lfstest } # clone_repo_clientcert clones a repository from the test Git server to the subdirectory # $dir under $TRASHDIR, using the client cert endpoint. # setup_remote_repo() needs to be run first. Output is written to clone_client_cert.log. clone_repo_clientcert() { cd "$TRASHDIR" local reponame="$1" local dir="$2" echo "clone $CLIENTCERTGITSERVER/$reponame to $dir" git clone "$CLIENTCERTGITSERVER/$reponame" "$dir" 2>&1 | tee clone_client_cert.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then return 1 fi cd "$dir" mv ../clone_client_cert.log . git config credential.helper lfstest } # setup_remote_repo_with_file creates a remote repo, clones it locally, commits # a file tracked by LFS, and pushes it to the remote: # # setup_remote_repo_with_file "reponame" "filename" setup_remote_repo_with_file() { local reponame="$1" local filename="$2" local dirname="$(dirname "$filename")" setup_remote_repo "$reponame" clone_repo "$reponame" "clone_$reponame" mkdir -p "$dirname" git lfs track "$filename" echo "$filename" > "$filename" git add .gitattributes $filename git commit -m "add $filename" | tee commit.log grep "main (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 $filename" commit.log grep "create mode 100644 .gitattributes" commit.log git push origin main 2>&1 | tee push.log grep "main -> main" push.log } # substring_position returns the position of a substring in a 1-indexed search # space. # # [ "$(substring_position "foo bar baz" "baz")" -eq "9" ] substring_position() { local str="$1" local substr="$2" # 1) Print the string... # 2) Remove the substring and everything after it # 3) Count the number of characters (bytes) left, i.e., the offset of the # string we were looking for. echo "$str" \ | sed "s/$substr.*$//" \ | wc -c } # repo_endpoint returns the LFS endpoint for a given server and repository. # # [ "$GITSERVER/example/repo.git/info/lfs" = "$(repo_endpoint $GITSERVER example-repo)" ] repo_endpoint() { local server="$1" local repo="$2" echo "$server/$repo.git/info/lfs" } # write_creds_file writes credentials to a file iff it doesn't exist. write_creds_file() { local creds="$1" local file="$2" if [ ! -f "$file" ] then printf "%s" "$creds" > "$file" fi } setup_creds() { mkdir -p "$CREDSDIR" write_creds_file ":user:pass" "$CREDSDIR/127.0.0.1" } # setup initializes the clean, isolated environment for integration tests. setup() { cd "$ROOTDIR" if [ ! 
-d "$REMOTEDIR" ]; then mkdir "$REMOTEDIR" fi echo "# Git LFS: ${LFS_BIN:-$(command -v git-lfs)}" git lfs version | sed -e 's/^/# /g' git version | sed -e 's/^/# /g' LFSTEST_URL="$LFS_URL_FILE" \ LFSTEST_SSL_URL="$LFS_SSL_URL_FILE" \ LFSTEST_CLIENT_CERT_URL="$LFS_CLIENT_CERT_URL_FILE" \ LFSTEST_DIR="$REMOTEDIR" \ LFSTEST_CERT="$LFS_CERT_FILE" \ LFSTEST_CLIENT_CERT="$LFS_CLIENT_CERT_FILE" \ LFSTEST_CLIENT_KEY="$LFS_CLIENT_KEY_FILE" \ LFSTEST_CLIENT_KEY_ENCRYPTED="$LFS_CLIENT_KEY_FILE_ENCRYPTED" \ lfstest-count-tests increment wait_for_file "$LFS_URL_FILE" wait_for_file "$LFS_SSL_URL_FILE" wait_for_file "$LFS_CLIENT_CERT_URL_FILE" wait_for_file "$LFS_CERT_FILE" wait_for_file "$LFS_CLIENT_CERT_FILE" wait_for_file "$LFS_CLIENT_KEY_FILE" wait_for_file "$LFS_CLIENT_KEY_FILE_ENCRYPTED" LFS_CLIENT_CERT_URL=`cat $LFS_CLIENT_CERT_URL_FILE` # Set up the initial git config and osx keychain if applicable HOME="$TESTHOME" if [ ! -d "$HOME" ]; then mkdir "$HOME" fi # do not let Git use a different configuration file unset GIT_CONFIG unset XDG_CONFIG_HOME if [ ! -f $HOME/.gitconfig ]; then git lfs install --skip-repo git config --global credential.usehttppath true git config --global credential.helper lfstest git config --global user.name "Git LFS Tests" git config --global user.email "git-lfs@example.com" git config --global http.sslcainfo "$LFS_CERT_FILE" git config --global init.defaultBranch main fi | sed -e 's/^/# /g' # setup the git credential password storage setup_creds echo "#" echo "# HOME: $HOME" echo "# TMP: $TMPDIR" echo "# CREDS: $CREDSDIR" echo "# lfstest-gitserver:" echo "# LFSTEST_URL=$LFS_URL_FILE" echo "# LFSTEST_SSL_URL=$LFS_SSL_URL_FILE" echo "# LFSTEST_CLIENT_CERT_URL=$LFS_CLIENT_CERT_URL_FILE ($LFS_CLIENT_CERT_URL)" echo "# LFSTEST_CERT=$LFS_CERT_FILE" echo "# LFSTEST_CLIENT_CERT=$LFS_CLIENT_CERT_FILE" echo "# LFSTEST_CLIENT_KEY=$LFS_CLIENT_KEY_FILE" echo "# LFSTEST_CLIENT_KEY_ENCRYPTED=$LFS_CLIENT_KEY_FILE_ENCRYPTED" echo "# LFSTEST_DIR=$REMOTEDIR" } # shutdown cleans the $TRASHDIR and shuts the test Git server down. shutdown() { # every t/t-*.sh file should cleanup its trashdir [ -z "$KEEPTRASH" ] && rm -rf "$TRASHDIR" LFSTEST_DIR="$REMOTEDIR" \ LFS_URL_FILE="$LFS_URL_FILE" \ lfstest-count-tests decrement # delete entire lfs test root if we created it (double check pattern) if [ -z "$KEEPTRASH" ] && [ "$RM_GIT_LFS_TEST_DIR" = "yes" ] && [[ $GIT_LFS_TEST_DIR == *"$TEMPDIR_PREFIX"* ]]; then rm -rf "$GIT_LFS_TEST_DIR" fi } tap_show_plan() { local tests="$1" printf "1..%i\n" "$tests" } ensure_git_version_isnt() { local expectedComparison=$1 local version=$2 local gitVersion=$(git version | cut -d" " -f3) set +e compare_version $gitVersion $version result=$? set -e if [[ $result == $expectedComparison ]]; then echo "skip: $0 (git version $(comparison_to_operator $expectedComparison) $version)" exit fi } VERSION_EQUAL=0 VERSION_HIGHER=1 VERSION_LOWER=2 # Compare $1 and $2 and return VERSION_EQUAL / VERSION_LOWER / VERSION_HIGHER compare_version() { if [[ $1 == $2 ]] then return $VERSION_EQUAL fi local IFS=. 
local i ver1=($1) ver2=($2) # fill empty fields in ver1 with zeros for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) do ver1[i]=0 done for ((i=0; i<${#ver1[@]}; i++)) do if [[ -z ${ver2[i]} ]] then # fill empty fields in ver2 with zeros ver2[i]=0 fi if ((10#${ver1[i]} > 10#${ver2[i]})) then return $VERSION_HIGHER fi if ((10#${ver1[i]} < 10#${ver2[i]})) then return $VERSION_LOWER fi done return $VERSION_EQUAL } comparison_to_operator() { local comparison=$1 if [[ $1 == $VERSION_EQUAL ]]; then echo "==" elif [[ $1 == $VERSION_HIGHER ]]; then echo ">" elif [[ $1 == $VERSION_LOWER ]]; then echo "<" else echo "???" fi } # Calculate the object ID from the string passed as the argument calc_oid() { printf "$1" | $SHASUM | cut -f 1 -d " " } # Calculate the object ID from the file passed as the argument calc_oid_file() { $SHASUM "$1" | cut -f 1 -d " " } # Get a date string with an offset # Args: One or more date offsets of the form (regex) "[+-]\d+[dmyHM]" # e.g. +1d = 1 day forward from today # -5y = 5 years before today # Example call: # D=$(get_date +1y +1m -5H) # returns date as string in RFC3339 format ccyy-mm-ddThh:MM:ssZ # note returns in UTC time not local time hence Z and not +/- get_date() { # Wrapped because BSD (inc OSX) & GNU 'date' functions are different # on Windows under Git Bash it's GNU if date --version >/dev/null 2>&1 ; then # GNU ARGS="" for var in "$@" do # GNU offsets are more verbose unit=${var: -1} val=${var:0:${#var}-1} case "$unit" in d) unit="days" ;; m) unit="months" ;; y) unit="years" ;; H) unit="hours" ;; M) unit="minutes" ;; esac ARGS="$ARGS $val $unit" done date -d "$ARGS" -u +%Y-%m-%dT%TZ else # BSD ARGS="" for var in "$@" do ARGS="$ARGS -v$var" done date $ARGS -u +%Y-%m-%dT%TZ fi } # escape any instance of '\' with '\\' on Windows escape_path() { local unescaped="$1" if [ $IS_WINDOWS -eq 1 ]; then printf '%s' "${unescaped//\\/\\\\}" else printf '%s' "$unescaped" fi } # As native_path but escape all backslash characters to "\\" native_path_escaped() { local unescaped=$(native_path "$1") escape_path "$unescaped" } # native_path_list_separator prints the operating system-specific path list # separator. native_path_list_separator() { if [ "$IS_WINDOWS" -eq 1 ]; then printf ";"; else printf ":"; fi } # canonical_path prints the native path name in a canonical form, as if # realpath(3) were called on it. canonical_path() { printf "%s" "$(lfstest-realpath "$(native_path "$1")")" } # canonical_path_escaped prints the native path name in a canonical form, as if # realpath(3) were called on it, and then escapes it. canonical_path_escaped() { printf "%s" "$(escape_path "$(lfstest-realpath "$(native_path "$1")")")" } cat_end() { if [ $IS_WINDOWS -eq 1 ]; then printf '^M$' else printf '$' fi } # Compare 2 lists which are newline-delimited in a string, ignoring ordering and blank lines contains_same_elements() { # Remove blank lines then sort diff -u <(printf '%s' "$1" | grep -v '^$' | sort) <(printf '%s' "$2" | grep -v '^$' | sort) } is_stdin_attached() { test -t0 echo $? } has_test_dir() { if [ -z "$GIT_LFS_TEST_DIR" ]; then echo "No GIT_LFS_TEST_DIR. Skipping..." 
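    # A clean exit before any begin_test call leaves the TAP plan at "1..0",
    # so the harness reports this file as skipped rather than failed.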
exit 0 fi } add_symlink() { local src=$1 local dest=$2 prefix=`git rev-parse --show-prefix` hashsrc=`printf "$src" | git hash-object -w --stdin` git update-index --add --cacheinfo 120000 "$hashsrc" "$prefix$dest" git checkout -- "$dest" } urlify() { if [ "$IS_WINDOWS" -eq 1 ] then local prefix="" path="$(canonical_path "$1")" if echo "$path" | grep -qsv "^/" then prefix="/" fi echo "$prefix$path" | sed -e 's,\\,/,g' -e 's,:,%3a,g' -e 's, ,%20,g' else echo "$1" fi } setup_pure_ssh() { export PATH="$ROOTDIR/t/scutiger/bin:$PATH" if ! command -v git-lfs-transfer >/dev/null 2>&1 then if [ -z "$CI" ] || [ -n "$TEST_SKIP_LFS_TRANSFER" ] then echo "No git-lfs-transfer. Skipping..." exit 0 else echo "No git-lfs-transfer. Failing.." exit 1 fi elif [ "$GIT_DEFAULT_HASH" = sha256 ] then # Scutiger's git-lfs-transfer uses libgit2, which doesn't yet do SHA-256 # repos. echo "Using SHA-256 repositories. Skipping..." exit 0 fi } ssh_remote() { local reponame="$1" local destination=$(urlify "$(canonical_path "$REMOTEDIR/$reponame.git")") # Prepend a slash iff it lacks one. Windows compatibiity. [ -z "${destination##/*}" ] || destination="/$destination" echo "ssh://git@127.0.0.1$destination" } # Create a pkt-line message from s, which is an argument string to printf(1). pktize() { local s="$1" local len=$(printf "$s" | wc -c) printf "%04x$s" $((len + 4)) } pktize_text() { local s="$1" pktize "$s"'\n' } pktize_delim() { printf '0001' } pktize_flush() { printf '0000' } git-lfs-3.6.1/t/testlib.sh000066400000000000000000000053511472372047300153500ustar00rootroot00000000000000#!/usr/bin/env bash # Usage: . testlib.sh # Simple shell command language test library. # # Tests must follow the basic form: # # begin_test "the thing" # ( # set -e # echo "hello" # false # ) # end_test # # When a test fails its stdout and stderr are shown. # # Note that tests must `set -e' within the subshell block or failed assertions # will not cause the test to fail and the result may be misreported. # # Copyright (c) 2011-13 by Ryan Tomayko # License: MIT fullfile="$(pwd)/$0" . "$(dirname "$0")/testenv.sh" set -e # keep track of num tests and failures tests=0 failures=0 # this runs at process exit atexit () { tap_show_plan "$tests" shutdown if [ $failures -gt 0 ]; then exit 1 fi exit 0 } # create the trash dir trap "atexit" SIGKILL SIGINT SIGTERM EXIT GITSERVER=undefined setup GITSERVER=$(cat "$LFS_URL_FILE") SSLGITSERVER=$(cat "$LFS_SSL_URL_FILE") CLIENTCERTGITSERVER=$(cat "$LFS_CLIENT_CERT_URL_FILE") cd "$TRASHDIR" # Mark the beginning of a test. A subshell should immediately follow this # statement. begin_test () { test_status=$? [ -n "$test_description" ] && end_test $test_status unset test_status tests=$(( tests + 1 )) test_description="$1" exec 3>&1 4>&2 out="$TRASHDIR/out" err="$TRASHDIR/err" trace="$TRASHDIR/trace" exec 1>"$out" 2>"$err" # enabling GIT_TRACE can cause Windows git to stall, esp with fd 5 # other fd numbers like 8/9 don't stall but still don't work, so disable if [ $IS_WINDOWS -eq 0 ]; then exec 5>"$trace" export GIT_TRACE=5 fi # reset global git config HOME="$TRASHDIR/home" rm -rf "$TRASHDIR/home" mkdir "$HOME" cp "$TESTHOME/.gitconfig" "$HOME/.gitconfig" # allow the subshell to exit non-zero without exiting this process set -x +e } # Mark the end of a test. end_test () { test_status="${1:-$?}" set +x -e exec 1>&3 2>&4 # close fd 5 (GIT_TRACE) exec 5>&- local dump_output="$LFS_DUMP_TEST_OUTPUT" if [ "$test_status" -eq 0 ]; then printf "ok %d - %-60s\n" "$tests" "$test_description ..." 
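    # On failure, the else branch below emits the matching TAP line, e.g.:
    #   not ok 3 - push zero len file ...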
else failures=$(( failures + 1 )) printf "not ok %d - %-60s\n" "$tests" "$test_description ..." dump_output=1 fi if [ -n "$dump_output" ] then ( echo "# -- stdout --" sed 's/^/# /' <"$TRASHDIR/out" echo "# -- stderr --" grep -v -e '^\+ end_test' -e '^+ set +x' <"$TRASHDIR/err" | sed 's/^/# /' if [ $IS_WINDOWS -eq 0 ]; then echo "# -- git trace --" sed 's/^/# /' <"$TRASHDIR/trace" fi ) 1>&2 echo fi unset test_description } git-lfs-3.6.1/tasklog/000077500000000000000000000000001472372047300145435ustar00rootroot00000000000000git-lfs-3.6.1/tasklog/list_task.go000066400000000000000000000020631472372047300170700ustar00rootroot00000000000000package tasklog import ( "fmt" "time" ) // ListTask is a Task implementation that logs all updates in a list where each // entry is line-delimited. // // For example: // // entry #1 // entry #2 // msg: ..., done. type ListTask struct { msg string ch chan *Update } // NewListTask instantiates a new *ListTask instance with the given message. func NewListTask(msg string) *ListTask { return &ListTask{ msg: msg, ch: make(chan *Update, 1), } } // Entry logs a line-delimited task entry. func (l *ListTask) Entry(update string) { l.ch <- &Update{ S: fmt.Sprintf("%s\n", update), At: time.Now(), } } func (l *ListTask) Complete() { l.ch <- &Update{ S: fmt.Sprintf("%s: ...", l.msg), At: time.Now(), } close(l.ch) } // Throttled implements the Task.Throttled function and ensures that all log // updates are printed to the sink. func (l *ListTask) Throttled() bool { return false } // Updates implements the Task.Updates function and returns a channel of updates // to log to the sink. func (l *ListTask) Updates() <-chan *Update { return l.ch } git-lfs-3.6.1/tasklog/list_task_test.go000066400000000000000000000022151472372047300201260ustar00rootroot00000000000000package tasklog import ( "testing" "github.com/stretchr/testify/assert" ) func TestListTaskCallsDoneWhenComplete(t *testing.T) { task := NewListTask("example") task.Complete() select { case update, ok := <-task.Updates(): assert.Equal(t, "example: ...", update.S) assert.True(t, ok, "tasklog: expected Updates() to remain open") default: t.Fatal("tasklog: expected update from *ListTask") } select { case update, ok := <-task.Updates(): assert.False(t, ok, "git/githistory.log: unexpected *ListTask.Update(): %s", update) default: t.Fatal("tasklog: expected *ListTask.Updates() to be closed") } } func TestListTaskWritesEntries(t *testing.T) { task := NewListTask("example") task.Entry("1") select { case update, ok := <-task.Updates(): assert.True(t, ok, "tasklog: expected ListTask.Updates() to remain open") assert.Equal(t, "1\n", update.S) default: t.Fatal("tasklog: expected task.Updates() to have an update") } } func TestListTaskIsNotThrottled(t *testing.T) { task := NewListTask("example") throttled := task.Throttled() assert.False(t, throttled, "tasklog: expected *ListTask to be Throttle()-d") } git-lfs-3.6.1/tasklog/log.go000066400000000000000000000137411472372047300156610ustar00rootroot00000000000000package tasklog import ( "fmt" "io" "os" "strings" "sync" "time" "github.com/git-lfs/git-lfs/v3/tools" isatty "github.com/mattn/go-isatty" "github.com/olekukonko/ts" ) const ( DefaultLoggingThrottle = 200 * time.Millisecond ) // Logger logs a series of tasks to an io.Writer, processing each task in order // until completion . type Logger struct { // sink is the writer to write to. sink io.Writer // widthFn is a function that returns the width of the terminal that // this logger is running within. 
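	// The returned width is used by logLine to pad each status line
	// with trailing spaces so that a shorter update fully overwrites
	// the previous, longer one before the carriage return.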
widthFn func() int

	// tty is true if sink is connected to a terminal
	tty bool

	// forceProgress forces progress status even when stdout is not a tty
	forceProgress bool

	// throttle is the minimum amount of time that must pass between each
	// instant data is logged.
	throttle time.Duration

	// queue is the incoming, unbuffered queue of tasks to enqueue.
	queue chan Task

	// tasks is the set of tasks to process.
	tasks chan Task

	// wg is a WaitGroup that is incremented when new tasks are enqueued,
	// and decremented when tasks finish.
	wg *sync.WaitGroup
}

// Option is the type of a configuration function that can be applied to a
// *Logger when it is constructed.
type Option func(*Logger)

// ForceProgress returns an options function that configures forced progress status
// on the logger.
func ForceProgress(v bool) Option {
	return func(l *Logger) {
		l.forceProgress = v
	}
}

// NewLogger returns a new *Logger instance that logs to "sink" and uses the
// current terminal width as the width of the line. Will log progress status if
// stdout is a terminal or if forceProgress is true.
func NewLogger(sink io.Writer, options ...Option) *Logger {
	if sink == nil {
		sink = io.Discard
	}

	l := &Logger{
		sink:     sink,
		throttle: DefaultLoggingThrottle,
		widthFn: func() int {
			size, err := ts.GetSize()
			if err != nil {
				return 80
			}
			return size.Col()
		},
		queue: make(chan Task),
		tasks: make(chan Task),
		wg:    new(sync.WaitGroup),
	}

	for _, option := range options {
		option(l)
	}

	l.tty = tty(sink)

	go l.consume()

	return l
}

type hasFd interface {
	Fd() uintptr
}

// tty returns true if the writer is connected to a tty
func tty(writer io.Writer) bool {
	if v, ok := writer.(hasFd); ok {
		return isatty.IsTerminal(v.Fd()) || isatty.IsCygwinTerminal(v.Fd())
	}
	return false
}

// Close closes the queue and does not allow new Tasks to be `enqueue()`'d. It
// waits until the currently running Task has completed.
func (l *Logger) Close() {
	if l == nil {
		return
	}

	close(l.queue)

	l.wg.Wait()
}

// Waiter creates and enqueues a new *WaitingTask.
func (l *Logger) Waiter(msg string) *WaitingTask {
	t := NewWaitingTask(msg)
	l.Enqueue(t)

	return t
}

// Percentage creates and enqueues a new *PercentageTask.
func (l *Logger) Percentage(msg string, total uint64) *PercentageTask {
	t := NewPercentageTask(msg, total)
	l.Enqueue(t)

	return t
}

// List creates and enqueues a new *ListTask.
func (l *Logger) List(msg string) *ListTask {
	t := NewListTask(msg)
	l.Enqueue(t)

	return t
}

// Simple creates and enqueues a new *SimpleTask.
func (l *Logger) Simple() *SimpleTask {
	t := NewSimpleTask()
	l.Enqueue(t)

	return t
}

// Enqueue enqueues the given Tasks "ts".
func (l *Logger) Enqueue(ts ...Task) {
	if l == nil {
		for _, t := range ts {
			if t == nil {
				// NOTE: Do not allow nil tasks which are unable
				// to be completed.
				continue
			}

			go func(t Task) {
				for range t.Updates() {
					// Discard all updates.
				}
			}(t)
		}
		return
	}

	l.wg.Add(len(ts))

	for _, t := range ts {
		if t == nil {
			// NOTE: See above.
			continue
		}

		l.queue <- t
	}
}

// consume creates a pseudo-infinite buffer between the incoming set of tasks
// and the queue of tasks to work on.
func (l *Logger) consume() {
	go func() {
		// Process the single next task in sequence until completion,
		// then consume the next task.
		for task := range l.tasks {
			l.logTask(task)
		}
	}()

	defer close(l.tasks)

	for {
		// Wait for either a) l.queue to close, or b) a new task
		// to be submitted.
		task, ok := <-l.queue
		if !ok {
			// If the queue is closed, no more new tasks may
			// be added.
			return
		}

		// Otherwise, add a new task to the set of tasks to
		// process immediately.
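		// Because l.tasks is unbuffered, this send blocks while the
		// goroutine above is still logging an earlier task, which is
		// what keeps task output serialized.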
l.tasks <- task } } // logTask logs the set of updates from a given task to the sink, then logs a // "done." message, and then marks the task as done. // // By default, the *Logger throttles log entry updates to once per the duration // of time specified by `l.throttle time.Duration`. // // If the duration if 0, or the task is "durable" (by implementing // github.com/git-lfs/git-lfs/tasklog#DurableTask), then all entries will be // logged. func (l *Logger) logTask(task Task) { defer l.wg.Done() logAll := !task.Throttled() var last time.Time var update *Update for update = range task.Updates() { if !tty(os.Stdout) && !l.forceProgress { continue } if logAll || l.throttle == 0 || !update.Throttled(last.Add(l.throttle)) { l.logLine(update.S) last = update.At } } if update != nil { // If a task sent no updates, the last recorded update will be // nil. Given this, only log a message when there was at least // (1) update. l.log(fmt.Sprintf("%s, done.\n", update.S)) } if v, ok := task.(interface { // OnComplete is called after the Task "task" is closed, but // before new tasks are accepted. OnComplete() }); ok { // If the Task implements this interface, call it and block // before accepting new tasks. v.OnComplete() } } // logLine writes a complete line and moves the cursor to the beginning of the // line. // // It returns the number of bytes "n" written to the sink and the error "err", // if one was encountered. func (l *Logger) logLine(str string) (n int, err error) { padding := strings.Repeat(" ", tools.MaxInt(0, l.widthFn()-len(str))) return l.log(str + padding + "\r") } // log writes a string verbatim to the sink. // // It returns the number of bytes "n" written to the sink and the error "err", // if one was encountered. func (l *Logger) log(str string) (n int, err error) { return fmt.Fprint(l.sink, str) } git-lfs-3.6.1/tasklog/log_test.go000066400000000000000000000115721472372047300167200ustar00rootroot00000000000000package tasklog import ( "bytes" "strings" "testing" "time" "github.com/stretchr/testify/assert" ) type ChanTask chan *Update func (e ChanTask) Updates() <-chan *Update { return e } func (e ChanTask) Throttled() bool { return true } type UnthrottledChanTask chan *Update func (e UnthrottledChanTask) Updates() <-chan *Update { return e } func (e UnthrottledChanTask) Throttled() bool { return false } func TestLoggerLogsTasks(t *testing.T) { var buf bytes.Buffer task := make(chan *Update) go func() { task <- &Update{"first", time.Now(), false} task <- &Update{"second", time.Now(), false} close(task) }() l := NewLogger(&buf, ForceProgress(true)) l.throttle = 0 l.widthFn = func() int { return 0 } l.Enqueue(ChanTask(task)) l.Close() assert.Equal(t, "first\rsecond\rsecond, done.\n", buf.String()) } func TestLoggerLogsSuppressesProgress(t *testing.T) { var buf bytes.Buffer task := make(chan *Update) go func() { task <- &Update{"first", time.Now(), false} task <- &Update{"second", time.Now(), false} close(task) }() l := NewLogger(&buf, ForceProgress(false)) l.throttle = 0 l.widthFn = func() int { return 0 } l.Enqueue(ChanTask(task)) l.Close() assert.Equal(t, "second, done.\n", buf.String()) } func TestLoggerLogsMultipleTasksInOrder(t *testing.T) { var buf bytes.Buffer t1 := make(chan *Update) go func() { t1 <- &Update{"first", time.Now(), false} t1 <- &Update{"second", time.Now(), false} close(t1) }() t2 := make(chan *Update) go func() { t2 <- &Update{"third", time.Now(), false} t2 <- &Update{"fourth", time.Now(), false} close(t2) }() l := NewLogger(&buf, ForceProgress(true)) l.throttle = 
0 l.widthFn = func() int { return 0 } l.Enqueue(ChanTask(t1), ChanTask(t2)) l.Close() assert.Equal(t, strings.Join([]string{ "first\r", "second\r", "second, done.\n", "third\r", "fourth\r", "fourth, done.\n", }, ""), buf.String()) } func TestLoggerLogsMultipleTasksWithoutBlocking(t *testing.T) { var buf bytes.Buffer l := NewLogger(&buf, ForceProgress(true)) l.throttle = 0 t1, t2 := make(chan *Update), make(chan *Update) l.widthFn = func() int { return 0 } l.Enqueue(ChanTask(t1)) t1 <- &Update{"first", time.Now(), false} l.Enqueue(ChanTask(t2)) close(t1) t2 <- &Update{"second", time.Now(), false} close(t2) l.Close() assert.Equal(t, strings.Join([]string{ "first\r", "first, done.\n", "second\r", "second, done.\n", }, ""), buf.String()) } func TestLoggerThrottlesWrites(t *testing.T) { var buf bytes.Buffer t1 := make(chan *Update) go func() { start := time.Now() t1 <- &Update{"first", start, false} // t = 0 ms, throttle was open t1 <- &Update{"forced", start.Add(10 * time.Millisecond), true} // t = 10+ε ms, throttle is closed t1 <- &Update{"second", start.Add(10 * time.Millisecond), false} // t = 10+ε ms, throttle is closed t1 <- &Update{"third", start.Add(26 * time.Millisecond), false} // t = 20+ε ms, throttle was open close(t1) // t = 20+2ε ms, throttle is closed }() l := NewLogger(&buf, ForceProgress(true)) l.widthFn = func() int { return 0 } l.throttle = 15 * time.Millisecond l.Enqueue(ChanTask(t1)) l.Close() assert.Equal(t, strings.Join([]string{ "first\r", "forced\r", "third\r", "third, done.\n", }, ""), buf.String()) } func TestLoggerThrottlesLastWrite(t *testing.T) { var buf bytes.Buffer t1 := make(chan *Update) go func() { start := time.Now() t1 <- &Update{"first", start, false} // t = 0 ms, throttle was open t1 <- &Update{"second", start.Add(10 * time.Millisecond), false} // t = 10+ε ms, throttle is closed close(t1) // t = 10+2ε ms, throttle is closed }() l := NewLogger(&buf, ForceProgress(true)) l.widthFn = func() int { return 0 } l.throttle = 15 * time.Millisecond l.Enqueue(ChanTask(t1)) l.Close() assert.Equal(t, strings.Join([]string{ "first\r", "second, done.\n", }, ""), buf.String()) } func TestLoggerLogsAllDurableUpdates(t *testing.T) { var buf bytes.Buffer l := NewLogger(&buf, ForceProgress(true)) l.widthFn = func() int { return 0 } l.throttle = 15 * time.Minute t1 := make(chan *Update) go func() { t1 <- &Update{"first", time.Now(), false} // t = 0+ε ms, throttle is open t1 <- &Update{"second", time.Now(), false} // t = 0+2ε ms, throttle is closed close(t1) // t = 0+3ε ms, throttle is closed }() l.Enqueue(UnthrottledChanTask(t1)) l.Close() assert.Equal(t, strings.Join([]string{ "first\r", "second\r", "second, done.\n", }, ""), buf.String()) } func TestLoggerHandlesSilentTasks(t *testing.T) { var buf bytes.Buffer task := make(chan *Update) close(task) l := NewLogger(&buf, ForceProgress(true)) l.Enqueue(ChanTask(task)) l.Close() assert.Equal(t, "", buf.String()) } git-lfs-3.6.1/tasklog/percentage_task.go000066400000000000000000000050441472372047300202340ustar00rootroot00000000000000package tasklog import ( "fmt" "math" "sync/atomic" "time" "github.com/git-lfs/git-lfs/v3/tr" ) // PercentageTask is a task that is performed against a known number of // elements. type PercentageTask struct { // members managed via sync/atomic must be aligned at the top of this // structure (see: https://github.com/git-lfs/git-lfs/pull/2880). // n is the number of elements whose work has been completed. It is // managed sync/atomic. 
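	// (64-bit atomic operands must be 8-byte aligned on 32-bit platforms,
	// which is why this field is kept first in the struct.)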
n uint64 // total is the total number of elements to execute work upon. total uint64 // msg is the task message. msg string // ch is a channel which is written to when the task state changes and // is closed when the task is completed. ch chan *Update } func NewPercentageTask(msg string, total uint64) *PercentageTask { p := &PercentageTask{ msg: msg, total: total, ch: make(chan *Update, 1), } p.Count(0) return p } // Count indicates that work has been completed against "n" number of elements, // marking the task as complete if the total "n" given to all invocations of // this method is equal to total. // // Count returns the new total number of (atomically managed) elements that have // been completed. func (c *PercentageTask) Count(n uint64) (new uint64) { if new = atomic.AddUint64(&c.n, n); new > c.total { panic(fmt.Sprintf("tasklog: %s", tr.Tr.Get("counted too many items"))) } var percentage float64 if c.total == 0 { percentage = 100 } else { percentage = 100 * float64(new) / float64(c.total) } c.ch <- &Update{ S: fmt.Sprintf("%s: %3.f%% (%d/%d)", c.msg, math.Floor(percentage), new, c.total), At: time.Now(), } if new >= c.total { close(c.ch) } return new } // Entry logs a line-delimited task entry. func (c *PercentageTask) Entry(update string) { c.ch <- &Update{ S: fmt.Sprintf("%s\n", update), At: time.Now(), Force: true, } } // Complete notes that the task is completed by setting the number of // completed elements to the total number of elements, and if necessary // closing the Updates channel, which yields the logger to the next Task. func (c *PercentageTask) Complete() { if count := atomic.SwapUint64(&c.n, c.total); count < c.total { close(c.ch) } } // Updates implements Task.Updates and returns a channel which is written to // when the state of this task changes, and closed when the task is completed. func (c *PercentageTask) Updates() <-chan *Update { return c.ch } // Throttled implements Task.Throttled and returns true, indicating that this // task is throttled. 
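//
// Updates take the form "msg: 70% (7/10)"; intermediate percentages may
// safely be dropped by the throttle, and the final update is still sent
// before the channel is closed.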
func (c *PercentageTask) Throttled() bool { return true } git-lfs-3.6.1/tasklog/percentage_task_test.go000066400000000000000000000041761472372047300213000ustar00rootroot00000000000000package tasklog import ( "testing" "github.com/stretchr/testify/assert" ) func TestPercentageTaskCalculuatesPercentages(t *testing.T) { task := NewPercentageTask("example", 10) assert.Equal(t, "example: 0% (0/10)", (<-task.Updates()).S) n := task.Count(3) assert.EqualValues(t, 3, n) assert.Equal(t, "example: 30% (3/10)", (<-task.Updates()).S) } func TestPercentageTaskCalculatesPercentWithoutTotal(t *testing.T) { task := NewPercentageTask("example", 0) select { case v, ok := <-task.Updates(): if ok { assert.Equal(t, "example: 100% (0/0)", v.S) } else { t.Fatal("expected channel to be open") } default: } } func TestPercentageTaskCallsDoneWhenComplete(t *testing.T) { task := NewPercentageTask("example", 10) select { case v, ok := <-task.Updates(): if ok { assert.Equal(t, "example: 0% (0/10)", v.S) } else { t.Fatal("expected channel to be open") } default: } assert.EqualValues(t, 10, task.Count(10)) assert.Equal(t, "example: 100% (10/10)", (<-task.Updates()).S) if _, ok := <-task.Updates(); ok { t.Fatalf("expected channel to be closed") } defer func() { if err := recover(); err != nil { t.Fatal("tasklog: expected *PercentageTask.Complete() to not panic") } }() task.Complete() } func TestPercentageTaskCompleteClosesUpdates(t *testing.T) { task := NewPercentageTask("example", 10) select { case v, ok := <-task.Updates(): if ok { assert.Equal(t, "example: 0% (0/10)", v.S) } else { t.Fatal("expected channel to be open") } default: } assert.EqualValues(t, 7, task.Count(7)) assert.Equal(t, "example: 70% (7/10)", (<-task.Updates()).S) task.Complete() if _, ok := <-task.Updates(); ok { t.Fatalf("expected channel to be closed") } } func TestPercentageTaskIsThrottled(t *testing.T) { task := NewPercentageTask("example", 10) throttled := task.Throttled() assert.True(t, throttled, "tasklog: expected *PercentageTask to be Throttle()-d") } func TestPercentageTaskPanicsWhenOvercounted(t *testing.T) { task := NewPercentageTask("example", 0) defer func() { assert.Equal(t, "tasklog: counted too many items", recover()) }() task.Count(1) } git-lfs-3.6.1/tasklog/simple_task.go000066400000000000000000000033551472372047300174130ustar00rootroot00000000000000package tasklog import ( "fmt" "sync" "time" ) // SimpleTask is in an implementation of tasklog.Task which prints out messages // verbatim. type SimpleTask struct { // ch is used to transmit task updates. ch chan *Update // wg is used to wait between closing the channel, and acknowledging // that the close-related operations have been completed by the // tasklog.Logger. wg *sync.WaitGroup } // NewSimpleTask returns a new *SimpleTask instance. func NewSimpleTask() *SimpleTask { return &SimpleTask{ ch: make(chan *Update), wg: new(sync.WaitGroup), } } // Log logs a string with no formatting verbs. func (s *SimpleTask) Log(str string) { s.Logf(str) } // Logf logs some formatted string, which is interpreted according to the rules // defined in package "fmt". func (s *SimpleTask) Logf(str string, vals ...interface{}) { s.ch <- &Update{ S: fmt.Sprintf(str, vals...), At: time.Now(), } } // Complete notes that the task is completed by closing the Updates channel and // yields the logger to the next Task. Complete blocks until the *tasklog.Logger // has acknowledged completion of this task. 
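//
// The handshake works by closing ch, which ends the logger's range over
// Updates(), and then waiting on wg until the logger calls OnComplete.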
func (s *SimpleTask) Complete() { s.wg.Add(1) close(s.ch) s.wg.Wait() } // OnComplete implements an interface which receives a call to this method when // the *tasklog.Logger has finished processing this task, but before it has // accepted new tasks. func (s *SimpleTask) OnComplete() { s.wg.Done() } // Updates implements Task.Updates and returns a channel of updates which is // closed when Complete() is called. func (s *SimpleTask) Updates() <-chan *Update { return s.ch } // Throttled implements Task.Throttled and returns false, indicating that this // task is not throttled. func (s *SimpleTask) Throttled() bool { return false } git-lfs-3.6.1/tasklog/simple_task_test.go000066400000000000000000000025651472372047300204540ustar00rootroot00000000000000package tasklog import ( "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestSimpleTaskLogLogsUpdates(t *testing.T) { task := NewSimpleTask() var updates []*Update go func() { for update := range task.Updates() { updates = append(updates, update) } task.OnComplete() }() task.Log("Hello, world") task.Complete() require.Len(t, updates, 1) assert.Equal(t, "Hello, world", updates[0].S) } func TestSimpleTaskLogfLogsFormattedUpdates(t *testing.T) { task := NewSimpleTask() var updates []*Update go func() { for update := range task.Updates() { updates = append(updates, update) } task.OnComplete() }() task.Logf("Hello, world (%d)", 3+4) task.Complete() require.Len(t, updates, 1) assert.Equal(t, "Hello, world (7)", updates[0].S) } func TestSimpleTaskCompleteClosesUpdates(t *testing.T) { task := NewSimpleTask() select { case <-task.Updates(): t.Fatalf("tasklog: unexpected update from *SimpleTask") default: } go func() { <-task.Updates() task.OnComplete() }() task.Complete() if _, ok := <-task.Updates(); ok { t.Fatalf("tasklog: expected (*SimpleTask).Updates() to be closed") } } func TestSimpleTaskIsNotThrottled(t *testing.T) { task := NewSimpleTask() throttled := task.Throttled() assert.False(t, throttled, "tasklog: expected *SimpleTask not to be Throttle()-d") } git-lfs-3.6.1/tasklog/task.go000066400000000000000000000022001472372047300160260ustar00rootroot00000000000000package tasklog import "time" // Task is an interface which encapsulates an activity which can be logged. type Task interface { // Updates returns a channel which is written to with the current state // of the Task when an update is present. It is closed when the task is // complete. Updates() <-chan *Update // Throttled returns whether or not updates from this task should be // limited when being printed to a sink via *log.Logger. // // It is expected to return the same value for a given Task instance. Throttled() bool } // Update is a single message sent (S) from a Task at a given time (At). type Update struct { // S is the message sent in this update. S string // At is the time that this update was sent. At time.Time // Force determines if this update should not be throttled. Force bool } // Throttled determines whether this update should be throttled, based on the // given earliest time of the next update. The caller should determine how often // updates should be throttled. An Update with Force=true is never throttled. 
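//
// For example, with a 200ms throttle, an update stamped 150ms after the
// last logged update is throttled, while one stamped 250ms after it is
// not.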
func (u *Update) Throttled(next time.Time) bool {
	return !(u.Force || u.At.After(next))
}
git-lfs-3.6.1/tasklog/waiting_task.go000066400000000000000000000015531472372047300175620ustar00rootroot00000000000000package tasklog

import (
	"fmt"
	"time"
)

// WaitingTask represents a task for which the total number of items to do work
// on is unknown.
type WaitingTask struct {
	// ch is used to transmit task updates.
	ch chan *Update
}

// NewWaitingTask returns a new *WaitingTask.
func NewWaitingTask(msg string) *WaitingTask {
	ch := make(chan *Update, 1)
	ch <- &Update{
		S:  fmt.Sprintf("%s: ...", msg),
		At: time.Now(),
	}

	return &WaitingTask{ch: ch}
}

// Complete marks the task as completed.
func (w *WaitingTask) Complete() {
	close(w.ch)
}

// Updates implements the Task.Updates function and returns a channel of updates
// to log to the sink.
func (w *WaitingTask) Updates() <-chan *Update {
	return w.ch
}

// Throttled implements Task.Throttled and returns true, indicating that this
// task is throttled.
func (w *WaitingTask) Throttled() bool {
	return true
}
git-lfs-3.6.1/tasklog/waiting_task_test.go000066400000000000000000000022741472372047300206220ustar00rootroot00000000000000package tasklog

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestWaitingTaskDisplaysWaitingStatus(t *testing.T) {
	task := NewWaitingTask("example")

	assert.Equal(t, "example: ...", (<-task.Updates()).S)
}

func TestWaitingTaskCallsDoneWhenComplete(t *testing.T) {
	task := NewWaitingTask("example")

	select {
	case v, ok := <-task.Updates():
		if ok {
			assert.Equal(t, "example: ...", v.S)
		} else {
			t.Fatal("expected channel to be open")
		}
	default:
	}

	task.Complete()

	if _, ok := <-task.Updates(); ok {
		t.Fatalf("expected channel to be closed")
	}
}

func TestWaitingTaskPanicsWithMultipleDoneCalls(t *testing.T) {
	task := NewWaitingTask("example")
	task.Complete()

	defer func() {
		if err := recover(); err == nil {
			t.Fatal("tasklog: expected panic()")
		} else {
			if s, ok := err.(error); ok {
				assert.Equal(t, "close of closed channel", s.Error())
			} else {
				t.Fatal("tasklog: expected panic() to implement error")
			}
		}
	}()

	task.Complete()
}

func TestWaitingTaskIsThrottled(t *testing.T) {
	task := NewWaitingTask("example")

	throttled := task.Throttled()
	assert.True(t, throttled,
		"tasklog: expected *WaitingTask to be Throttle()-d")
}
git-lfs-3.6.1/tools/000077500000000000000000000000001472372047300142375ustar00rootroot00000000000000git-lfs-3.6.1/tools/channels.go000066400000000000000000000016151472372047300163640ustar00rootroot00000000000000package tools

import "fmt"

// Interface for all types of wrapper around a channel of results and an error channel
// Implementors will expose a type-specific channel for results
// Call the Wait() function after processing the results channel to catch any errors
// that occurred during the async processing
type ChannelWrapper interface {
	// Call this after processing results channel to check for async errors
	Wait() error
}

// Base implementation of channel wrapper to just deal with errors
type BaseChannelWrapper struct {
	errorChan <-chan error
}

func (w *BaseChannelWrapper) Wait() error {
	var err error
	for e := range w.errorChan {
		if err != nil {
			// Combine in case multiple errors
			err = fmt.Errorf("%v\n%v", err, e)
		} else {
			err = e
		}
	}

	return err
}

func NewBaseChannelWrapper(errChan <-chan error) *BaseChannelWrapper {
	return &BaseChannelWrapper{errorChan: errChan}
}
git-lfs-3.6.1/tools/copycallback.go000066400000000000000000000051061472372047300172170ustar00rootroot00000000000000package tools

import (
	"bytes"
"io" "os" ) type CopyCallback func(totalSize int64, readSoFar int64, readSinceLast int) error type BodyWithCallback struct { c CopyCallback totalSize int64 readSize int64 ReadSeekCloser } func NewByteBodyWithCallback(by []byte, totalSize int64, cb CopyCallback) *BodyWithCallback { return NewBodyWithCallback(NewByteBody(by), totalSize, cb) } func NewFileBodyWithCallback(f *os.File, totalSize int64, cb CopyCallback) *BodyWithCallback { return NewBodyWithCallback(NewFileBody(f), totalSize, cb) } func NewBodyWithCallback(body ReadSeekCloser, totalSize int64, cb CopyCallback) *BodyWithCallback { return &BodyWithCallback{ c: cb, totalSize: totalSize, ReadSeekCloser: body, } } // Read wraps the underlying Reader's "Read" method. It also captures the number // of bytes read, and calls the callback. func (r *BodyWithCallback) Read(p []byte) (int, error) { n, err := r.ReadSeekCloser.Read(p) if n > 0 { r.readSize += int64(n) if (err == nil || err == io.EOF) && r.c != nil { err = r.c(r.totalSize, r.readSize, n) } } return n, err } // Seek wraps the underlying Seeker's "Seek" method, updating the number of // bytes that have been consumed by this reader. func (r *BodyWithCallback) Seek(offset int64, whence int) (int64, error) { switch whence { case io.SeekStart: r.readSize = offset case io.SeekCurrent: r.readSize += offset case io.SeekEnd: r.readSize = r.totalSize + offset } return r.ReadSeekCloser.Seek(offset, whence) } // ResetProgress calls the callback with a negative read size equal to the // total number of bytes read so far, effectively "resetting" the progress. func (r *BodyWithCallback) ResetProgress() error { return r.c(r.totalSize, r.readSize, -int(r.readSize)) } type CallbackReader struct { C CopyCallback TotalSize int64 ReadSize int64 io.Reader } func (w *CallbackReader) Read(p []byte) (int, error) { n, err := w.Reader.Read(p) if n > 0 { w.ReadSize += int64(n) if (err == nil || err == io.EOF) && w.C != nil { err = w.C(w.TotalSize, w.ReadSize, n) } } return n, err } // prevent import cycle type ReadSeekCloser interface { io.Seeker io.ReadCloser } func NewByteBody(by []byte) ReadSeekCloser { return &closingByteReader{Reader: bytes.NewReader(by)} } type closingByteReader struct { *bytes.Reader } func (r *closingByteReader) Close() error { return nil } func NewFileBody(f *os.File) ReadSeekCloser { return &closingFileReader{File: f} } type closingFileReader struct { *os.File } func (r *closingFileReader) Close() error { return nil } git-lfs-3.6.1/tools/copycallback_test.go000066400000000000000000000037021472372047300202560ustar00rootroot00000000000000package tools import ( "io" "sync/atomic" "testing" "github.com/stretchr/testify/assert" ) func TestCopyCallbackReaderCallsCallbackUnderfilledBuffer(t *testing.T) { var ( calls uint32 actualTotalSize int64 actualReadSoFar int64 actualReadSinceLast int ) cb := func(totalSize int64, readSoFar int64, readSinceLast int) error { atomic.AddUint32(&calls, 1) actualTotalSize = totalSize actualReadSoFar = readSoFar actualReadSinceLast = readSinceLast return nil } buf := []byte{0x1} r := &CallbackReader{ C: cb, TotalSize: 3, ReadSize: 2, Reader: &EOFReader{b: buf}, } p := make([]byte, len(buf)+1) n, err := r.Read(p) assert.Equal(t, 1, n) assert.Nil(t, err) assert.EqualValues(t, 1, calls, "expected 1 call(s) to callback, got %d", calls) assert.EqualValues(t, 3, actualTotalSize) assert.EqualValues(t, 2+1, actualReadSoFar) assert.EqualValues(t, 1, actualReadSinceLast) } type EOFReader struct { b []byte i int } var _ io.Reader = (*EOFReader)(nil) func (r 
*EOFReader) Read(p []byte) (n int, err error) { n = copy(p, r.b[r.i:]) r.i += n if r.i == len(r.b) { err = io.EOF } return } func TestEOFReaderReturnsEOFs(t *testing.T) { r := EOFReader{[]byte{0x1}, 0} p := make([]byte, 2) n, err := r.Read(p) assert.Equal(t, 1, n) assert.Equal(t, io.EOF, err) } func TestBodyCallbackReaderCountsReads(t *testing.T) { br := NewByteBodyWithCallback([]byte{0x1, 0x2, 0x3, 0x4}, 4, nil) assert.EqualValues(t, 0, br.readSize) p := make([]byte, 8) n, err := br.Read(p) assert.Equal(t, 4, n) assert.Nil(t, err) assert.EqualValues(t, 4, br.readSize) } func TestBodyCallbackReaderUpdatesOffsetOnSeek(t *testing.T) { br := NewByteBodyWithCallback([]byte{0x1, 0x2, 0x3, 0x4}, 4, nil) br.Seek(1, io.SeekStart) assert.EqualValues(t, 1, br.readSize) br.Seek(1, io.SeekCurrent) assert.EqualValues(t, 2, br.readSize) br.Seek(-1, io.SeekEnd) assert.EqualValues(t, 3, br.readSize) } git-lfs-3.6.1/tools/cygwin.go000066400000000000000000000001361472372047300160660ustar00rootroot00000000000000//go:build !windows // +build !windows package tools func isCygwin() bool { return false } git-lfs-3.6.1/tools/cygwin_windows.go000066400000000000000000000016711472372047300176450ustar00rootroot00000000000000//go:build windows // +build windows package tools import ( "bytes" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/git-lfs/git-lfs/v3/tr" ) type cygwinSupport byte const ( cygwinStateUnknown cygwinSupport = iota cygwinStateEnabled cygwinStateDisabled ) func (c cygwinSupport) Enabled() bool { switch c { case cygwinStateEnabled: return true case cygwinStateDisabled: return false default: panic(tr.Tr.Get("unknown enabled state for %v", c)) } } var ( cygwinState cygwinSupport ) func isCygwin() bool { if cygwinState != cygwinStateUnknown { return cygwinState.Enabled() } cmd, err := subprocess.ExecCommand("uname") if err != nil { return false } out, err := cmd.Output() if err != nil { return false } if bytes.Contains(out, []byte("CYGWIN")) || bytes.Contains(out, []byte("MSYS")) { cygwinState = cygwinStateEnabled } else { cygwinState = cygwinStateDisabled } return cygwinState.Enabled() } git-lfs-3.6.1/tools/filetools.go000066400000000000000000000353141472372047300165740ustar00rootroot00000000000000// Package tools contains other helper functions too small to justify their own package // NOTE: Subject to change, do not rely on this package from outside git-lfs source package tools import ( "encoding/hex" "fmt" "io" "os" "os/user" "path/filepath" "runtime" "strconv" "strings" "sync" "sync/atomic" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/filepathfilter" "github.com/git-lfs/git-lfs/v3/tr" ) // FileOrDirExists determines if a file/dir exists, returns IsDir() results too. func FileOrDirExists(path string) (exists bool, isDir bool) { fi, err := os.Stat(path) if err != nil { return false, false } else { return true, fi.IsDir() } } // FileExists determines if a file (NOT dir) exists. func FileExists(path string) bool { ret, isDir := FileOrDirExists(path) return ret && !isDir } // DirExists determines if a dir (NOT file) exists. func DirExists(path string) bool { ret, isDir := FileOrDirExists(path) return ret && isDir } // FileExistsOfSize determines if a file exists and is of a specific size. 
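//
// For example (with a hypothetical path), FileExistsOfSize("/tmp/x", 4)
// is true only when /tmp/x exists, is not a directory, and is exactly
// 4 bytes long.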
func FileExistsOfSize(path string, sz int64) bool { fi, err := os.Stat(path) if err != nil { return false } return !fi.IsDir() && fi.Size() == sz } // ResolveSymlinks ensures that if the path supplied is a symlink, it is // resolved to the actual concrete path func ResolveSymlinks(path string) string { if len(path) == 0 { return path } if resolved, err := CanonicalizeSystemPath(path); err == nil { return resolved } return path } // RenameFileCopyPermissions moves srcfile to destfile, replacing destfile if // necessary and also copying the permissions of destfile if it already exists func RenameFileCopyPermissions(srcfile, destfile string) error { info, err := os.Stat(destfile) if os.IsNotExist(err) { // no original file } else if err != nil { return err } else { if err := os.Chmod(srcfile, info.Mode()); err != nil { return errors.New(tr.Tr.Get("can't set filemode on file %q: %v", srcfile, err)) } } if err := RobustRename(srcfile, destfile); err != nil { return errors.New(tr.Tr.Get("cannot replace %q with %q: %v", destfile, srcfile, err)) } return nil } // CleanPaths splits the given `paths` argument by the delimiter argument, and // then "cleans" that path according to the path.Clean function (see // https://golang.org/pkg/path#Clean). // Note always cleans to '/' path separators regardless of platform (git friendly) func CleanPaths(paths, delim string) (cleaned []string) { // If paths is an empty string, splitting it will yield [""], which will // become the path ".". To avoid this, bail out if trimmed paths // argument is empty. if paths = strings.TrimSpace(paths); len(paths) == 0 { return } for _, part := range strings.Split(paths, delim) { part = strings.TrimSpace(part) // Remove trailing `/` or `\`, but only the first one. for _, sep := range []string{`/`, `\`} { if strings.HasSuffix(part, sep) { part = strings.TrimSuffix(part, sep) break } } cleaned = append(cleaned, part) } return cleaned } // repositoryPermissionFetcher is an interface that matches the configuration // object and can be used to fetch repository permissions. type repositoryPermissionFetcher interface { RepositoryPermissions(executable bool) os.FileMode } // MkdirAll makes a directory and any intervening directories with the // permissions specified by the core.sharedRepository setting. func MkdirAll(path string, config repositoryPermissionFetcher) error { umask := 0777 & ^config.RepositoryPermissions(true) return doWithUmask(int(umask), func() error { return os.MkdirAll(path, config.RepositoryPermissions(true)) }) } var ( // currentUser is a wrapper over user.Current(), but instead uses the // value of os.Getenv("HOME") for the returned *user.User's "HomeDir" // member. currentUser func() (*user.User, error) = func() (*user.User, error) { u := &user.User{} u.HomeDir = os.Getenv("HOME") return u, nil } lookupUser func(who string) (*user.User, error) = user.Lookup lookupConfigHome func() string = func() string { return os.Getenv("XDG_CONFIG_HOME") } ) // ExpandPath returns a copy of path with any references to the current user's // home directory (spelled "~"), or a named user's home directory (spelled // "~user") in the path, sanitized to the calling filesystem's path separator // preference. // // If the "expand" argument is given as true, the resolved path to the named // user's home directory will expanded with filepath.EvalSymlinks. // // If either the current or named user does not have a home directory, an error // will be returned. 
// // Otherwise, the error returned will be nil, and the string returned will be // the expanded path. func ExpandPath(path string, expand bool) (string, error) { if len(path) == 0 || path[0] != '~' { return path, nil } var username string if slash := strings.Index(path[1:], "/"); slash > -1 { username = path[1 : slash+1] } else { username = path[1:] } var ( who *user.User err error ) if len(username) == 0 { who, err = currentUser() } else { who, err = lookupUser(username) } if err != nil { return "", errors.Wrapf(err, tr.Tr.Get("could not find user %s", username)) } homedir := who.HomeDir if expand { homedir, err = filepath.EvalSymlinks(homedir) if err != nil { return "", errors.Wrapf(err, tr.Tr.Get("cannot eval symlinks for %s", homedir)) } } return filepath.Join(homedir, path[len(username)+1:]), nil } // ExpandConfigPath returns a copy of path expanded as with ExpandPath. If the // path is empty, the default path is looked up inside $XDG_CONFIG_HOME, or // ~/.config if that is not set. func ExpandConfigPath(path, defaultPath string) (string, error) { if path != "" { return ExpandPath(path, false) } configHome := lookupConfigHome() if configHome != "" { return filepath.Join(configHome, defaultPath), nil } return ExpandPath(fmt.Sprintf("~/.config/%s", defaultPath), false) } // VerifyFileHash reads a file and verifies whether the SHA is correct // Returns an error if there is a problem func VerifyFileHash(oid, path string) error { f, err := os.Open(path) if err != nil { return err } defer f.Close() h := NewLfsContentHash() _, err = io.Copy(h, f) if err != nil { return err } calcOid := hex.EncodeToString(h.Sum(nil)) if calcOid != oid { return errors.New(tr.Tr.Get("file %q has an invalid hash %s, expected %s", path, calcOid, oid)) } return nil } // FastWalkCallback is the signature for the callback given to FastWalkGitRepo() type FastWalkCallback func(parentDir string, info os.FileInfo, err error) // FastWalkDir is a more optimal implementation of filepath.Walk for a Git // repo. The callback guaranteed to be called sequentially. The function returns // once all files and errors have triggered callbacks. // It differs in the following ways: // - Uses goroutines to parallelise large dirs and descent into subdirs // - Does not provide sorted output; parents will always be before children but // there are no other guarantees. Use parentDir argument in the callback to // determine absolute path rather than tracking it yourself // - Automatically ignores any .git directories // // rootDir - Absolute path to the top of the repository working directory func FastWalkDir(rootDir string, cb FastWalkCallback) { fastWalkCallback(fastWalkWithExcludeFiles(rootDir), cb) } // fastWalkCallback calls the FastWalkCallback "cb" for all files found by the // given *fastWalker, "walker". func fastWalkCallback(walker *fastWalker, cb FastWalkCallback) { for file := range walker.ch { cb(file.ParentDir, file.Info, file.Err) } } // Returned from FastWalk with parent directory context // This is needed because FastWalk can provide paths out of order so the // parent dir cannot be implied type fastWalkInfo struct { ParentDir string Info os.FileInfo Err error } type fastWalker struct { rootDir string ch chan fastWalkInfo limit int32 cur *int32 wg *sync.WaitGroup } // fastWalkWithExcludeFiles walks the contents of a dir, respecting // include/exclude patterns. 
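// The walk is parallelised; the number of concurrent workers honours the
// LFS_FASTWALK_LIMIT environment variable when it is set to a positive
// integer, and otherwise defaults to 20 times GOMAXPROCS.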
// // rootDir - Absolute path to the top of the repository working directory func fastWalkWithExcludeFiles(rootDir string) *fastWalker { excludePaths := []filepathfilter.Pattern{ filepathfilter.NewPattern(".git", filepathfilter.GitIgnore), filepathfilter.NewPattern("**/.git", filepathfilter.GitIgnore), } limit, _ := strconv.Atoi(os.Getenv("LFS_FASTWALK_LIMIT")) if limit < 1 { limit = runtime.GOMAXPROCS(-1) * 20 } c := int32(0) w := &fastWalker{ rootDir: rootDir, limit: int32(limit), cur: &c, ch: make(chan fastWalkInfo, 256), wg: &sync.WaitGroup{}, } go func() { defer w.Wait() dirFi, err := os.Stat(w.rootDir) if err != nil { w.ch <- fastWalkInfo{Err: err} return } w.Walk(true, "", dirFi, excludePaths) }() return w } // Walk is the main recursive implementation of fast walk. Sends the file/dir // and any contents to the channel so long as it passes the include/exclude // filter. Increments waitg.Add(1) for each new goroutine launched internally // // workDir - Relative path inside the repository func (w *fastWalker) Walk(isRoot bool, workDir string, itemFi os.FileInfo, excludePaths []filepathfilter.Pattern) { var fullPath string // Absolute path to the current file or dir var parentWorkDir string // Absolute path to the workDir inside the repository if isRoot { fullPath = w.rootDir } else { parentWorkDir = join(w.rootDir, workDir) fullPath = join(parentWorkDir, itemFi.Name()) } if !isRoot && itemFi.IsDir() { // If this directory has a .git directory or file in it, then // this is a submodule, and we should not recurse into it. _, err := os.Stat(filepath.Join(fullPath, ".git")) if err == nil { return } } workPath := join(workDir, itemFi.Name()) if !filepathfilter.NewFromPatterns(nil, excludePaths).Allows(workPath) { return } w.ch <- fastWalkInfo{ParentDir: parentWorkDir, Info: itemFi} if !itemFi.IsDir() { // Nothing more to do if this is not a dir return } var childWorkDir string if !isRoot { childWorkDir = join(workDir, itemFi.Name()) } // The absolute optimal way to scan would be File.Readdirnames but we // still need the Stat() to know whether something is a dir, so use // File.Readdir instead. Means we can provide os.FileInfo to callers like // filepath.Walk as a bonus. df, err := os.Open(fullPath) if err != nil { w.ch <- fastWalkInfo{Err: err} return } // The number of items in a dir we process in each goroutine jobSize := 100 for children, err := df.Readdir(jobSize); err == nil; children, err = df.Readdir(jobSize) { // Parallelise all dirs, and chop large dirs into batches w.walk(children, func(subitems []os.FileInfo) { for _, childFi := range subitems { w.Walk(false, childWorkDir, childFi, excludePaths) } }) } df.Close() if err != nil && err != io.EOF { w.ch <- fastWalkInfo{Err: err} } } func (w *fastWalker) walk(children []os.FileInfo, fn func([]os.FileInfo)) { cur := atomic.AddInt32(w.cur, 1) if cur > w.limit { fn(children) atomic.AddInt32(w.cur, -1) return } w.wg.Add(1) go func() { fn(children) w.wg.Done() atomic.AddInt32(w.cur, -1) }() } func (w *fastWalker) Wait() { w.wg.Wait() close(w.ch) } func join(paths ...string) string { ne := make([]string, 0, len(paths)) for _, p := range paths { if len(p) > 0 { ne = append(ne, p) } } return strings.Join(ne, "/") } // SetFileWriteFlag changes write permissions on a file // Used to make a file read-only or not. When writeEnabled = false, the write // bit is removed for all roles. When writeEnabled = true, the behaviour is // different per platform: // On Mac & Linux, the write bit is set only on the owner as per default umask. 
// All other bits are unaffected. // On Windows, all the write bits are set since Windows doesn't support Unix permissions. func SetFileWriteFlag(path string, writeEnabled bool) error { stat, err := os.Stat(path) if err != nil { return err } mode := uint32(stat.Mode()) if (writeEnabled && (mode&0200) > 0) || (!writeEnabled && (mode&0222) == 0) { // no change needed return nil } if writeEnabled { mode = mode | 0200 // set owner write only // Go's own Chmod makes Windows set all though } else { mode = mode &^ 0222 // disable all write } return os.Chmod(path, os.FileMode(mode)) } // TempFile creates a temporary file in specified directory with proper permissions for the repository. // On success, it returns an open, non-nil *os.File, and the caller is responsible // for closing and/or removing it. On failure, the temporary file is // automatically cleaned up and an error returned. // // This function is designed to handle only temporary files that will be renamed // into place later somewhere within the Git repository. func TempFile(dir, pattern string, cfg repositoryPermissionFetcher) (*os.File, error) { tmp, err := os.CreateTemp(dir, pattern) if err != nil { return nil, err } perms := cfg.RepositoryPermissions(false) err = os.Chmod(tmp.Name(), perms) if err != nil { tmp.Close() os.Remove(tmp.Name()) return nil, err } return tmp, nil } // ExecutablePermissions takes a set of Unix permissions (which may or may not // have the executable bits set) and maps them into a set of permissions in // which the executable bits are set, using the same technique as Git does. func ExecutablePermissions(perms os.FileMode) os.FileMode { // Copy read bits to executable bits. return perms | ((perms & 0444) >> 2) } // CanonicalizePath takes a path and produces a canonical absolute path, // performing any OS- or environment-specific path transformations (within the // limitations of the Go standard library). If the path is empty, it returns // the empty path with no error. If missingOk is true, then if the // canonicalized path does not exist, an absolute path is given instead. func CanonicalizePath(path string, missingOk bool) (string, error) { path, err := TranslateCygwinPath(path) if err != nil { return "", err } if len(path) > 0 { path, err := filepath.Abs(path) if err != nil { return "", err } result, err := CanonicalizeSystemPath(path) if err != nil && os.IsNotExist(err) && missingOk { return path, nil } return result, err } return "", nil } const ( windowsPrefix = `.\` nixPrefix = `./` ) // TrimCurrentPrefix removes a leading prefix of "./" or ".\" (referring to the // current directory in a platform independent manner). // // It is useful for callers such as "git lfs track" and "git lfs untrack", that // wish to compare filepaths and/or attributes patterns without cleaning across // multiple platforms. 
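//
// For example (illustrative literals):
//
//	TrimCurrentPrefix("./foo/bar.dat") // => "foo/bar.dat"
//	TrimCurrentPrefix(`.\foo\bar.dat`) // => `foo\bar.dat`
//	TrimCurrentPrefix("foo/bar.dat")   // => "foo/bar.dat" (no change)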
func TrimCurrentPrefix(p string) string { if strings.HasPrefix(p, windowsPrefix) { return strings.TrimPrefix(p, windowsPrefix) } return strings.TrimPrefix(p, nixPrefix) } git-lfs-3.6.1/tools/filetools_nix.go000066400000000000000000000003661472372047300174510ustar00rootroot00000000000000//go:build !windows // +build !windows package tools import "path/filepath" func CanonicalizeSystemPath(path string) (string, error) { path, err := filepath.Abs(path) if err != nil { return "", err } return filepath.EvalSymlinks(path) } git-lfs-3.6.1/tools/filetools_test.go000066400000000000000000000206121472372047300176260ustar00rootroot00000000000000package tools import ( "fmt" "os" "os/user" "path/filepath" "runtime" "sort" "testing" "github.com/stretchr/testify/assert" ) func TestCleanPathsCleansPaths(t *testing.T) { cleaned := CleanPaths("/foo/bar/,/foo/bar/baz", ",") assert.Equal(t, []string{"/foo/bar", "/foo/bar/baz"}, cleaned) } func TestCleanPathsReturnsNoResultsWhenGivenNoPaths(t *testing.T) { cleaned := CleanPaths("", ",") assert.Empty(t, cleaned) } type ExpandPathTestCase struct { Path string Expand bool Want string WantErr string currentUser func() (*user.User, error) lookupUser func(who string) (*user.User, error) } func (c *ExpandPathTestCase) Assert(t *testing.T) { if c.currentUser != nil { oldCurrentUser := currentUser currentUser = c.currentUser defer func() { currentUser = oldCurrentUser }() } if c.lookupUser != nil { oldLookupUser := lookupUser lookupUser = c.lookupUser defer func() { lookupUser = oldLookupUser }() } got, err := ExpandPath(c.Path, c.Expand) if err != nil || len(c.WantErr) > 0 { assert.EqualError(t, err, c.WantErr) } assert.Equal(t, filepath.ToSlash(c.Want), filepath.ToSlash(got)) } func TestExpandPath(t *testing.T) { for desc, c := range map[string]*ExpandPathTestCase{ "no expand": { Path: "/path/to/hooks", Want: "/path/to/hooks", }, "current": { Path: "~/path/to/hooks", Want: "/home/jane/path/to/hooks", currentUser: func() (*user.User, error) { return &user.User{ HomeDir: "/home/jane", }, nil }, }, "current, slash": { Path: "~/", Want: "/home/jane", currentUser: func() (*user.User, error) { return &user.User{ HomeDir: "/home/jane", }, nil }, }, "current, no slash": { Path: "~", Want: "/home/jane", currentUser: func() (*user.User, error) { return &user.User{ HomeDir: "/home/jane", }, nil }, }, "non-current": { Path: "~other/path/to/hooks", Want: "/home/special/path/to/hooks", lookupUser: func(who string) (*user.User, error) { assert.Equal(t, "other", who) return &user.User{ HomeDir: "/home/special", }, nil }, }, "non-current, no slash": { Path: "~other", Want: "/home/special", lookupUser: func(who string) (*user.User, error) { assert.Equal(t, "other", who) return &user.User{ HomeDir: "/home/special", }, nil }, }, "non-current (missing)": { Path: "~other/path/to/hooks", WantErr: "could not find user other: missing", lookupUser: func(who string) (*user.User, error) { assert.Equal(t, "other", who) return nil, fmt.Errorf("missing") }, }, } { t.Run(desc, c.Assert) } } type ExpandConfigPathTestCase struct { Path string DefaultPath string Want string WantErr string currentUser func() (*user.User, error) lookupConfigHome func() string } func (c *ExpandConfigPathTestCase) Assert(t *testing.T) { if c.currentUser != nil { oldCurrentUser := currentUser currentUser = c.currentUser defer func() { currentUser = oldCurrentUser }() } if c.lookupConfigHome != nil { oldLookupConfigHome := lookupConfigHome lookupConfigHome = c.lookupConfigHome defer func() { lookupConfigHome = 
oldLookupConfigHome }() } got, err := ExpandConfigPath(c.Path, c.DefaultPath) if err != nil || len(c.WantErr) > 0 { assert.EqualError(t, err, c.WantErr) } assert.Equal(t, filepath.ToSlash(c.Want), filepath.ToSlash(got)) } func TestExpandConfigPath(t *testing.T) { os.Unsetenv("XDG_CONFIG_HOME") for desc, c := range map[string]*ExpandConfigPathTestCase{ "unexpanded full path": { Path: "/path/to/attributes", Want: "/path/to/attributes", }, "expanded full path": { Path: "~/path/to/attributes", Want: "/home/pat/path/to/attributes", currentUser: func() (*user.User, error) { return &user.User{ HomeDir: "/home/pat", }, nil }, }, "expanded default path": { DefaultPath: "git/attributes", Want: "/home/pat/.config/git/attributes", currentUser: func() (*user.User, error) { return &user.User{ HomeDir: "/home/pat", }, nil }, }, "XDG_CONFIG_HOME set": { DefaultPath: "git/attributes", Want: "/home/pat/configpath/git/attributes", lookupConfigHome: func() string { return "/home/pat/configpath" }, }, } { t.Run(desc, c.Assert) } } func TestFastWalkBasic(t *testing.T) { wd, err := os.Getwd() assert.NoError(t, err) rootDir := t.TempDir() os.Chdir(rootDir) defer os.Chdir(wd) expectedEntries := createFastWalkInputData(10, 160) walker := fastWalkWithExcludeFiles(expectedEntries[0]) gotEntries, gotErrors := collectFastWalkResults(walker.ch) assert.Empty(t, gotErrors) sort.Strings(expectedEntries) sort.Strings(gotEntries) assert.Equal(t, expectedEntries, gotEntries) } // Make test data - ensure you've Chdir'ed into a temp dir first // Returns list of files/dirs that are created // First entry is the parent dir of all others func createFastWalkInputData(smallFolder, largeFolder int) []string { dirs := []string{ "testroot", "testroot/folder1", "testroot/folder2", "testroot/folder2/subfolder1", "testroot/folder2/subfolder2", "testroot/folder2/subfolder3", "testroot/folder2/subfolder4", "testroot/folder2/subfolder4/subsub", } expectedEntries := make([]string, 0, 250) for i, dir := range dirs { os.MkdirAll(dir, 0755) numFiles := smallFolder expectedEntries = append(expectedEntries, dir) if i >= 3 && i <= 5 { // Bulk test to ensure works with > 1 batch numFiles = largeFolder } for f := 0; f < numFiles; f++ { filename := join(dir, fmt.Sprintf("file%d.txt", f)) os.WriteFile(filename, []byte("TEST"), 0644) expectedEntries = append(expectedEntries, filename) } } return expectedEntries } func collectFastWalkResults(fchan <-chan fastWalkInfo) ([]string, []error) { gotEntries := make([]string, 0, 1000) gotErrors := make([]error, 0, 5) for o := range fchan { if o.Err != nil { gotErrors = append(gotErrors, o.Err) } else { if len(o.ParentDir) == 0 { gotEntries = append(gotEntries, o.Info.Name()) } else { gotEntries = append(gotEntries, join(o.ParentDir, o.Info.Name())) } } } return gotEntries, gotErrors } func getFileMode(filename string) os.FileMode { s, err := os.Stat(filename) if err != nil { return 0000 } return s.Mode() } // uniq creates an element-wise copy of "xs" containing only unique elements in // the same order. 
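//
// For example (illustrative values):
//
//	uniq([]string{"a", "b", "a", "c"}) // => []string{"a", "b", "c"}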
func uniq(xs []string) []string { seen := make(map[string]struct{}) uniq := make([]string, 0, len(xs)) for _, x := range xs { if _, ok := seen[x]; !ok { seen[x] = struct{}{} uniq = append(uniq, x) } } return uniq } func TestSetWriteFlag(t *testing.T) { f, err := os.CreateTemp("", "lfstestwriteflag") assert.Nil(t, err) filename := f.Name() defer os.Remove(filename) f.Close() // Set up with read/write bit for all but no execute assert.Nil(t, os.Chmod(filename, 0666)) assert.Nil(t, SetFileWriteFlag(filename, false)) // should turn off all write assert.EqualValues(t, 0444, getFileMode(filename)) assert.Nil(t, SetFileWriteFlag(filename, true)) // should only add back user write (on Mac/Linux) if runtime.GOOS == "windows" { assert.EqualValues(t, 0666, getFileMode(filename)) } else { assert.EqualValues(t, 0644, getFileMode(filename)) } // Can't run selective UGO tests on Windows as doesn't support separate roles // Also Golang only supports read/write but not execute on Windows if runtime.GOOS != "windows" { // Set up with read/write/execute bit for all but no execute assert.Nil(t, os.Chmod(filename, 0777)) assert.Nil(t, SetFileWriteFlag(filename, false)) // should turn off all write but not execute assert.EqualValues(t, 0555, getFileMode(filename)) assert.Nil(t, SetFileWriteFlag(filename, true)) // should only add back user write (on Mac/Linux) if runtime.GOOS == "windows" { assert.EqualValues(t, 0777, getFileMode(filename)) } else { assert.EqualValues(t, 0755, getFileMode(filename)) } assert.Nil(t, os.Chmod(filename, 0440)) assert.Nil(t, SetFileWriteFlag(filename, false)) assert.EqualValues(t, 0440, getFileMode(filename)) assert.Nil(t, SetFileWriteFlag(filename, true)) // should only add back user write assert.EqualValues(t, 0640, getFileMode(filename)) } } func TestExecutablePermissions(t *testing.T) { assert.EqualValues(t, os.FileMode(0755), ExecutablePermissions(0644)) assert.EqualValues(t, os.FileMode(0750), ExecutablePermissions(0640)) assert.EqualValues(t, os.FileMode(0700), ExecutablePermissions(0600)) } git-lfs-3.6.1/tools/filetools_windows.go000066400000000000000000000017751472372047300203520ustar00rootroot00000000000000//go:build windows // +build windows package tools import ( "golang.org/x/sys/windows" ) func openSymlink(path string) (windows.Handle, error) { p, err := windows.UTF16PtrFromString(path) if err != nil { return 0, err } attrs := uint32(windows.FILE_FLAG_BACKUP_SEMANTICS) h, err := windows.CreateFile(p, 0, 0, nil, windows.OPEN_EXISTING, attrs, 0) if err != nil { return 0, err } return h, nil } func CanonicalizeSystemPath(path string) (string, error) { h, err := openSymlink(path) if err != nil { return "", err } defer windows.CloseHandle(h) buf := make([]uint16, 100) for { n, err := windows.GetFinalPathNameByHandle(h, &buf[0], uint32(len(buf)), 0) if err != nil { return "", err } if n < uint32(len(buf)) { break } buf = make([]uint16, n) } s := windows.UTF16ToString(buf) if len(s) > 4 && s[:4] == `\\?\` { s = s[4:] if len(s) > 3 && s[:3] == `UNC` { // return path like \\server\share\... 
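// (GetFinalPathNameByHandle reports UNC paths as `\\?\UNC\server\share\...`;
// after the `\\?\` prefix has been stripped above, dropping the leading
// `UNC` component and restoring a single backslash yields the conventional
// form.)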
return `\` + s[3:], nil } return s, nil } return s, nil } git-lfs-3.6.1/tools/humanize/000077500000000000000000000000001472372047300160575ustar00rootroot00000000000000git-lfs-3.6.1/tools/humanize/humanize.go000066400000000000000000000072751472372047300202410ustar00rootroot00000000000000package humanize import ( "fmt" "math" "strconv" "strings" "time" "unicode" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tr" ) const ( Byte = 1 << (iota * 10) Kibibyte Mebibyte Gibibyte Tebibyte Pebibyte Kilobyte = 1000 * Byte Megabyte = 1000 * Kilobyte Gigabyte = 1000 * Megabyte Terabyte = 1000 * Gigabyte Petabyte = 1000 * Terabyte // eps is the machine epsilon, or a 64-bit floating point value // reasonably close to zero. eps float64 = 7.0/3 - 4.0/3 - 1.0 ) var bytesTable = map[string]uint64{ "": Byte, "b": Byte, "kib": Kibibyte, "mib": Mebibyte, "gib": Gibibyte, "tib": Tebibyte, "pib": Pebibyte, "kb": Kilobyte, "mb": Megabyte, "gb": Gigabyte, "tb": Terabyte, "pb": Petabyte, } // ParseBytes parses a given human-readable bytes or ibytes string into a number // of bytes, or an error if the string was unable to be parsed. func ParseBytes(str string) (uint64, error) { var sep int for _, r := range str { if !(unicode.IsDigit(r) || r == '.' || r == ',') { break } sep = sep + 1 } var f float64 if s := strings.Replace(str[:sep], ",", "", -1); len(s) > 0 { var err error f, err = strconv.ParseFloat(s, 64) if err != nil { return 0, err } } m, err := ParseByteUnit(str[sep:]) if err != nil { return 0, err } f = f * float64(m) if f >= math.MaxUint64 { return 0, errors.New(tr.Tr.Get("number of bytes too large")) } return uint64(f), nil } // ParseByteUnit returns the number of bytes in a given unit of storage, or an // error, if that unit is unrecognized. func ParseByteUnit(str string) (uint64, error) { str = strings.TrimSpace(str) str = strings.ToLower(str) if u, ok := bytesTable[str]; ok { return u, nil } return 0, errors.New(tr.Tr.Get("unknown unit: %q", str)) } var sizes = []string{"B", "KB", "MB", "GB", "TB", "PB"} // FormatBytes outputs the given number of bytes "s" as a human-readable string, // rounding to the nearest half within .01. func FormatBytes(s uint64) string { var e float64 if s == 0 { e = 0 } else { e = math.Floor(log(float64(s), 1000)) } unit := uint64(math.Pow(1000, e)) suffix := sizes[int(e)] return fmt.Sprintf("%s %s", FormatBytesUnit(s, unit), suffix) } // FormatBytesUnit outputs the given number of bytes "s" as a quantity of the // given units "u" to the nearest half within .01. func FormatBytesUnit(s, u uint64) string { var rounded float64 if s < 10 { rounded = float64(s) } else { rounded = math.Floor(float64(s)/float64(u)*10+.5) / 10 } format := "%.0f" if rounded < 10 && u > 1 { format = "%.1f" } return fmt.Sprintf(format, rounded) } // FormatByteRate outputs the given rate of transfer "r" as the quotient of "s" // (the number of bytes transferred) over "d" (the duration of time that those // bytes were transferred in). // // It displays the output as a quantity of a "per-unit-time" unit (i.e., B/s, // MiB/s) in the most representative fashion possible, as above. func FormatByteRate(s uint64, d time.Duration) string { // e is the index of the most representative unit of storage. var e float64 // f is the floating-point equivalent of "s", so as to avoid more // conversions than necessary. 
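// (The division below clamps the duration to at least one nanosecond, so
// a zero duration yields a large but finite rate rather than a division
// by zero.)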
f := float64(s) if f != 0 { f = f / math.Max(time.Nanosecond.Seconds(), d.Seconds()) e = math.Floor(log(f, 1000)) if e <= eps { // The result of math.Floor(log(r, 1000)) can be // "close-enough" to zero that it should be effectively // considered zero. e = 0 } } unit := uint64(math.Pow(1000, e)) suffix := sizes[int(e)] return fmt.Sprintf("%s %s/s", FormatBytesUnit(uint64(math.Ceil(f)), unit), suffix) } // log takes the log base "b" of "n" (\log_b{n}) func log(n, b float64) float64 { return math.Log(n) / math.Log(b) } git-lfs-3.6.1/tools/humanize/humanize_test.go000066400000000000000000000326241472372047300212740ustar00rootroot00000000000000package humanize_test import ( "math" "testing" "time" "github.com/git-lfs/git-lfs/v3/tools/humanize" "github.com/stretchr/testify/assert" ) type ParseBytesTestCase struct { Given string Expected uint64 Err error } func (c *ParseBytesTestCase) Assert(t *testing.T) { got, err := humanize.ParseBytes(c.Given) if c.Err == nil { assert.NoError(t, err, "unexpected error: %s", err) assert.EqualValues(t, c.Expected, got) } else { assert.Equal(t, c.Err, err) } } type FormatBytesTestCase struct { Given uint64 Expected string } func (c *FormatBytesTestCase) Assert(t *testing.T) { assert.Equal(t, c.Expected, humanize.FormatBytes(c.Given)) } type ParseByteUnitTestCase struct { Given string Expected uint64 Err string } func (c *ParseByteUnitTestCase) Assert(t *testing.T) { got, err := humanize.ParseByteUnit(c.Given) if len(c.Err) == 0 { assert.NoError(t, err, "unexpected error: %s", err) assert.EqualValues(t, c.Expected, got) } else { assert.EqualError(t, err, c.Err) } } type FormatBytesUnitTestCase struct { Given uint64 Unit uint64 Expected string } func (c *FormatBytesUnitTestCase) Assert(t *testing.T) { assert.Equal(t, c.Expected, humanize.FormatBytesUnit(c.Given, c.Unit)) } type FormatByteRateTestCase struct { Given uint64 Over time.Duration Expected string } func (c *FormatByteRateTestCase) Assert(t *testing.T) { assert.Equal(t, c.Expected, humanize.FormatByteRate(c.Given, c.Over)) } func TestParseBytes(t *testing.T) { for desc, c := range map[string]*ParseBytesTestCase{ "parse byte (zero, empty)": {"", uint64(0), nil}, "parse byte (empty)": {"10", uint64(10 * math.Pow(2, 0)), nil}, "parse byte": {"10B", uint64(10 * math.Pow(2, 0)), nil}, "parse kibibyte": {"20KIB", uint64(20 * math.Pow(2, 10)), nil}, "parse mebibyte": {"30MIB", uint64(30 * math.Pow(2, 20)), nil}, "parse gibibyte": {"40GIB", uint64(40 * math.Pow(2, 30)), nil}, "parse tebibyte": {"50TIB", uint64(50 * math.Pow(2, 40)), nil}, "parse pebibyte": {"60PIB", uint64(60 * math.Pow(2, 50)), nil}, "parse byte (lowercase)": {"10b", uint64(10 * math.Pow(2, 0)), nil}, "parse kibibyte (lowercase)": {"20kib", uint64(20 * math.Pow(2, 10)), nil}, "parse mebibyte (lowercase)": {"30mib", uint64(30 * math.Pow(2, 20)), nil}, "parse gibibyte (lowercase)": {"40gib", uint64(40 * math.Pow(2, 30)), nil}, "parse tebibyte (lowercase)": {"50tib", uint64(50 * math.Pow(2, 40)), nil}, "parse pebibyte (lowercase)": {"60pib", uint64(60 * math.Pow(2, 50)), nil}, "parse byte (with space)": {"10 B", uint64(10 * math.Pow(2, 0)), nil}, "parse kibibyte (with space)": {"20 KIB", uint64(20 * math.Pow(2, 10)), nil}, "parse mebibyte (with space)": {"30 MIB", uint64(30 * math.Pow(2, 20)), nil}, "parse gibibyte (with space)": {"40 GIB", uint64(40 * math.Pow(2, 30)), nil}, "parse tebibyte (with space)": {"50 TIB", uint64(50 * math.Pow(2, 40)), nil}, "parse pebibyte (with space)": {"60 PIB", uint64(60 * math.Pow(2, 50)), nil}, "parse byte (with 
space, lowercase)": {"10 b", uint64(10 * math.Pow(2, 0)), nil}, "parse kibibyte (with space, lowercase)": {"20 kib", uint64(20 * math.Pow(2, 10)), nil}, "parse mebibyte (with space, lowercase)": {"30 mib", uint64(30 * math.Pow(2, 20)), nil}, "parse gibibyte (with space, lowercase)": {"40 gib", uint64(40 * math.Pow(2, 30)), nil}, "parse tebibyte (with space, lowercase)": {"50 tib", uint64(50 * math.Pow(2, 40)), nil}, "parse pebibyte (with space, lowercase)": {"60 pib", uint64(60 * math.Pow(2, 50)), nil}, "parse kilobyte": {"20KB", uint64(20 * math.Pow(10, 3)), nil}, "parse megabyte": {"30MB", uint64(30 * math.Pow(10, 6)), nil}, "parse gigabyte": {"40GB", uint64(40 * math.Pow(10, 9)), nil}, "parse terabyte": {"50TB", uint64(50 * math.Pow(10, 12)), nil}, "parse petabyte": {"60PB", uint64(60 * math.Pow(10, 15)), nil}, "parse kilobyte (lowercase)": {"20kb", uint64(20 * math.Pow(10, 3)), nil}, "parse megabyte (lowercase)": {"30mb", uint64(30 * math.Pow(10, 6)), nil}, "parse gigabyte (lowercase)": {"40gb", uint64(40 * math.Pow(10, 9)), nil}, "parse terabyte (lowercase)": {"50tb", uint64(50 * math.Pow(10, 12)), nil}, "parse petabyte (lowercase)": {"60pb", uint64(60 * math.Pow(10, 15)), nil}, "parse kilobyte (with space)": {"20 KB", uint64(20 * math.Pow(10, 3)), nil}, "parse megabyte (with space)": {"30 MB", uint64(30 * math.Pow(10, 6)), nil}, "parse gigabyte (with space)": {"40 GB", uint64(40 * math.Pow(10, 9)), nil}, "parse terabyte (with space)": {"50 TB", uint64(50 * math.Pow(10, 12)), nil}, "parse petabyte (with space)": {"60 PB", uint64(60 * math.Pow(10, 15)), nil}, "parse kilobyte (with space, lowercase)": {"20 kb", uint64(20 * math.Pow(10, 3)), nil}, "parse megabyte (with space, lowercase)": {"30 mb", uint64(30 * math.Pow(10, 6)), nil}, "parse gigabyte (with space, lowercase)": {"40 gb", uint64(40 * math.Pow(10, 9)), nil}, "parse terabyte (with space, lowercase)": {"50 tb", uint64(50 * math.Pow(10, 12)), nil}, "parse petabyte (with space, lowercase)": {"60 pb", uint64(60 * math.Pow(10, 15)), nil}, } { t.Run(desc, c.Assert) } } func TestFormatBytes(t *testing.T) { for desc, c := range map[string]*FormatBytesTestCase{ "format bytes": {uint64(1 * math.Pow(10, 0)), "1 B"}, "format kilobytes": {uint64(1 * math.Pow(10, 3)), "1.0 KB"}, "format megabytes": {uint64(1 * math.Pow(10, 6)), "1.0 MB"}, "format gigabytes": {uint64(1 * math.Pow(10, 9)), "1.0 GB"}, "format petabytes": {uint64(1 * math.Pow(10, 12)), "1.0 TB"}, "format terabytes": {uint64(1 * math.Pow(10, 15)), "1.0 PB"}, "format kilobytes under": {uint64(1.49 * math.Pow(10, 3)), "1.5 KB"}, "format megabytes under": {uint64(1.49 * math.Pow(10, 6)), "1.5 MB"}, "format gigabytes under": {uint64(1.49 * math.Pow(10, 9)), "1.5 GB"}, "format petabytes under": {uint64(1.49 * math.Pow(10, 12)), "1.5 TB"}, "format terabytes under": {uint64(1.49 * math.Pow(10, 15)), "1.5 PB"}, "format kilobytes over": {uint64(1.51 * math.Pow(10, 3)), "1.5 KB"}, "format megabytes over": {uint64(1.51 * math.Pow(10, 6)), "1.5 MB"}, "format gigabytes over": {uint64(1.51 * math.Pow(10, 9)), "1.5 GB"}, "format petabytes over": {uint64(1.51 * math.Pow(10, 12)), "1.5 TB"}, "format terabytes over": {uint64(1.51 * math.Pow(10, 15)), "1.5 PB"}, "format kilobytes exact": {uint64(1.3 * math.Pow(10, 3)), "1.3 KB"}, "format megabytes exact": {uint64(1.3 * math.Pow(10, 6)), "1.3 MB"}, "format gigabytes exact": {uint64(1.3 * math.Pow(10, 9)), "1.3 GB"}, "format petabytes exact": {uint64(1.3 * math.Pow(10, 12)), "1.3 TB"}, "format terabytes exact": {uint64(1.3 * math.Pow(10, 15)), "1.3 
PB"}, } { t.Run(desc, c.Assert) } } func TestParseByteUnit(t *testing.T) { for desc, c := range map[string]*ParseByteUnitTestCase{ "parse byte": {"B", uint64(math.Pow(2, 0)), ""}, "parse kibibyte": {"KIB", uint64(math.Pow(2, 10)), ""}, "parse mebibyte": {"MIB", uint64(math.Pow(2, 20)), ""}, "parse gibibyte": {"GIB", uint64(math.Pow(2, 30)), ""}, "parse tebibyte": {"TIB", uint64(math.Pow(2, 40)), ""}, "parse pebibyte": {"PIB", uint64(math.Pow(2, 50)), ""}, "parse byte (lowercase)": {"b", uint64(math.Pow(2, 0)), ""}, "parse kibibyte (lowercase)": {"kib", uint64(math.Pow(2, 10)), ""}, "parse mebibyte (lowercase)": {"mib", uint64(math.Pow(2, 20)), ""}, "parse gibibyte (lowercase)": {"gib", uint64(math.Pow(2, 30)), ""}, "parse tebibyte (lowercase)": {"tib", uint64(math.Pow(2, 40)), ""}, "parse pebibyte (lowercase)": {"pib", uint64(math.Pow(2, 50)), ""}, "parse byte (with space)": {" B", uint64(math.Pow(2, 0)), ""}, "parse kibibyte (with space)": {" KIB", uint64(math.Pow(2, 10)), ""}, "parse mebibyte (with space)": {" MIB", uint64(math.Pow(2, 20)), ""}, "parse gibibyte (with space)": {" GIB", uint64(math.Pow(2, 30)), ""}, "parse tebibyte (with space)": {" TIB", uint64(math.Pow(2, 40)), ""}, "parse pebibyte (with space)": {" PIB", uint64(math.Pow(2, 50)), ""}, "parse byte (with space, lowercase)": {" b", uint64(math.Pow(2, 0)), ""}, "parse kibibyte (with space, lowercase)": {" kib", uint64(math.Pow(2, 10)), ""}, "parse mebibyte (with space, lowercase)": {" mib", uint64(math.Pow(2, 20)), ""}, "parse gibibyte (with space, lowercase)": {" gib", uint64(math.Pow(2, 30)), ""}, "parse tebibyte (with space, lowercase)": {" tib", uint64(math.Pow(2, 40)), ""}, "parse pebibyte (with space, lowercase)": {" pib", uint64(math.Pow(2, 50)), ""}, "parse kilobyte": {"KB", uint64(math.Pow(10, 3)), ""}, "parse megabyte": {"MB", uint64(math.Pow(10, 6)), ""}, "parse gigabyte": {"GB", uint64(math.Pow(10, 9)), ""}, "parse terabyte": {"TB", uint64(math.Pow(10, 12)), ""}, "parse petabyte": {"PB", uint64(math.Pow(10, 15)), ""}, "parse kilobyte (lowercase)": {"kb", uint64(math.Pow(10, 3)), ""}, "parse megabyte (lowercase)": {"mb", uint64(math.Pow(10, 6)), ""}, "parse gigabyte (lowercase)": {"gb", uint64(math.Pow(10, 9)), ""}, "parse terabyte (lowercase)": {"tb", uint64(math.Pow(10, 12)), ""}, "parse petabyte (lowercase)": {"pb", uint64(math.Pow(10, 15)), ""}, "parse kilobyte (with space)": {" KB", uint64(math.Pow(10, 3)), ""}, "parse megabyte (with space)": {" MB", uint64(math.Pow(10, 6)), ""}, "parse gigabyte (with space)": {" GB", uint64(math.Pow(10, 9)), ""}, "parse terabyte (with space)": {" TB", uint64(math.Pow(10, 12)), ""}, "parse petabyte (with space)": {" PB", uint64(math.Pow(10, 15)), ""}, "parse kilobyte (with space, lowercase)": {"kb", uint64(math.Pow(10, 3)), ""}, "parse megabyte (with space, lowercase)": {"mb", uint64(math.Pow(10, 6)), ""}, "parse gigabyte (with space, lowercase)": {"gb", uint64(math.Pow(10, 9)), ""}, "parse terabyte (with space, lowercase)": {"tb", uint64(math.Pow(10, 12)), ""}, "parse petabyte (with space, lowercase)": {"pb", uint64(math.Pow(10, 15)), ""}, "parse unknown unit": {"imag", 0, "unknown unit: \"imag\""}, } { t.Run(desc, c.Assert) } } func TestFormatBytesUnit(t *testing.T) { for desc, c := range map[string]*FormatBytesUnitTestCase{ "format bytes": {uint64(1 * math.Pow(10, 0)), humanize.Byte, "1"}, "format kilobytes": {uint64(1 * math.Pow(10, 3)), humanize.Byte, "1000"}, "format megabytes": {uint64(1 * math.Pow(10, 6)), humanize.Byte, "1000000"}, "format gigabytes": {uint64(1 * 
math.Pow(10, 9)), humanize.Byte, "1000000000"}, "format petabytes": {uint64(1 * math.Pow(10, 12)), humanize.Byte, "1000000000000"}, "format terabytes": {uint64(1 * math.Pow(10, 15)), humanize.Byte, "1000000000000000"}, "format kilobytes under": {uint64(1.49 * math.Pow(10, 3)), humanize.Byte, "1490"}, "format megabytes under": {uint64(1.49 * math.Pow(10, 6)), humanize.Byte, "1490000"}, "format gigabytes under": {uint64(1.49 * math.Pow(10, 9)), humanize.Byte, "1490000000"}, "format petabytes under": {uint64(1.49 * math.Pow(10, 12)), humanize.Byte, "1490000000000"}, "format terabytes under": {uint64(1.49 * math.Pow(10, 15)), humanize.Byte, "1490000000000000"}, "format kilobytes over": {uint64(1.51 * math.Pow(10, 3)), humanize.Byte, "1510"}, "format megabytes over": {uint64(1.51 * math.Pow(10, 6)), humanize.Byte, "1510000"}, "format gigabytes over": {uint64(1.51 * math.Pow(10, 9)), humanize.Byte, "1510000000"}, "format petabytes over": {uint64(1.51 * math.Pow(10, 12)), humanize.Byte, "1510000000000"}, "format terabytes over": {uint64(1.51 * math.Pow(10, 15)), humanize.Byte, "1510000000000000"}, } { t.Run(desc, c.Assert) } } func TestFormateByteRate(t *testing.T) { for desc, c := range map[string]*FormatByteRateTestCase{ "format bytes": {uint64(1 * math.Pow(10, 0)), time.Second, "1 B/s"}, "format kilobytes": {uint64(1 * math.Pow(10, 3)), time.Second, "1.0 KB/s"}, "format megabytes": {uint64(1 * math.Pow(10, 6)), time.Second, "1.0 MB/s"}, "format gigabytes": {uint64(1 * math.Pow(10, 9)), time.Second, "1.0 GB/s"}, "format petabytes": {uint64(1 * math.Pow(10, 12)), time.Second, "1.0 TB/s"}, "format terabytes": {uint64(1 * math.Pow(10, 15)), time.Second, "1.0 PB/s"}, "format kilobytes under": {uint64(1.49 * math.Pow(10, 3)), time.Second, "1.5 KB/s"}, "format megabytes under": {uint64(1.49 * math.Pow(10, 6)), time.Second, "1.5 MB/s"}, "format gigabytes under": {uint64(1.49 * math.Pow(10, 9)), time.Second, "1.5 GB/s"}, "format petabytes under": {uint64(1.49 * math.Pow(10, 12)), time.Second, "1.5 TB/s"}, "format terabytes under": {uint64(1.49 * math.Pow(10, 15)), time.Second, "1.5 PB/s"}, "format kilobytes over": {uint64(1.51 * math.Pow(10, 3)), time.Second, "1.5 KB/s"}, "format megabytes over": {uint64(1.51 * math.Pow(10, 6)), time.Second, "1.5 MB/s"}, "format gigabytes over": {uint64(1.51 * math.Pow(10, 9)), time.Second, "1.5 GB/s"}, "format petabytes over": {uint64(1.51 * math.Pow(10, 12)), time.Second, "1.5 TB/s"}, "format terabytes over": {uint64(1.51 * math.Pow(10, 15)), time.Second, "1.5 PB/s"}, "format kilobytes exact": {uint64(1.3 * math.Pow(10, 3)), time.Second, "1.3 KB/s"}, "format megabytes exact": {uint64(1.3 * math.Pow(10, 6)), time.Second, "1.3 MB/s"}, "format gigabytes exact": {uint64(1.3 * math.Pow(10, 9)), time.Second, "1.3 GB/s"}, "format petabytes exact": {uint64(1.3 * math.Pow(10, 12)), time.Second, "1.3 TB/s"}, "format terabytes exact": {uint64(1.3 * math.Pow(10, 15)), time.Second, "1.3 PB/s"}, "format bytes (non-second)": {uint64(10 * math.Pow(10, 0)), 2 * time.Second, "5 B/s"}, "format bytes (zero-second)": {uint64(10 * math.Pow(10, 0)), 0, "10 GB/s"}, } { t.Run(desc, c.Assert) } } git-lfs-3.6.1/tools/humanize/package.go000066400000000000000000000002431472372047300200000ustar00rootroot00000000000000// package humanize is designed to parse and format "humanized" versions of // numbers with units. // // Based on: github.com/dustin/go-humanize. 
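//
// A minimal usage sketch (values are illustrative):
//
//	n, _ := humanize.ParseBytes("1.5 MiB") // n == 1572864
//	s := humanize.FormatBytes(1500000)     // s == "1.5 MB"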
package humanize git-lfs-3.6.1/tools/iotools.go000066400000000000000000000102101472372047300162500ustar00rootroot00000000000000package tools import ( "bytes" "crypto/sha256" "encoding/hex" "hash" "io" "os" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tr" ) const ( // memoryBufferLimit is the number of bytes to buffer in memory before // spooling the contents of an `io.Reader` in `Spool()` to a temporary // file on disk. memoryBufferLimit = 1024 ) // CopyWithCallback copies reader to writer while performing a progress callback func CopyWithCallback(writer io.Writer, reader io.Reader, totalSize int64, cb CopyCallback) (int64, error) { if success, _ := CloneFile(writer, reader); success { if cb != nil { cb(totalSize, totalSize, 0) } return totalSize, nil } if cb == nil { return io.Copy(writer, reader) } cbReader := &CallbackReader{ C: cb, TotalSize: totalSize, Reader: reader, } return io.Copy(writer, cbReader) } // Get a new Hash instance of the type used to hash LFS content func NewLfsContentHash() hash.Hash { return sha256.New() } // HashingReader wraps a reader and calculates the hash of the data as it is read type HashingReader struct { reader io.Reader hasher hash.Hash } func NewHashingReader(r io.Reader) *HashingReader { return &HashingReader{r, NewLfsContentHash()} } func NewHashingReaderPreloadHash(r io.Reader, hash hash.Hash) *HashingReader { return &HashingReader{r, hash} } func (r *HashingReader) Hash() string { return hex.EncodeToString(r.hasher.Sum(nil)) } func (r *HashingReader) Read(b []byte) (int, error) { w, err := r.reader.Read(b) if err == nil || err == io.EOF { _, e := r.hasher.Write(b[0:w]) if e != nil && err == nil { return w, e } } return w, err } // RetriableReader wraps a error response of reader as RetriableError() type RetriableReader struct { reader io.Reader } func NewRetriableReader(r io.Reader) io.Reader { return &RetriableReader{r} } func (r *RetriableReader) Read(b []byte) (int, error) { n, err := r.reader.Read(b) // EOF is a successful response as it is used to signal a graceful end // of input c.f. https://git.io/v6riQ // // Otherwise, if the error is non-nil and already retriable (in the // case that the underlying reader `r.reader` is itself a // `*RetriableReader`, return the error wholesale: if err == nil || err == io.EOF || errors.IsRetriableError(err) { return n, err } return n, errors.NewRetriableError(err) } // Spool spools the contents from 'from' to 'to' by buffering the entire // contents of 'from' into a temporary file created in the directory "dir". // That buffer is held in memory until the file grows to larger than // 'memoryBufferLimit`, then the remaining contents are spooled to disk. // // The temporary file is cleaned up after the copy is complete. // // The number of bytes written to "to", as well as any error encountered are // returned. func Spool(to io.Writer, from io.Reader, dir string) (n int64, err error) { // First, buffer up to `memoryBufferLimit` in memory. buf := make([]byte, memoryBufferLimit) if bn, err := from.Read(buf); err != nil && err != io.EOF { return int64(bn), err } else { buf = buf[:bn] } var spool io.Reader = bytes.NewReader(buf) if err != io.EOF { // If we weren't at the end of the stream, create a temporary // file, and spool the remaining contents there. 
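// (Note that "err" here refers to the function's named return value,
// which is still nil at this point; the io.EOF from the initial Read
// above was assigned to a shadowed "err" inside the if statement, so
// this branch also runs when that first Read consumed the whole input.)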
tmp, err := os.CreateTemp(dir, "") if err != nil { return 0, errors.Wrap(err, tr.Tr.Get("Unable to create temporary file for spooling")) } defer func() { tmp.Close() os.Remove(tmp.Name()) }() if n, err = io.Copy(tmp, from); err != nil { return n, errors.Wrap(err, tr.Tr.Get("unable to spool")) } if _, err = tmp.Seek(0, io.SeekStart); err != nil { return 0, errors.Wrap(err, tr.Tr.Get("unable to seek")) } // The spooled contents will now be the concatenation of the // contents we stored in memory, then the remainder of the // contents on disk. spool = io.MultiReader(spool, tmp) } return io.Copy(to, spool) } // Split the input on the NUL character. Usable with bufio.Scanner. func SplitOnNul(data []byte, atEOF bool) (advance int, token []byte, err error) { for i := 0; i < len(data); i++ { if data[i] == '\x00' { return i + 1, data[:i], nil } } return 0, nil, nil } git-lfs-3.6.1/tools/iotools_test.go000066400000000000000000000036361472372047300173250ustar00rootroot00000000000000package tools_test import ( "bytes" "io" "testing" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tools" "github.com/stretchr/testify/assert" ) func TestRetriableReaderReturnsSuccessfulReads(t *testing.T) { r := tools.NewRetriableReader(bytes.NewBuffer([]byte{0x1, 0x2, 0x3, 0x4})) var buf [4]byte n, err := r.Read(buf[:]) assert.Nil(t, err) assert.Equal(t, 4, n) assert.Equal(t, []byte{0x1, 0x2, 0x3, 0x4}, buf[:]) } func TestRetriableReaderReturnsEOFs(t *testing.T) { r := tools.NewRetriableReader(bytes.NewBuffer([]byte{ /* empty */ })) var buf [1]byte n, err := r.Read(buf[:]) assert.Equal(t, io.EOF, err) assert.Equal(t, 0, n) } func TestRetriableReaderMakesErrorsRetriable(t *testing.T) { expected := errors.New("example error") r := tools.NewRetriableReader(&ErrReader{expected}) var buf [1]byte n, err := r.Read(buf[:]) assert.Equal(t, 0, n) assert.EqualError(t, err, "LFS: "+expected.Error()) assert.True(t, errors.IsRetriableError(err)) } func TestRetriableReaderDoesNotRewrap(t *testing.T) { // expected is already "retriable", as would be the case if the // underlying reader was a *RetriableReader itself. expected := errors.NewRetriableError(errors.New("example error")) r := tools.NewRetriableReader(&ErrReader{expected}) var buf [1]byte n, err := r.Read(buf[:]) assert.Equal(t, 0, n) // errors.NewRetriableError wraps the given error with the prefix // message "LFS", so these two errors should be equal, indicating that // the RetriableReader did not re-wrap the error it received. assert.EqualError(t, err, expected.Error()) assert.True(t, errors.IsRetriableError(err)) } // ErrReader implements io.Reader and only returns errors. type ErrReader struct { // err is the error that this reader will return. err error } // Read implements io.Reader#Read, and returns (0, e.err). func (e *ErrReader) Read(p []byte) (n int, err error) { return 0, e.err } git-lfs-3.6.1/tools/kv/000077500000000000000000000000001472372047300146575ustar00rootroot00000000000000git-lfs-3.6.1/tools/kv/keyvaluestore.go000066400000000000000000000144511472372047300201150ustar00rootroot00000000000000package kv import ( "encoding/gob" "io" "os" "sync" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tr" ) // Store provides an in-memory key/value store which is persisted to // a file. The file handle itself is not kept locked for the duration; it is // only locked during load and save, to make it concurrency friendly. 
When // saving, the store uses optimistic locking to determine whether the db on disk // has been modified by another process; in which case it loads the latest // version and re-applies modifications made during this session. This means // the Lost Update db concurrency issue is possible; so don't use this if you // need more DB integrity than Read Committed isolation levels. type Store struct { // Locks the entire store mu sync.RWMutex filename string log []change // This is the persistent data // version for optimistic locking, this field is incremented with every Save() version int64 db map[string]interface{} } // Type of operation; set or remove type operation int const ( // Set a value for a key setOperation = operation(iota) // Removed a value for a key removeOperation = operation(iota) ) type change struct { op operation key string value interface{} } // NewStore creates a new key/value store and initialises it with contents from // the named file, if it exists func NewStore(filepath string) (*Store, error) { kv := &Store{filename: filepath, db: make(map[string]interface{})} return kv, kv.loadAndMergeIfNeeded() } // Set updates the key/value store in memory // Changes are not persisted until you call Save() func (k *Store) Set(key string, value interface{}) { k.mu.Lock() defer k.mu.Unlock() k.db[key] = value k.logChange(setOperation, key, value) } // Remove removes the key and its value from the store in memory // Changes are not persisted until you call Save() func (k *Store) Remove(key string) { k.mu.Lock() defer k.mu.Unlock() delete(k.db, key) k.logChange(removeOperation, key, nil) } // RemoveAll removes all entries from the store // These changes are not persisted until you call Save() func (k *Store) RemoveAll() { k.mu.Lock() defer k.mu.Unlock() // Log all changes for key, _ := range k.db { k.logChange(removeOperation, key, nil) } k.db = make(map[string]interface{}) } // Visit walks through the entire store via a function; return false from // your visitor function to halt the walk func (k *Store) Visit(cb func(string, interface{}) bool) { // Read-only lock k.mu.RLock() defer k.mu.RUnlock() for k, v := range k.db { if !cb(k, v) { break } } } // Append a change to the log; mutex must already be locked func (k *Store) logChange(op operation, key string, value interface{}) { k.log = append(k.log, change{op, key, value}) } // Get retrieves a value from the store, or nil if it is not present func (k *Store) Get(key string) interface{} { // Read-only lock k.mu.RLock() defer k.mu.RUnlock() // zero value of interface{} is nil so this does what we want return k.db[key] } // Save persists the changes made to disk // If any changes have been written by other code they will be merged func (k *Store) Save() error { k.mu.Lock() defer k.mu.Unlock() // Short-circuit if we have no changes if len(k.log) == 0 { return nil } // firstly peek at version; open read/write to keep lock between check & write f, err := os.OpenFile(k.filename, os.O_RDWR|os.O_CREATE, 0664) if err != nil { return err } defer f.Close() // Only try to merge if > 0 bytes, ignore empty files (decoder will fail) if stat, _ := f.Stat(); stat.Size() > 0 { k.loadAndMergeReaderIfNeeded(f) // Now we overwrite the file f.Seek(0, io.SeekStart) f.Truncate(0) } k.version++ enc := gob.NewEncoder(f) if err := enc.Encode(k.version); err != nil { return errors.New(tr.Tr.Get("error while writing version data to %v: %v", k.filename, err)) } if err := enc.Encode(k.db); err != nil { return errors.New(tr.Tr.Get("error while writing new key/value 
data to %v: %v", k.filename, err)) } // Clear log now that it's saved k.log = nil return nil } // Reads as little as possible from the passed in file to determine if the // contents are different from the version already held. If so, reads the // contents and merges with any outstanding changes. If not, stops early without // reading the rest of the file func (k *Store) loadAndMergeIfNeeded() error { stat, err := os.Stat(k.filename) if err != nil { if os.IsNotExist(err) { return nil // missing is OK } return err } // Do nothing if empty file if stat.Size() == 0 { return nil } f, err := os.OpenFile(k.filename, os.O_RDONLY, 0664) if err == nil { defer f.Close() return k.loadAndMergeReaderIfNeeded(f) } else { return err } } // As loadAndMergeIfNeeded but lets caller decide how to manage file handles func (k *Store) loadAndMergeReaderIfNeeded(f io.Reader) error { var versionOnDisk int64 // Decode *only* the version field to check whether anyone else has // modified the db; gob serializes structs in order so it will always be 1st dec := gob.NewDecoder(f) err := dec.Decode(&versionOnDisk) if err != nil { return errors.New(tr.Tr.Get("problem checking version of key/value data from %v: %v", k.filename, err)) } // Totally uninitialised Version == 0, saved versions are always >=1 if versionOnDisk != k.version { // Reload data & merge var dbOnDisk map[string]interface{} err = dec.Decode(&dbOnDisk) if err != nil { return errors.New(tr.Tr.Get("problem reading updated key/value data from %v: %v", k.filename, err)) } k.reapplyChanges(dbOnDisk) k.version = versionOnDisk } return nil } // reapplyChanges replays the changes made since the last load onto baseDb // and stores the result as our own DB func (k *Store) reapplyChanges(baseDb map[string]interface{}) { for _, change := range k.log { switch change.op { case setOperation: baseDb[change.key] = change.value case removeOperation: delete(baseDb, change.key) } } // Note, log is not cleared here, that only happens on Save since it's a // list of unsaved changes k.db = baseDb } // RegisterTypeForStorage registers a custom type (e.g. a struct) for // use in the key value store. This is necessary if you intend to pass custom // structs to Store.Set() rather than primitive types. 
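//
// A minimal sketch (the type and file path are hypothetical):
//
//	type cacheEntry struct {
//		Oid  string
//		Size int64
//	}
//
//	kv.RegisterTypeForStorage(&cacheEntry{})
//	store, err := kv.NewStore("/tmp/lfs-cache.db")
//	if err == nil {
//		store.Set("entry", &cacheEntry{Oid: "abc123", Size: 42})
//		err = store.Save()
//	}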
func RegisterTypeForStorage(val interface{}) { gob.Register(val) } git-lfs-3.6.1/tools/kv/keyvaluestore_test.go000066400000000000000000000106111472372047300211460ustar00rootroot00000000000000package kv import ( "os" "testing" "github.com/stretchr/testify/assert" ) func TestStoreSimple(t *testing.T) { tmpf, err := os.CreateTemp("", "lfstest1") assert.Nil(t, err) filename := tmpf.Name() defer os.Remove(filename) tmpf.Close() kvs, err := NewStore(filename) assert.Nil(t, err) // We'll include storing custom structs type customData struct { Val1 string Val2 int } // Needed to store custom struct RegisterTypeForStorage(&customData{}) kvs.Set("stringVal", "This is a string value") kvs.Set("intVal", 3) kvs.Set("floatVal", 3.142) kvs.Set("structVal", &customData{"structTest", 20}) s := kvs.Get("stringVal") assert.Equal(t, "This is a string value", s) i := kvs.Get("intVal") assert.Equal(t, 3, i) f := kvs.Get("floatVal") assert.Equal(t, 3.142, f) c := kvs.Get("structVal") assert.Equal(t, c, &customData{"structTest", 20}) n := kvs.Get("noValue") assert.Nil(t, n) kvs.Remove("stringVal") s = kvs.Get("stringVal") assert.Nil(t, s) // Set the string value again before saving kvs.Set("stringVal", "This is a string value") err = kvs.Save() assert.Nil(t, err) kvs = nil // Now confirm that we can read it all back kvs2, err := NewStore(filename) assert.Nil(t, err) s = kvs2.Get("stringVal") assert.Equal(t, "This is a string value", s) i = kvs2.Get("intVal") assert.Equal(t, 3, i) f = kvs2.Get("floatVal") assert.Equal(t, 3.142, f) c = kvs2.Get("structVal") assert.Equal(t, c, &customData{"structTest", 20}) n = kvs2.Get("noValue") assert.Nil(t, n) // Test remove all kvs2.RemoveAll() s = kvs2.Get("stringVal") assert.Nil(t, s) i = kvs2.Get("intVal") assert.Nil(t, i) f = kvs2.Get("floatVal") assert.Nil(t, f) c = kvs2.Get("structVal") assert.Nil(t, c) err = kvs2.Save() assert.Nil(t, err) kvs2 = nil // Now confirm that we can read blank & get nothing kvs, err = NewStore(filename) assert.Nil(t, err) kvs.Visit(func(k string, v interface{}) bool { // Should not be called assert.Fail(t, "Should be no entries") return true }) } func TestStoreOptimisticConflict(t *testing.T) { tmpf, err := os.CreateTemp("", "lfstest2") assert.Nil(t, err) filename := tmpf.Name() defer os.Remove(filename) tmpf.Close() kvs1, err := NewStore(filename) assert.Nil(t, err) kvs1.Set("key1", "value1") kvs1.Set("key2", "value2") kvs1.Set("key3", "value3") err = kvs1.Save() assert.Nil(t, err) // Load second copy & modify kvs2, err := NewStore(filename) assert.Nil(t, err) // New keys kvs2.Set("key4", "value4_fromkvs2") kvs2.Set("key5", "value5_fromkvs2") // Modify a key too kvs2.Set("key1", "value1_fromkvs2") err = kvs2.Save() assert.Nil(t, err) // Now modify first copy & save; it should detect optimistic lock issue // New item kvs1.Set("key10", "value10") // Overlapping item; since we save second this will overwrite one from kvs2 kvs1.Set("key4", "value4") err = kvs1.Save() assert.Nil(t, err) // This should have merged changes from kvs2 in the process v := kvs1.Get("key1") assert.Equal(t, "value1_fromkvs2", v) // this one was modified by kvs2 v = kvs1.Get("key2") assert.Equal(t, "value2", v) v = kvs1.Get("key3") assert.Equal(t, "value3", v) v = kvs1.Get("key4") assert.Equal(t, "value4", v) // we overwrote this so would not be merged v = kvs1.Get("key5") assert.Equal(t, "value5_fromkvs2", v) } func TestStoreReduceSize(t *testing.T) { tmpf, err := os.CreateTemp("", "lfstest3") assert.Nil(t, err) filename := tmpf.Name() defer os.Remove(filename) 
tmpf.Close() kvs, err := NewStore(filename) assert.Nil(t, err) kvs.Set("key1", "I woke up in a Soho doorway") kvs.Set("key2", "A policeman knew my name") kvs.Set("key3", "He said 'You can go sleep at home tonight") kvs.Set("key4", "If you can get up and walk away'") assert.NotNil(t, kvs.Get("key1")) assert.NotNil(t, kvs.Get("key2")) assert.NotNil(t, kvs.Get("key3")) assert.NotNil(t, kvs.Get("key4")) assert.Nil(t, kvs.Save()) stat1, _ := os.Stat(filename) // Remove all but 1 key & save smaller version kvs.Remove("key2") kvs.Remove("key3") kvs.Remove("key4") assert.Nil(t, kvs.Save()) // Now reload fresh & prove works kvs = nil kvs, err = NewStore(filename) assert.Nil(t, err) assert.NotNil(t, kvs.Get("key1")) assert.Nil(t, kvs.Get("key2")) assert.Nil(t, kvs.Get("key3")) assert.Nil(t, kvs.Get("key4")) stat2, _ := os.Stat(filename) assert.True(t, stat2.Size() < stat1.Size(), "Size should have reduced, was %d now %d", stat1.Size(), stat2.Size()) } git-lfs-3.6.1/tools/math.go000066400000000000000000000012471472372047300155230ustar00rootroot00000000000000package tools // MinInt returns the smaller of two `int`s, "a", or "b". func MinInt(a, b int) int { if a < b { return a } return b } // MaxInt returns the greater of two `int`s, "a", or "b". func MaxInt(a, b int) int { if a > b { return a } return b } // ClampInt returns the integer "n" bounded between "min" and "max". func ClampInt(n, min, max int) int { return MinInt(max, MaxInt(min, n)) } // MinInt64 returns the smaller of two `int`s, "a", or "b". func MinInt64(a, b int64) int64 { if a < b { return a } return b } // MaxInt64 returns the greater of two `int`s, "a", or "b". func MaxInt64(a, b int64) int64 { if a > b { return a } return b } git-lfs-3.6.1/tools/math_test.go000066400000000000000000000010421472372047300165530ustar00rootroot00000000000000package tools import ( "testing" "github.com/stretchr/testify/assert" ) func TestMinIntPicksTheSmallerInt(t *testing.T) { assert.Equal(t, -1, MinInt(-1, 1)) } func TestMaxIntPicksTheBiggertInt(t *testing.T) { assert.Equal(t, 1, MaxInt(-1, 1)) } func TestClampDiscardsIntsLowerThanMin(t *testing.T) { assert.Equal(t, 0, ClampInt(-1, 0, 1)) } func TestClampDiscardsIntsGreaterThanMax(t *testing.T) { assert.Equal(t, 1, ClampInt(2, 0, 1)) } func TestClampAcceptsIntsWithinBounds(t *testing.T) { assert.Equal(t, 1, ClampInt(1, 0, 2)) } git-lfs-3.6.1/tools/ordered_set.go000066400000000000000000000123451472372047300170720ustar00rootroot00000000000000package tools // OrderedSet is a unique set of strings that maintains insertion order. type OrderedSet struct { // s is the set of strings that we're keeping track of. s []string // m is a mapping of string value "s" into the index "i" that that // string is present in in the given "s". m map[string]int } // NewOrderedSet creates an ordered set with no values. func NewOrderedSet() *OrderedSet { return NewOrderedSetWithCapacity(0) } // NewOrderedSetWithCapacity creates a new ordered set with no values. The // returned ordered set can be appended to "capacity" number of times before it // grows internally. func NewOrderedSetWithCapacity(capacity int) *OrderedSet { return &OrderedSet{ s: make([]string, 0, capacity), m: make(map[string]int, capacity), } } // NewOrderedSetFromSlice returns a new ordered set with the elements given in // the slice "s". 
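// Duplicate elements are dropped while insertion order is preserved; for
// example (illustrative values):
//
//	s := NewOrderedSetFromSlice([]string{"b", "a", "b"})
//	s.Contains("a")  // => true
//	s.Cardinality()  // => 2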
func NewOrderedSetFromSlice(s []string) *OrderedSet { set := NewOrderedSetWithCapacity(len(s)) for _, e := range s { set.Add(e) } return set } // Add adds the given element "i" to the ordered set, unless the element is // already present. It returns whether or not the element was added. func (s *OrderedSet) Add(i string) bool { if _, ok := s.m[i]; ok { return false } s.s = append(s.s, i) s.m[i] = len(s.s) - 1 return true } // Contains returns whether or not the given "i" is contained in this ordered // set. It is a constant-time operation. func (s *OrderedSet) Contains(i string) bool { if _, ok := s.m[i]; ok { return true } return false } // ContainsAll returns whether or not all of the given items in "i" are present // in the ordered set. func (s *OrderedSet) ContainsAll(i ...string) bool { for _, item := range i { if !s.Contains(item) { return false } } return true } // IsSubset returns whether other is a subset of this ordered set. In other // words, it returns whether or not all of the elements in "other" are also // present in this set. func (s *OrderedSet) IsSubset(other *OrderedSet) bool { for _, i := range other.s { if !s.Contains(i) { return false } } return true } // IsSuperset returns whether or not "other" is a superset of this set. In other // words, it returns whether or not all of the elements in this set are also in // the set "other". func (s *OrderedSet) IsSuperset(other *OrderedSet) bool { return other.IsSubset(s) } // Union returns a union of this set with the given set "other". It returns the // items that are in either set while maintaining uniqueness constraints. It // preserves ordering within each set, and orders the elements in this set before // the elements in "other". // // It is an O(n+m) operation. func (s *OrderedSet) Union(other *OrderedSet) *OrderedSet { union := NewOrderedSetWithCapacity(other.Cardinality() + s.Cardinality()) for _, e := range s.s { union.Add(e) } for _, e := range other.s { union.Add(e) } return union } // Intersect returns the elements that are in both this set and the given // "other" set. It is an O(min(n, m)) operation. func (s *OrderedSet) Intersect(other *OrderedSet) *OrderedSet { intersection := NewOrderedSetWithCapacity(MinInt( s.Cardinality(), other.Cardinality())) if s.Cardinality() < other.Cardinality() { for _, elem := range s.s { if other.Contains(elem) { intersection.Add(elem) } } } else { for _, elem := range other.s { if s.Contains(elem) { intersection.Add(elem) } } } return intersection } // Difference returns the elements that are in this set, but not included in // other. func (s *OrderedSet) Difference(other *OrderedSet) *OrderedSet { diff := NewOrderedSetWithCapacity(s.Cardinality()) for _, e := range s.s { if !other.Contains(e) { diff.Add(e) } } return diff } // SymmetricDifference returns the elements that are present in exactly one of // the two sets. func (s *OrderedSet) SymmetricDifference(other *OrderedSet) *OrderedSet { left := s.Difference(other) right := other.Difference(s) return left.Union(right) } // Clear removes all elements from this set. func (s *OrderedSet) Clear() { s.s = make([]string, 0) s.m = make(map[string]int, 0) } // Remove removes the given element "i" from this set. func (s *OrderedSet) Remove(i string) { idx, ok := s.m[i] if !ok { return } s.s = append(s.s[:idx], s.s[idx+1:]...) // Reindex the elements that were shifted down by the removal. for _, e := range s.s[idx:] { s.m[e] = s.m[e] - 1 } delete(s.m, i) } // Cardinality returns the cardinality of this set.
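//
// For example (a minimal sketch with illustrative values):
//
//	s := NewOrderedSetFromSlice([]string{"a", "b"})
//	n := s.Cardinality() // n == 2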
func (s *OrderedSet) Cardinality() int { return len(s.s) } // Iter returns a channel which yields the elements in this set in insertion // order. func (s *OrderedSet) Iter() <-chan string { c := make(chan string) go func() { for _, i := range s.s { c <- i } close(c) }() return c } // Equal returns whether this set has the same number, identity and ordering // of elements as the set "other". func (s *OrderedSet) Equal(other *OrderedSet) bool { if s.Cardinality() != other.Cardinality() { return false } for e, i := range s.m { if ci, ok := other.m[e]; !ok || ci != i { return false } } return true } // Clone returns a deep copy of this set. func (s *OrderedSet) Clone() *OrderedSet { clone := NewOrderedSetWithCapacity(s.Cardinality()) for _, i := range s.s { clone.Add(i) } return clone } git-lfs-3.6.1/tools/ordered_set_test.go000066400000000000000000000136561472372047300201370ustar00rootroot00000000000000package tools import ( "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestOrderedSetAddAddsElements(t *testing.T) { s := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.False(t, s.Contains("d"), "tools: did not expect s to contain \"d\"") assert.True(t, s.Add("d")) assert.True(t, s.Contains("d"), "tools: expected s to contain \"d\"") } func TestOrderedSetContainsReturnsTrueForItemsItContains(t *testing.T) { s := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.True(t, s.Contains("b"), "tools: expected s to contain element \"b\"") } func TestOrderedSetContainsReturnsFalseForItemsItDoesNotContain(t *testing.T) { s := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.False(t, s.Contains("d"), "tools: did not expect s to contain element \"d\"") } func TestOrderedSetContainsAllReturnsTrueWhenAllElementsAreContained(t *testing.T) { s := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.True(t, s.ContainsAll("b", "c"), "tools: expected s to contain elements \"b\" and \"c\"") } func TestOrderedSetContainsAllReturnsFalseWhenAllElementsAreNotContained(t *testing.T) { s := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.False(t, s.ContainsAll("b", "c", "d"), "tools: did not expect s to contain elements \"b\", \"c\" and \"d\"") } func TestOrderedSetIsSubsetReturnsTrueWhenOtherContainsAllElements(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) s2 := NewOrderedSetFromSlice([]string{"a", "b"}) assert.True(t, s1.IsSubset(s2), "tools: expected [a, b] to be a subset of [a, b, c]") } func TestOrderedSetIsSubsetReturnsFalseWhenOtherDoesNotContainAllElements(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b"}) s2 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.False(t, s1.IsSubset(s2), "tools: did not expect [a, b, c] to be a subset of [a, b]") } func TestOrderedSetIsSupersetReturnsTrueWhenContainsAllElementsOfOther(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b"}) s2 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.True(t, s1.IsSuperset(s2), "tools: expected [a, b, c] to be a superset of [a, b]") } func TestOrderedSetIsSupersetReturnsFalseWhenDoesNotContainAllElementsOfOther(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) s2 := NewOrderedSetFromSlice([]string{"a", "b"}) assert.False(t, s1.IsSuperset(s2), "tools: did not expect [a, b] to be a superset of [a, b, c]") } func TestOrderedSetUnion(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a"}) s2 := NewOrderedSetFromSlice([]string{"b", "a"}) elems := make([]string, 0) for e := range
s1.Union(s2).Iter() { elems = append(elems, e) } require.Len(t, elems, 2) assert.Equal(t, "a", elems[0]) assert.Equal(t, "b", elems[1]) } func TestOrderedSetIntersect(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a"}) s2 := NewOrderedSetFromSlice([]string{"b", "a"}) elems := make([]string, 0) for e := range s1.Intersect(s2).Iter() { elems = append(elems, e) } require.Len(t, elems, 1) assert.Equal(t, "a", elems[0]) } func TestOrderedSetDifference(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b"}) s2 := NewOrderedSetFromSlice([]string{"a"}) elems := make([]string, 0) for e := range s1.Difference(s2).Iter() { elems = append(elems, e) } require.Len(t, elems, 1) assert.Equal(t, "b", elems[0]) } func TestOrderedSetSymmetricDifference(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b"}) s2 := NewOrderedSetFromSlice([]string{"b", "c"}) elems := make([]string, 0) for e := range s1.SymmetricDifference(s2).Iter() { elems = append(elems, e) } require.Len(t, elems, 2) assert.Equal(t, "a", elems[0]) assert.Equal(t, "c", elems[1]) } func TestOrderedSetClear(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b"}) assert.Equal(t, 2, s1.Cardinality()) s1.Clear() assert.Equal(t, 0, s1.Cardinality()) } func TestOrderedSetRemove(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b"}) assert.True(t, s1.Contains("a"), "tools: expected [a, b] to contain 'a'") assert.True(t, s1.Contains("b"), "tools: expected [a, b] to contain 'b'") s1.Remove("a") assert.False(t, s1.Contains("a"), "tools: did not expect to find 'a' in [b]") assert.True(t, s1.Contains("b"), "tools: expected [b] to contain 'b'") } func TestOrderedSetCardinality(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b"}) assert.Equal(t, 2, s1.Cardinality(), "tools: expected cardinality of [a, b] to equal 2") } func TestOrderedSetIter(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) elems := make([]string, 0) for e := range s1.Iter() { elems = append(elems, e) } require.Len(t, elems, 3) assert.Equal(t, "a", elems[0]) assert.Equal(t, "b", elems[1]) assert.Equal(t, "c", elems[2]) } func TestOrderedSetEqualReturnsTrueWhenSameElementsInSameOrder(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) s2 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.True(t, s1.Equal(s2), "tools: expected [a, b, c] to equal [a, b, c]") } func TestOrderedSetEqualReturnsFalseWhenSameElementsInDifferentOrder(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) s2 := NewOrderedSetFromSlice([]string{"a", "c", "b"}) assert.False(t, s1.Equal(s2), "tools: did not expect [a, b, c] to equal [a, c, b]") } func TestOrderedSetEqualReturnsFalseWithDifferentCardinalities(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a"}) s2 := NewOrderedSetFromSlice([]string{"a", "b"}) assert.False(t, s1.Equal(s2), "tools: did not expect [a] to equal [a, b]") } func TestOrderedSetClone(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) s2 := s1.Clone() elems := make([]string, 0) for e := range s2.Iter() { elems = append(elems, e) } require.Len(t, elems, 3) assert.Equal(t, "a", elems[0]) assert.Equal(t, "b", elems[1]) assert.Equal(t, "c", elems[2]) } git-lfs-3.6.1/tools/os_tools.go000066400000000000000000000031041472372047300164250ustar00rootroot00000000000000package tools import ( "bytes" "os" "os/exec" "strings" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/git-lfs/git-lfs/v3/tr" "github.com/pkg/errors" ) func Getwd() (dir string, err error) { dir, 
err = os.Getwd() if err != nil { return } if isCygwin() { dir, err = translateCygwinPath(dir) if err != nil { return "", errors.Wrap(err, tr.Tr.Get("error converting working directory to Cygwin")) } } return } func translateCygwinPath(path string) (string, error) { cmd, err := subprocess.ExecCommand("cygpath", "-w", path) if err != nil { // If cygpath doesn't exist, that's okay: just return the path // as we got it. return path, nil } // cygpath uses ISO-8859-1 as the default encoding if the locale is not // set, resulting in breakage, since we want a UTF-8 path. env := make([]string, 0, len(cmd.Env)+1) for _, val := range cmd.Env { if !strings.HasPrefix(val, "LC_ALL=") { env = append(env, val) } } cmd.Env = append(env, "LC_ALL=C.UTF-8") buf := &bytes.Buffer{} cmd.Stderr = buf out, err := cmd.Output() output := strings.TrimSpace(string(out)) if err != nil { // If cygpath doesn't exist, that's okay: just return the path // as we got it. if _, ok := err.(*exec.Error); ok { return path, nil } return path, errors.New(tr.Tr.Get("failed to translate path from Cygwin to Windows: %s", buf.String())) } return output, nil } func TranslateCygwinPath(path string) (string, error) { if isCygwin() { var err error path, err = translateCygwinPath(path) if err != nil { return "", err } } return path, nil } git-lfs-3.6.1/tools/robustio.go000066400000000000000000000003471472372047300164400ustar00rootroot00000000000000//go:build !windows // +build !windows package tools import "os" func RobustRename(oldpath, newpath string) error { return os.Rename(oldpath, newpath) } func RobustOpen(name string) (*os.File, error) { return os.Open(name) } git-lfs-3.6.1/tools/robustio_windows.go000066400000000000000000000013531472372047300202100ustar00rootroot00000000000000//go:build windows // +build windows package tools import ( "errors" "os" "github.com/avast/retry-go" "golang.org/x/sys/windows" ) // isEphemeralError returns true if err may be resolved by waiting. func isEphemeralError(err error) bool { return errors.Is(err, windows.ERROR_SHARING_VIOLATION) } func RobustRename(oldpath, newpath string) error { return retry.Do( func() error { return os.Rename(oldpath, newpath) }, retry.RetryIf(isEphemeralError), retry.LastErrorOnly(true), ) } func RobustOpen(name string) (*os.File, error) { var result *os.File return result, retry.Do( func() error { f, err := os.Open(name) result = f return err }, retry.RetryIf(isEphemeralError), retry.LastErrorOnly(true), ) } git-lfs-3.6.1/tools/str_tools.go000066400000000000000000000054251472372047300166240ustar00rootroot00000000000000package tools import ( "regexp" "strings" ) var ( // quoteFieldRe greedily matches between matching pairs of '', "", or // runs of non-whitespace characters. quoteFieldRe = regexp.MustCompile("'(.*)'|\"(.*)\"|(\\S*)") ) // QuotedFields is an alternative to strings.Fields (see: // https://golang.org/pkg/strings#Fields) that respects spaces between matching // pairs of quotation delimiters.
// // For instance, the quoted fields of the string "foo bar 'baz etc'" would be: // // []string{"foo", "bar", "baz etc"} // // Whereas the same argument given to strings.Fields would return: // // []string{"foo", "bar", "'baz", "etc'"} func QuotedFields(s string) []string { submatches := quoteFieldRe.FindAllStringSubmatch(s, -1) out := make([]string, 0, len(submatches)) for _, matches := range submatches { // if a leading or trailing space is found, ignore that if matches[0] == "" { continue } // otherwise, find the first non-empty match (inside balanced // quotes, or a space-delimited string) var str string for _, m := range matches[1:] { if len(m) > 0 { str = m break } } out = append(out, str) } return out } // Ljust returns a copied string slice where each element is left justified to // match the width of the longest element in the set. func Ljust(strs []string) []string { llen := len(Longest(strs)) dup := make([]string, len(strs), cap(strs)) copy(dup, strs) for i, str := range strs { width := MaxInt(0, llen-len(str)) padding := strings.Repeat(" ", width) dup[i] = str + padding } return dup } // Rjust returns a copied string slice where each element is right justified to // match the width of the longest element in the set. func Rjust(strs []string) []string { llen := len(Longest(strs)) dup := make([]string, len(strs), cap(strs)) copy(dup, strs) for i, str := range strs { width := MaxInt(0, llen-len(str)) padding := strings.Repeat(" ", width) dup[i] = padding + str } return dup } // Longest returns the longest element in the string slice in O(n) time and O(1) // space. If strs is empty or nil, an empty string will be returned. func Longest(strs []string) string { if len(strs) == 0 { return "" } var longest string var llen int for _, str := range strs { if len(str) >= llen { longest = str llen = len(longest) } } return longest } // Indent returns a string which prepends "\t" TAB characters to the beginning // of each line in the given string "str". func Indent(str string) string { indented := strings.Replace(str, "\n", "\n\t", -1) if len(indented) > 0 { indented = "\t" + indented } return indented } var ( tabRe = regexp.MustCompile(`(?m)^[ \t]+`) ) // Undent removes all leading spaces and tabs in the given string "str", line-wise.
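//
// A small illustrative sketch (hypothetical input):
//
//	Undent("\tfoo\n\t\tbar") // == "foo\nbar"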
func Undent(str string) string { return tabRe.ReplaceAllString(str, "") } git-lfs-3.6.1/tools/str_tools_test.go000066400000000000000000000131441472372047300176600ustar00rootroot00000000000000package tools import ( "testing" "github.com/stretchr/testify/assert" ) type QuotedFieldsTestCase struct { Given string Expected []string } func (c *QuotedFieldsTestCase) Assert(t *testing.T) { actual := QuotedFields(c.Given) assert.Equal(t, c.Expected, actual, "tools: expected QuotedFields(%q) to equal %#v (was %#v)", c.Given, c.Expected, actual, ) } func TestQuotedFields(t *testing.T) { for desc, c := range map[string]QuotedFieldsTestCase{ "simple": {`foo bar`, []string{"foo", "bar"}}, "simple trailing": {`foo bar `, []string{"foo", "bar"}}, "simple leading": {` foo bar`, []string{"foo", "bar"}}, "single quotes": {`foo 'bar baz'`, []string{"foo", "bar baz"}}, "single quotes trailing": {`foo 'bar baz' `, []string{"foo", "bar baz"}}, "single quotes leading": {` foo 'bar baz'`, []string{"foo", "bar baz"}}, "single quotes empty": {`foo ''`, []string{"foo", ""}}, "single quotes trailing empty": {`foo '' `, []string{"foo", ""}}, "single quotes leading empty": {` foo ''`, []string{"foo", ""}}, "double quotes": {`foo "bar baz"`, []string{"foo", "bar baz"}}, "double quotes trailing": {`foo "bar baz" `, []string{"foo", "bar baz"}}, "double quotes leading": {` foo "bar baz"`, []string{"foo", "bar baz"}}, "double quotes empty": {`foo ""`, []string{"foo", ""}}, "double quotes trailing empty": {`foo "" `, []string{"foo", ""}}, "double quotes leading empty": {` foo ""`, []string{"foo", ""}}, "nested single quotes": {`foo 'bar 'baz''`, []string{"foo", "bar 'baz'"}}, "nested single quotes trailing": {`foo 'bar 'baz'' `, []string{"foo", "bar 'baz'"}}, "nested single quotes leading": {` foo 'bar 'baz''`, []string{"foo", "bar 'baz'"}}, "nested single quotes empty": {`foo 'bar '''`, []string{"foo", "bar ''"}}, "nested single quotes trailing empty": {`foo 'bar ''' `, []string{"foo", "bar ''"}}, "nested single quotes leading empty": {` foo 'bar '''`, []string{"foo", "bar ''"}}, "nested double quotes": {`foo "bar "baz""`, []string{"foo", `bar "baz"`}}, "nested double quotes trailing": {`foo "bar "baz"" `, []string{"foo", `bar "baz"`}}, "nested double quotes leading": {` foo "bar "baz""`, []string{"foo", `bar "baz"`}}, "nested double quotes empty": {`foo "bar """`, []string{"foo", `bar ""`}}, "nested double quotes trailing empty": {`foo "bar """ `, []string{"foo", `bar ""`}}, "nested double quotes leading empty": {` foo "bar """`, []string{"foo", `bar ""`}}, "mixed quotes": {`foo 'bar "baz"'`, []string{"foo", `bar "baz"`}}, "mixed quotes trailing": {`foo 'bar "baz"' `, []string{"foo", `bar "baz"`}}, "mixed quotes leading": {` foo 'bar "baz"'`, []string{"foo", `bar "baz"`}}, "mixed quotes empty": {`foo 'bar ""'`, []string{"foo", `bar ""`}}, "mixed quotes trailing empty": {`foo 'bar ""' `, []string{"foo", `bar ""`}}, "mixed quotes leading empty": {` foo 'bar ""'`, []string{"foo", `bar ""`}}, } { t.Log(desc) c.Assert(t) } } func TestLongestReturnsEmptyStringGivenEmptySet(t *testing.T) { assert.Equal(t, "", Longest(nil)) } func TestLongestReturnsLongestString(t *testing.T) { assert.Equal(t, "longest", Longest([]string{"short", "longer", "longest"})) } func TestLongestReturnsLastStringGivenSameLength(t *testing.T) { assert.Equal(t, "baz", Longest([]string{"foo", "bar", "baz"})) } func TestRjustRightJustifiesString(t *testing.T) { unjust := []string{ "short", "longer", "longest", } expected := []string{ " short", " longer", 
"longest", } assert.Equal(t, expected, Rjust(unjust)) } func TestLjustLeftJustifiesString(t *testing.T) { unjust := []string{ "short", "longer", "longest", } expected := []string{ "short ", "longer ", "longest", } assert.Equal(t, expected, Ljust(unjust)) } func TestIndentIndentsStrings(t *testing.T) { assert.Equal(t, "\tfoo\n\tbar", Indent("foo\nbar")) } func TestIndentIndentsSingleLineStrings(t *testing.T) { assert.Equal(t, "\tfoo", Indent("foo")) } func TestIndentReturnsEmptyStrings(t *testing.T) { assert.Equal(t, "", Indent("")) } func TestUndentRemovesLeadingWhitespace(t *testing.T) { assert.Equal(t, "foo", Undent("\t\t\tfoo")) assert.Equal(t, "foo", Undent("foo")) assert.Equal(t, "foo", Undent(" foo")) } func TestUndentRemovesPreservesLinebreaks(t *testing.T) { // No leading space assert.Equal(t, "\r\nfoo", Undent("\r\nfoo")) assert.Equal(t, "foo\r\n", Undent("foo\r\n")) assert.Equal(t, "\r\nfoo\r\n", Undent("\r\nfoo\r\n")) assert.Equal(t, "\nfoo", Undent("\nfoo")) assert.Equal(t, "foo\n", Undent("foo\n")) assert.Equal(t, "\nfoo\n", Undent("\nfoo\n")) // Trim leading space assert.Equal(t, "\r\nfoo", Undent("\r\n foo")) assert.Equal(t, "foo\r\n", Undent(" foo\r\n")) assert.Equal(t, "\r\nfoo\r\n", Undent("\r\n foo\r\n")) assert.Equal(t, "\nfoo", Undent("\n foo")) assert.Equal(t, "foo\n", Undent(" foo\n")) assert.Equal(t, "\nfoo\n", Undent("\n foo\n")) // Preserve trailing space assert.Equal(t, "\r\nfoo ", Undent("\r\nfoo ")) assert.Equal(t, "foo \r\n", Undent("foo \r\n")) assert.Equal(t, "\r\nfoo \r\n", Undent("\r\nfoo \r\n")) assert.Equal(t, "\nfoo ", Undent("\nfoo ")) assert.Equal(t, "foo \n", Undent("foo \n")) assert.Equal(t, "\nfoo \n", Undent("\nfoo \n")) // Trim leading space, preserve trailing space assert.Equal(t, "\r\nfoo ", Undent("\r\n foo ")) assert.Equal(t, "foo \r\n", Undent(" foo \r\n")) assert.Equal(t, "\r\nfoo \r\n", Undent("\r\n foo \r\n")) assert.Equal(t, "\nfoo ", Undent("\n foo ")) assert.Equal(t, "foo \n", Undent(" foo \n")) assert.Equal(t, "\nfoo \n", Undent("\n foo \n")) } git-lfs-3.6.1/tools/stringset.go000066400000000000000000000102061472372047300166070ustar00rootroot00000000000000// Generated by: gen, modified by Steve Streeting // TypeWriter: container // Directive: +gen on main.string // See http://clipperhouse.github.io/gen for documentation // Set is a modification of https://github.com/deckarep/golang-set // The MIT License (MIT) // Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) package tools // The primary type that represents a set type StringSet map[string]struct{} // Creates and returns a reference to an empty set. func NewStringSet() StringSet { return make(StringSet) } // Creates and returns a reference to an empty set with a capacity. func NewStringSetWithCapacity(capacity int) StringSet { return make(StringSet, capacity) } // Creates and returns a reference to a set from an existing slice func NewStringSetFromSlice(s []string) StringSet { a := NewStringSetWithCapacity(len(s)) for _, item := range s { a.Add(item) } return a } // Adds an item to the current set if it doesn't already exist in the set. func (set StringSet) Add(i string) bool { _, found := set[i] set[i] = struct{}{} return !found //False if it e xisted already } // Determines if a given item is already in the set. 
func (set StringSet) Contains(i string) bool { _, found := set[i] return found } // Determines if the given items are all in the set func (set StringSet) ContainsAll(i ...string) bool { allSet := NewStringSetFromSlice(i) if allSet.IsSubset(set) { return true } return false } // Determines if every item in the other set is in this set. func (set StringSet) IsSubset(other StringSet) bool { for elem := range set { if !other.Contains(elem) { return false } } return true } // Determines if every item of this set is in the other set. func (set StringSet) IsSuperset(other StringSet) bool { return other.IsSubset(set) } // Returns a new set with all the items of both sets. func (set StringSet) Union(other StringSet) StringSet { unionedSet := NewStringSet() for elem := range set { unionedSet.Add(elem) } for elem := range other { unionedSet.Add(elem) } return unionedSet } // Returns a new set with only the items that exist in both sets. func (set StringSet) Intersect(other StringSet) StringSet { intersection := NewStringSet() // loop over smaller set if set.Cardinality() < other.Cardinality() { for elem := range set { if other.Contains(elem) { intersection.Add(elem) } } } else { for elem := range other { if set.Contains(elem) { intersection.Add(elem) } } } return intersection } // Returns a new set with items in the current set but not in the other set func (set StringSet) Difference(other StringSet) StringSet { differencedSet := NewStringSet() for elem := range set { if !other.Contains(elem) { differencedSet.Add(elem) } } return differencedSet } // Returns a new set with items in the current set or the other set but not in both. func (set StringSet) SymmetricDifference(other StringSet) StringSet { aDiff := set.Difference(other) bDiff := other.Difference(set) return aDiff.Union(bDiff) } // Clears the entire set to be the empty set. func (set *StringSet) Clear() { *set = make(StringSet) } // Allows the removal of a single item in the set. func (set StringSet) Remove(i string) { delete(set, i) } // Cardinality returns how many items are currently in the set. func (set StringSet) Cardinality() int { return len(set) } // Iter() returns a channel of type string that you can range over. func (set StringSet) Iter() <-chan string { ch := make(chan string) go func() { for elem := range set { ch <- elem } close(ch) }() return ch } // Equal determines if two sets are equal to each other. // If they are both the same size and have the same items, they are considered equal. // Order of items is not relevant for sets to be equal. func (set StringSet) Equal(other StringSet) bool { if set.Cardinality() != other.Cardinality() { return false } for elem := range set { if !other.Contains(elem) { return false } } return true } // Returns a clone of the set. // Does NOT clone the underlying elements. func (set StringSet) Clone() StringSet { clonedSet := NewStringSet() for elem := range set { clonedSet.Add(elem) } return clonedSet } git-lfs-3.6.1/tools/sync_writer.go000066400000000000000000000022711472372047300171400ustar00rootroot00000000000000package tools import "io" // closeFn is the type of func Close() in the io.Closer interface. type closeFn func() error // syncFn is the type of func Sync() in the *os.File implementation. type syncFn func() error // SyncWriter provides a wrapper around an io.Writer that synchronizes all // writes after they occur, if the underlying writer supports synchronization.
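//
// A minimal usage sketch, assuming the underlying writer is an *os.File
// (which provides Sync()); names and error handling are illustrative only:
//
//	f, _ := os.Create("out.dat")
//	w := NewSyncWriter(f)
//	_ = w.Write([]byte("data")) // flushed to stable storage via f.Sync()
//	_ = w.Close()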
type SyncWriter struct { w io.Writer closeFn closeFn syncFn syncFn } // NewSyncWriter returns a new instance of the *SyncWriter that sends all writes // to the given io.Writer. func NewSyncWriter(w io.Writer) *SyncWriter { sw := &SyncWriter{ w: w, } if sync, ok := w.(interface { Sync() error }); ok { sw.syncFn = sync.Sync } else { sw.syncFn = func() error { return nil } } if close, ok := w.(io.Closer); ok { sw.closeFn = close.Close } else { sw.closeFn = func() error { return nil } } return sw } // Write will write to the file and perform a Sync() if writing succeeds. func (w *SyncWriter) Write(b []byte) error { if _, err := w.w.Write(b); err != nil { return err } return w.syncFn() } // Close will call Close() on the underlying file func (w *SyncWriter) Close() error { return w.closeFn() } git-lfs-3.6.1/tools/time_tools.go000066400000000000000000000013171472372047300167460ustar00rootroot00000000000000package tools import ( "time" ) // IsExpiredAtOrIn returns whether or not the result of calling TimeAtOrIn is // "expired" within "until" units of time from now. func IsExpiredAtOrIn(from time.Time, until time.Duration, at time.Time, in time.Duration) (time.Time, bool) { expiration := TimeAtOrIn(from, at, in) if expiration.IsZero() { return expiration, false } return expiration, expiration.Before(time.Now().Add(until)) } // TimeAtOrIn returns either "at", or the "in" duration added to the current // time. TimeAtOrIn prefers to add a duration rather than return the "at" // parameter. func TimeAtOrIn(from, at time.Time, in time.Duration) time.Time { if in == 0 { return at } return from.Add(in) } git-lfs-3.6.1/tools/time_tools_test.go000066400000000000000000000037011472372047300200040ustar00rootroot00000000000000package tools import ( "testing" "time" "github.com/stretchr/testify/assert" ) func TestTimeAtOrInNoDuration(t *testing.T) { now := time.Now() then := time.Now().Add(24 * time.Hour) got := TimeAtOrIn(now, then, time.Duration(0)) assert.Equal(t, then, got) } func TestTimeAtOrInWithDuration(t *testing.T) { now := time.Now() duration := 5 * time.Minute expected := now.Add(duration) got := TimeAtOrIn(now, now, duration) assert.Equal(t, expected, got) } func TestTimeAtOrInZeroTime(t *testing.T) { now := time.Now() zero := time.Time{} got := TimeAtOrIn(now, zero, 0) assert.Equal(t, zero, got) } func TestIsExpiredAtOrInWithNonZeroTime(t *testing.T) { now := time.Now() within := 5 * time.Minute at := now.Add(10 * time.Minute) in := time.Duration(0) expired, ok := IsExpiredAtOrIn(now, within, at, in) assert.False(t, ok) assert.Equal(t, at, expired) } func TestIsExpiredAtOrInWithNonZeroDuration(t *testing.T) { now := time.Now() within := 5 * time.Minute at := time.Time{} in := 10 * time.Minute expired, ok := IsExpiredAtOrIn(now, within, at, in) assert.Equal(t, now.Add(in), expired) assert.False(t, ok) } func TestIsExpiredAtOrInWithNonZeroTimeExpired(t *testing.T) { now := time.Now() within := 5 * time.Minute at := now.Add(3 * time.Minute) in := time.Duration(0) expired, ok := IsExpiredAtOrIn(now, within, at, in) assert.True(t, ok) assert.Equal(t, at, expired) } func TestIsExpiredAtOrInWithNonZeroDurationExpired(t *testing.T) { now := time.Now() within := 5 * time.Minute at := time.Time{} in := -10 * time.Minute expired, ok := IsExpiredAtOrIn(now, within, at, in) assert.Equal(t, now.Add(in), expired) assert.True(t, ok) } func TestIsExpiredAtOrInWithAmbiguousTime(t *testing.T) { now := time.Now() within := 5 * time.Minute at := now.Add(-10 * time.Minute) in := 10 * time.Minute expired, ok := 
IsExpiredAtOrIn(now, within, at, in) assert.Equal(t, now.Add(in), expired) assert.False(t, ok) } git-lfs-3.6.1/tools/umask_nix.go000066400000000000000000000003011472372047300165600ustar00rootroot00000000000000//go:build !windows // +build !windows package tools import "syscall" func doWithUmask(mask int, f func() error) error { mask = syscall.Umask(mask) defer syscall.Umask(mask) return f() } git-lfs-3.6.1/tools/umask_windows.go000066400000000000000000000001661472372047300174630ustar00rootroot00000000000000//go:build windows // +build windows package tools func doWithUmask(mask int, f func() error) error { return f() } git-lfs-3.6.1/tools/util_darwin.go000066400000000000000000000052571472372047300171140ustar00rootroot00000000000000//go:build darwin // +build darwin package tools import ( "io" "os" "strconv" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tr" "golang.org/x/sys/unix" ) var cloneFileSupported bool func init() { cloneFileSupported = checkCloneFileSupported() } // checkCloneFileSupported returns true iff the macOS version is greater than or equal to 10.12.x Sierra. // // clonefile is supported since Mac OS X 10.12 // https://www.manpagez.com/man/2/clonefile/ // // kern.osrelease mapping // 17.x.x macOS 10.13.x High Sierra. // 16.x.x macOS 10.12.x Sierra. // 15.x.x OS X 10.11.x El Capitan. func checkCloneFileSupported() bool { bytes, err := unix.Sysctl("kern.osrelease") if err != nil { return false } versionString := strings.Split(string(bytes), ".") // major.minor.patch if len(versionString) < 2 { return false } major, err := strconv.Atoi(versionString[0]) if err != nil { return false } return major >= 16 } // CheckCloneFileSupported runs an explicit test of clone file support on the supplied directory. // This function creates temporary (src and dst) files in the directory and removes them after the test finishes. // // If the check failed (e.g. the directory is read-only), returns err. func CheckCloneFileSupported(dir string) (supported bool, err error) { if !cloneFileSupported { return false, errors.New(tr.Tr.Get("Unsupported OS version. 10.12.x Sierra or higher required.")) } src, err := os.CreateTemp(dir, "src") if err != nil { return false, err } defer os.Remove(src.Name()) src.Close() dst, err := os.CreateTemp(dir, "dst") if err != nil { return false, err } defer os.Remove(dst.Name()) dst.Close() return CloneFileByPath(dst.Name(), src.Name()) } type CloneFileError struct { Unsupported bool errorString string } func (c *CloneFileError) Error() string { return c.errorString } func CloneFile(_ io.Writer, _ io.Reader) (bool, error) { return false, nil // Cloning from io.Writer(file descriptor) is not supported by Darwin.
} func CloneFileByPath(dst, src string) (bool, error) { if !cloneFileSupported { return false, &CloneFileError{Unsupported: true, errorString: tr.Tr.Get("clonefile is not supported")} } if FileExists(dst) { if err := os.Remove(dst); err != nil { return false, err // File must not exist before we create it } } if err := cloneFileSyscall(dst, src); err != nil { return false, err } return true, nil } func cloneFileSyscall(dst, src string) *CloneFileError { err := unix.Clonefileat(unix.AT_FDCWD, src, unix.AT_FDCWD, dst, unix.CLONE_NOFOLLOW) if err != nil { return &CloneFileError{ Unsupported: err == unix.ENOTSUP, errorString: tr.Tr.Get("error cloning from %v to %v: %s", src, dst, err), } } return nil } git-lfs-3.6.1/tools/util_darwin_test.go000066400000000000000000000025321472372047300201500ustar00rootroot00000000000000//go:build darwin // +build darwin package tools import ( "os" "path" "testing" "github.com/stretchr/testify/assert" ) func TestCheckCloneFileSupported(t *testing.T) { as := assert.New(t) // Do ok, err := CheckCloneFileSupported(os.TempDir()) // Verify t.Logf("ok = %v, err = %v", ok, err) // Just logging for the 1st element if !checkCloneFileSupported() { as.EqualError(err, "Unsupported OS version. 10.12.x Sierra or higher required.") } } func TestCloneFile(t *testing.T) { as := assert.New(t) // Do ok, err := CloneFile(nil, nil) // Verify always no error and not ok as.NoError(err) as.False(ok) } func TestCloneFileByPath(t *testing.T) { if !cloneFileSupported { t.Skip("clone not supported on this platform") } src := path.Join(os.TempDir(), "src") t.Logf("src = %s", src) dst := path.Join(os.TempDir(), "dst") t.Logf("dst = %s", dst) as := assert.New(t) // Precondition err := os.WriteFile(src, []byte("TEST"), 0666) as.NoError(err) // Do ok, err := CloneFileByPath(dst, src) if err != nil { if cloneFileError, ok := err.(*CloneFileError); ok && cloneFileError.Unsupported { t.Log(err) t.Skip("clonefile is not supported for temporary files in this OS installation.") } t.Error(err) } // Verify as.NoError(err) as.True(ok) dstContents, err := os.ReadFile(dst) as.NoError(err) as.Equal("TEST", string(dstContents)) } git-lfs-3.6.1/tools/util_generic.go000066400000000000000000000007411472372047300172410ustar00rootroot00000000000000//go:build !linux && !darwin && !windows // +build !linux,!darwin,!windows package tools import ( "io" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tr" ) func CheckCloneFileSupported(dir string) (supported bool, err error) { return false, errors.New(tr.Tr.Get("unsupported platform")) } func CloneFile(writer io.Writer, reader io.Reader) (bool, error) { return false, nil } func CloneFileByPath(_, _ string) (bool, error) { return false, nil } git-lfs-3.6.1/tools/util_linux.go000066400000000000000000000026341472372047300167670ustar00rootroot00000000000000//go:build linux // +build linux package tools import ( "io" "os" "golang.org/x/sys/unix" ) // CheckCloneFileSupported runs an explicit test of clone file support on the supplied directory. // This function creates temporary (src and dst) files in the directory and removes them after the test finishes. // // If the check failed (e.g. the directory is read-only), returns err.
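//
// Callers might probe a directory roughly like this (an illustrative
// sketch; "dir" is a hypothetical path):
//
//	if ok, err := CheckCloneFileSupported(dir); err == nil && ok {
//		// reflink-style cloning is available within dir
//	}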
func CheckCloneFileSupported(dir string) (supported bool, err error) { src, err := os.CreateTemp(dir, "src") if err != nil { return false, err } defer func() { src.Close() os.Remove(src.Name()) }() dst, err := os.CreateTemp(dir, "dst") if err != nil { return false, err } defer func() { dst.Close() os.Remove(dst.Name()) }() if ok, err := CloneFile(dst, src); err != nil { return false, err } else { return ok, nil } } func CloneFile(writer io.Writer, reader io.Reader) (bool, error) { fdst, fdstFound := writer.(*os.File) fsrc, fsrcFound := reader.(*os.File) if fdstFound && fsrcFound { if err := unix.IoctlFileClone(int(fdst.Fd()), int(fsrc.Fd())); err != nil { return false, err } return true, nil } return false, nil } func CloneFileByPath(dst, src string) (bool, error) { srcFile, err := os.Open(src) if err != nil { return false, err } defer srcFile.Close() dstFile, err := os.Create(dst) // truncating it if it already exists. if err != nil { return false, err } defer dstFile.Close() return CloneFile(dstFile, srcFile) } git-lfs-3.6.1/tools/util_test.go000066400000000000000000000014701472372047300166040ustar00rootroot00000000000000package tools import ( "bytes" "io" "os" "testing" "github.com/stretchr/testify/assert" ) func TestCopyWithCallback(t *testing.T) { buf := bytes.NewBufferString("BOOYA") called := 0 calledWritten := make([]int64, 0, 2) n, err := CopyWithCallback(io.Discard, buf, 5, func(total int64, written int64, current int) error { called += 1 calledWritten = append(calledWritten, written) assert.Equal(t, 5, int(total)) return nil }) assert.Nil(t, err) assert.Equal(t, 5, int(n)) assert.Equal(t, 1, called) assert.Len(t, calledWritten, 1) assert.Equal(t, 5, int(calledWritten[0])) } func TestMethodExists(t *testing.T) { // testing that the following methods exist on all platforms. _, _ = CheckCloneFileSupported(os.TempDir()) _, _ = CloneFile(io.Writer(nil), io.Reader(nil)) _, _ = CloneFileByPath("", "") } git-lfs-3.6.1/tools/util_windows.go000066400000000000000000000103571472372047300173230ustar00rootroot00000000000000//go:build windows // +build windows package tools import ( "io" "os" "unsafe" "golang.org/x/sys/windows" ) var ( availableClusterSize = []int64{64 * 1024, 4 * 1024} // ReFS only supports 64KiB and 4KiB cluster sizes. GiB = int64(1024 * 1024 * 1024) ) // fsctlDuplicateExtentsToFile = FSCTL_DUPLICATE_EXTENTS_TO_FILE IOCTL // Instructs the file system to copy a range of file bytes on behalf of an application. // // https://docs.microsoft.com/windows/win32/api/winioctl/ni-winioctl-fsctl_duplicate_extents_to_file const fsctlDuplicateExtentsToFile = 623428 // duplicateExtentsData = DUPLICATE_EXTENTS_DATA structure // Contains parameters for the FSCTL_DUPLICATE_EXTENTS control code that performs the Block Cloning operation. // // https://docs.microsoft.com/windows/win32/api/winioctl/ns-winioctl-duplicate_extents_data type duplicateExtentsData struct { FileHandle windows.Handle SourceFileOffset int64 TargetFileOffset int64 ByteCount int64 } // CheckCloneFileSupported runs an explicit test of clone file support on the supplied directory. // This function creates temporary (src and dst) files in the directory and removes them after the test finishes. // // If the check failed (e.g. the directory is read-only), returns err. func CheckCloneFileSupported(dir string) (supported bool, err error) { src, err := os.CreateTemp(dir, "src") if err != nil { return false, err } defer func() { src.Close() os.Remove(src.Name()) }() // Make src file not empty.
// Because `FSCTL_DUPLICATE_EXTENTS_TO_FILE` on an empty file always succeeds even if the filesystem doesn't support it. _, err = src.WriteString("TESTING") if err != nil { return false, err } dst, err := os.CreateTemp(dir, "dst") if err != nil { return false, err } defer func() { dst.Close() os.Remove(dst.Name()) }() return CloneFile(dst, src) } func CloneFileByPath(dst, src string) (success bool, err error) { dstFile, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, 0666) // A no-truncate version of os.Create if err != nil { return } defer dstFile.Close() srcFile, err := os.Open(src) if err != nil { return } defer srcFile.Close() return CloneFile(dstFile, srcFile) } func CloneFile(writer io.Writer, reader io.Reader) (success bool, err error) { dst, dstIsFile := writer.(*os.File) src, srcIsFile := reader.(*os.File) if !(dstIsFile && srcIsFile) { return false, nil } srcStat, err := src.Stat() if err != nil { return } fileSize := srcStat.Size() err = dst.Truncate(fileSize) // set file size. There is a requirement "The destination region must not extend past the end of file." if err != nil { return } offset := int64(0) // Requirements // * The source and destination regions must begin and end at a cluster boundary. (4KiB or 64KiB) // * cloneRegionSize must be less than 4GiB. // see https://docs.microsoft.com/windows/win32/fileio/block-cloning // Clone the initial whole-GiB regions. for ; offset+GiB < fileSize; offset += GiB { err = callDuplicateExtentsToFile(dst, src, offset, GiB) if err != nil { return false, err } } // Clone the tail. First try with 64KiB round up, then fall back to 4KiB. for _, cloneRegionSize := range availableClusterSize { err = callDuplicateExtentsToFile(dst, src, offset, roundUp(fileSize-offset, cloneRegionSize)) if err != nil { continue } break } return err == nil, err } // call FSCTL_DUPLICATE_EXTENTS_TO_FILE IOCTL // see https://docs.microsoft.com/en-us/windows/win32/api/winioctl/ni-winioctl-fsctl_duplicate_extents_to_file // // memo: Overflow (when cloneRegionSize extends past the end of the file) is safe and simply ignored by Windows.
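//
// As an illustration of the rounding used for the tail region above,
// roundUp pads a length to the next cluster boundary (example values):
//
//	roundUp(5000, 4096) // == 8192 (two 4KiB clusters)
//	roundUp(8192, 4096) // == 8192 (already aligned)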
func callDuplicateExtentsToFile(dst, src *os.File, offset int64, cloneRegionSize int64) (err error) { var ( bytesReturned uint32 overlapped windows.Overlapped ) request := duplicateExtentsData{ FileHandle: windows.Handle(src.Fd()), SourceFileOffset: offset, TargetFileOffset: offset, ByteCount: cloneRegionSize, } return windows.DeviceIoControl( windows.Handle(dst.Fd()), fsctlDuplicateExtentsToFile, (*byte)(unsafe.Pointer(&request)), uint32(unsafe.Sizeof(request)), (*byte)(unsafe.Pointer(nil)), // = nullptr 0, &bytesReturned, &overlapped) } func roundUp(value, base int64) int64 { mod := value % base if mod == 0 { return value } return value - mod + base } git-lfs-3.6.1/tools/util_windows_test.go000066400000000000000000000037411472372047300203610ustar00rootroot00000000000000//go:build windows // +build windows package tools import ( "crypto/sha256" "encoding/hex" "fmt" "io" "os" "testing" "github.com/git-lfs/git-lfs/v3/errors" "github.com/stretchr/testify/assert" ) func TestCloneFile(t *testing.T) { testDir := os.Getenv("REFS_TEST_DIR") if testDir == "" { testDir, _ = Getwd() } t.Logf("testing on: %s", testDir) supported, err := CheckCloneFileSupported(testDir) if err != nil || !supported { t.Skip(err) } testCases := []struct { name string size int64 }{ {"Small", 123}, {"Smaller than 4K", 4*1024 - 1}, {"Equal to 4K", 4 * 1024}, {"Larger than 4K", 4*1024 + 1}, {"Smaller than 64K", 64*1024 - 1}, {"Equal to 64K", 64 * 1024}, {"Larger than 64K", 64*1024 + 1}, {"Large", 12345678}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { as := assert.New(t) src, err := os.CreateTemp(testDir, tc.name+"_src") as.NoError(err) dst, err := os.CreateTemp(testDir, tc.name+"_dst") as.NoError(err) srcHash, err := fillFile(src, tc.size) as.NoError(err) ok, err := CloneFile(dst, src) as.NoError(err) as.True(ok) sha := sha256.New() dst.Seek(0, io.SeekStart) io.Copy(sha, dst) dstHash := hex.EncodeToString(sha.Sum(nil)) as.Equal(srcHash, dstHash) }) } } func fillFile(target *os.File, size int64) (hash string, err error) { str := make([]byte, 1024) for i := 0; i < 1023; i++ { str[i] = fmt.Sprintf("%x", i%16)[0] } str[1023] = '\n' for i := int64(0); i < size; i += 1024 { _, err := target.Write(str) if err != nil { panic(err) } } err = target.Truncate(size) if err != nil { return "", err } err = target.Sync() if err != nil { return "", err } _, err = target.Seek(0, io.SeekStart) if err != nil { return "", err } sha := sha256.New() copySize, err := io.Copy(sha, target) if err != nil { return "", err } if size != copySize { return "", errors.New("size mismatch") } return hex.EncodeToString(sha.Sum(nil)), nil } git-lfs-3.6.1/tq/000077500000000000000000000000001472372047300135235ustar00rootroot00000000000000git-lfs-3.6.1/tq/adapterbase.go000066400000000000000000000163411472372047300163320ustar00rootroot00000000000000package tq import ( "net/http" "regexp" "strings" "sync" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/fs" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) // adapterBase implements the common functionality for core adapters which // process transfers with N workers handling an oid each, and which wait for // authentication to succeed on one worker before proceeding type adapterBase struct { fs *fs.Filesystem name string direction Direction transferImpl transferImplementation apiClient *lfsapi.Client remote string jobChan chan *job debugging bool cb ProgressCallback // WaitGroup to sync the completion of all workers 
workerWait sync.WaitGroup // WaitGroup to sync the completion of all in-flight jobs jobWait *sync.WaitGroup // WaitGroup to serialise the first transfer response to perform login if needed authWait sync.WaitGroup } // transferImplementation must be implemented to provide the actual upload/download // implementation for all core transfer approaches that use adapterBase for // convenience. This function will be called on multiple goroutines so it // must be either stateless or thread safe. However it will never be called // for the same oid in parallel. // If authOkFunc is not nil, implementations must call it as early as possible // when authentication succeeded, before the whole file content is transferred type transferImplementation interface { // WorkerStarting is called when a worker goroutine starts to process jobs // Implementations can run some startup logic here & return some context if needed WorkerStarting(workerNum int) (interface{}, error) // WorkerEnding is called when a worker goroutine is shutting down // Implementations can clean up per-worker resources here, context is as returned from WorkerStarting WorkerEnding(workerNum int, ctx interface{}) // DoTransfer performs a single transfer within a worker. ctx is any context returned from WorkerStarting DoTransfer(ctx interface{}, t *Transfer, cb ProgressCallback, authOkFunc func()) error } const ( enableHrefRewriteKey = "lfs.transfer.enablehrefrewrite" defaultEnableHrefRewrite = false ) func newAdapterBase(f *fs.Filesystem, name string, dir Direction, ti transferImplementation) *adapterBase { return &adapterBase{ fs: f, name: name, direction: dir, transferImpl: ti, jobWait: new(sync.WaitGroup), } } func (a *adapterBase) Name() string { return a.name } func (a *adapterBase) Direction() Direction { return a.direction } func (a *adapterBase) Begin(cfg AdapterConfig, cb ProgressCallback) error { a.apiClient = cfg.APIClient() a.remote = cfg.Remote() a.cb = cb a.jobChan = make(chan *job, 100) a.debugging = a.apiClient.OSEnv().Bool("GIT_TRANSFER_TRACE", false) || a.apiClient.OSEnv().Bool("GIT_CURL_VERBOSE", false) maxConcurrency := cfg.ConcurrentTransfers() a.Trace("xfer: adapter %q Begin() with %d workers", a.Name(), maxConcurrency) a.workerWait.Add(maxConcurrency) a.authWait.Add(1) for i := 0; i < maxConcurrency; i++ { ctx, err := a.transferImpl.WorkerStarting(i) if err != nil { return err } go a.worker(i, ctx) } a.Trace("xfer: adapter %q started", a.Name()) return nil } type job struct { T *Transfer results chan<- TransferResult wg *sync.WaitGroup } func (j *job) Done(err error) { j.results <- TransferResult{j.T, err} j.wg.Done() } func (a *adapterBase) Add(transfers ...*Transfer) <-chan TransferResult { results := make(chan TransferResult, len(transfers)) a.jobWait.Add(len(transfers)) go func() { for _, t := range transfers { a.jobChan <- &job{t, results, a.jobWait} } a.jobWait.Wait() close(results) }() return results } func (a *adapterBase) End() { a.Trace("xfer: adapter %q End()", a.Name()) a.jobWait.Wait() close(a.jobChan) // wait for all transfers to complete a.workerWait.Wait() a.Trace("xfer: adapter %q stopped", a.Name()) } func (a *adapterBase) Trace(format string, args ...interface{}) { if !a.debugging { return } tracerx.Printf(format, args...) 
} // worker function, many of these run per adapter func (a *adapterBase) worker(workerNum int, ctx interface{}) { a.Trace("xfer: adapter %q worker %d starting", a.Name(), workerNum) waitForAuth := workerNum > 0 signalAuthOnResponse := workerNum == 0 // First worker is the only one allowed to start immediately // The rest wait until successful response from 1st worker to // make sure only 1 login prompt is presented if necessary // Deliberately outside jobChan processing so we know worker 0 will process 1st item if waitForAuth { a.Trace("xfer: adapter %q worker %d waiting for Auth", a.Name(), workerNum) a.authWait.Wait() a.Trace("xfer: adapter %q worker %d auth signal received", a.Name(), workerNum) } for job := range a.jobChan { t := job.T var authCallback func() if signalAuthOnResponse { authCallback = func() { a.authWait.Done() signalAuthOnResponse = false } } a.Trace("xfer: adapter %q worker %d processing job for %q", a.Name(), workerNum, t.Oid) // Actual transfer happens here var err error if t.Size < 0 { err = errors.New(tr.Tr.Get("object %q has invalid size (got: %d)", t.Oid, t.Size)) } else { err = a.transferImpl.DoTransfer(ctx, t, a.cb, authCallback) } // Mark the job as completed, and alert all listeners job.Done(err) a.Trace("xfer: adapter %q worker %d finished job for %q", a.Name(), workerNum, t.Oid) } // This will only happen if no jobs were submitted; just wake up all workers to finish if signalAuthOnResponse { a.authWait.Done() } a.Trace("xfer: adapter %q worker %d stopping", a.Name(), workerNum) a.transferImpl.WorkerEnding(workerNum, ctx) a.workerWait.Done() } var httpRE = regexp.MustCompile(`\Ahttps?://`) func (a *adapterBase) newHTTPRequest(method string, rel *Action) (*http.Request, error) { enableRewrite := a.apiClient.GitEnv().Bool(enableHrefRewriteKey, defaultEnableHrefRewrite) href := rel.Href if enableRewrite { href = a.apiClient.Endpoints.NewEndpoint(a.direction.String(), rel.Href).Url } if !httpRE.MatchString(href) { urlfragment := strings.SplitN(href, "?", 2)[0] return nil, errors.New(tr.Tr.Get("missing protocol: %q", urlfragment)) } req, err := http.NewRequest(method, href, nil) if err != nil { return nil, err } for key, value := range rel.Header { req.Header.Set(key, value) } return req, nil } func (a *adapterBase) doHTTP(t *Transfer, req *http.Request) (*http.Response, error) { if t.Authenticated { return a.apiClient.Do(req) } endpoint := endpointURL(req.URL.String(), t.Oid) return a.apiClient.DoWithAuthNoRetry(a.remote, a.apiClient.Endpoints.AccessFor(endpoint), req) } func advanceCallbackProgress(cb ProgressCallback, t *Transfer, numBytes int64) { if cb != nil { // Must split into max int sizes since read count is int const maxInt = int(^uint(0) >> 1) for read := int64(0); read < numBytes; { remainder := numBytes - read if remainder > int64(maxInt) { read += int64(maxInt) cb(t.Name, t.Size, read, maxInt) } else { read += remainder cb(t.Name, t.Size, read, int(remainder)) } } } } func endpointURL(rawurl, oid string) string { return strings.Split(rawurl, oid)[0] } git-lfs-3.6.1/tq/api.go000066400000000000000000000060551472372047300146310ustar00rootroot00000000000000package tq import ( "time" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) type tqClient struct { maxRetries int *lfsapi.Client } type batchRef struct { Name string `json:"name,omitempty"` } type batchRequest struct { Operation
string `json:"operation"` Objects []*Transfer `json:"objects"` TransferAdapterNames []string `json:"transfers,omitempty"` Ref *batchRef `json:"ref"` HashAlgorithm string `json:"hash_algo"` } type BatchResponse struct { Objects []*Transfer `json:"objects"` TransferAdapterName string `json:"transfer"` HashAlgorithm string `json:"hash_algo"` endpoint lfshttp.Endpoint } func Batch(m Manifest, dir Direction, remote string, remoteRef *git.Ref, objects []*Transfer) (*BatchResponse, error) { if len(objects) == 0 { return &BatchResponse{}, nil } cm := m.Upgrade() return cm.batchClient().Batch(remote, &batchRequest{ Operation: dir.String(), Objects: objects, TransferAdapterNames: m.GetAdapterNames(dir), Ref: &batchRef{Name: remoteRef.Refspec()}, HashAlgorithm: "sha256", }) } type BatchClient interface { Batch(remote string, bReq *batchRequest) (*BatchResponse, error) MaxRetries() int SetMaxRetries(n int) } func (c *tqClient) MaxRetries() int { return c.maxRetries } func (c *tqClient) SetMaxRetries(n int) { c.maxRetries = n } func (c *tqClient) Batch(remote string, bReq *batchRequest) (*BatchResponse, error) { bRes := &BatchResponse{} if len(bReq.Objects) == 0 { return bRes, nil } if len(bReq.TransferAdapterNames) == 1 && bReq.TransferAdapterNames[0] == "basic" { bReq.TransferAdapterNames = nil } missing := make(map[string]bool) for _, obj := range bReq.Objects { missing[obj.Oid] = obj.Missing } bRes.endpoint = c.Endpoints.Endpoint(bReq.Operation, remote) requestedAt := time.Now() req, err := c.NewRequest("POST", bRes.endpoint, "objects/batch", bReq) if err != nil { return nil, errors.Wrap(err, tr.Tr.Get("batch request")) } tracerx.Printf("api: batch %d files", len(bReq.Objects)) req = c.Client.LogRequest(req, "lfs.batch") res, err := c.DoAPIRequestWithAuth(remote, lfshttp.WithRetries(req, c.MaxRetries())) if err != nil { tracerx.Printf("api error: %s", err) return nil, errors.Wrap(err, tr.Tr.Get("batch response")) } if err := lfshttp.DecodeJSON(res, bRes); err != nil { return bRes, errors.Wrap(err, tr.Tr.Get("batch response")) } if bRes.HashAlgorithm != "" && bRes.HashAlgorithm != "sha256" { return bRes, errors.Wrap(errors.New(tr.Tr.Get("unsupported hash algorithm")), tr.Tr.Get("batch response")) } if res.StatusCode != 200 { return nil, lfshttp.NewStatusCodeError(res) } for _, obj := range bRes.Objects { obj.Missing = missing[obj.Oid] for _, a := range obj.Actions { a.createdAt = requestedAt } } return bRes, nil } git-lfs-3.6.1/tq/api_test.go000066400000000000000000000116211472372047300156630ustar00rootroot00000000000000package tq import ( "encoding/json" "fmt" "net/http" "net/http/httptest" "os" "path/filepath" "strings" "testing" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/xeipuuv/gojsonschema" ) func TestAPIBatch(t *testing.T) { require.NotNil(t, batchReqSchema, batchReqSchema.Source) require.NotNil(t, batchResSchema, batchResSchema.Source) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/api/objects/batch" { w.WriteHeader(404) return } assert.Equal(t, "POST", r.Method) assert.Equal(t, "106", r.Header.Get("Content-Length")) bodyLoader, body := gojsonschema.NewReaderLoader(r.Body) bReq := &batchRequest{} err := json.NewDecoder(body).Decode(bReq) r.Body.Close() assert.Nil(t, err) assertSchema(t, batchReqSchema, bodyLoader) assert.EqualValues(t, []string{"basic", "whatev"}, bReq.TransferAdapterNames) if assert.Equal(t, 
1, len(bReq.Objects)) { assert.Equal(t, "a", bReq.Objects[0].Oid) } w.Header().Set("Content-Type", "application/json") writeLoader, resWriter := gojsonschema.NewWriterLoader(w) err = json.NewEncoder(resWriter).Encode(&BatchResponse{ TransferAdapterName: "basic", Objects: bReq.Objects, }) assert.Nil(t, err) assertSchema(t, batchResSchema, writeLoader) })) defer srv.Close() c, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.url": srv.URL + "/api", })) require.Nil(t, err) tqc := &tqClient{Client: c} bReq := &batchRequest{ TransferAdapterNames: []string{"basic", "whatev"}, Objects: []*Transfer{ &Transfer{Oid: "a", Size: 1}, }, } bRes, err := tqc.Batch("remote", bReq) require.Nil(t, err) assert.Equal(t, "basic", bRes.TransferAdapterName) if assert.Equal(t, 1, len(bRes.Objects)) { assert.Equal(t, "a", bRes.Objects[0].Oid) } } func TestAPIBatchOnlyBasic(t *testing.T) { require.NotNil(t, batchReqSchema, batchReqSchema.Source) require.NotNil(t, batchResSchema, batchResSchema.Source) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/api/objects/batch" { w.WriteHeader(404) return } assert.Equal(t, "POST", r.Method) bodyLoader, body := gojsonschema.NewReaderLoader(r.Body) bReq := &batchRequest{} err := json.NewDecoder(body).Decode(bReq) r.Body.Close() assert.Nil(t, err) assertSchema(t, batchReqSchema, bodyLoader) assert.Equal(t, 0, len(bReq.TransferAdapterNames)) if assert.Equal(t, 1, len(bReq.Objects)) { assert.Equal(t, "a", bReq.Objects[0].Oid) } w.Header().Set("Content-Type", "application/json") writeLoader, resWriter := gojsonschema.NewWriterLoader(w) err = json.NewEncoder(resWriter).Encode(&BatchResponse{ TransferAdapterName: "basic", Objects: make([]*Transfer, 0), }) assert.Nil(t, err) assertSchema(t, batchResSchema, writeLoader) })) defer srv.Close() c, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.url": srv.URL + "/api", })) require.Nil(t, err) tqc := &tqClient{Client: c} bReq := &batchRequest{ TransferAdapterNames: []string{"basic"}, Objects: []*Transfer{ &Transfer{Oid: "a", Size: 1}, }, } bRes, err := tqc.Batch("remote", bReq) require.Nil(t, err) assert.Equal(t, "basic", bRes.TransferAdapterName) } func TestAPIBatchEmptyObjects(t *testing.T) { c, err := lfsapi.NewClient(nil) require.Nil(t, err) tqc := &tqClient{Client: c} bReq := &batchRequest{ TransferAdapterNames: []string{"basic", "whatev"}, } bRes, err := tqc.Batch("remote", bReq) require.Nil(t, err) assert.Equal(t, "", bRes.TransferAdapterName) assert.Equal(t, 0, len(bRes.Objects)) } var ( batchReqSchema *sourcedSchema batchResSchema *sourcedSchema ) func init() { wd, err := os.Getwd() if err != nil { fmt.Println("getwd error:", err) return } batchReqSchema = getSchema(wd, "schemas/http-batch-request-schema.json") batchResSchema = getSchema(wd, "schemas/http-batch-response-schema.json") } type sourcedSchema struct { Source string *gojsonschema.Schema } func getSchema(wd, relpath string) *sourcedSchema { abspath := filepath.ToSlash(filepath.Join(wd, relpath)) s, err := gojsonschema.NewSchema(gojsonschema.NewReferenceLoader(fmt.Sprintf("file:///%s", abspath))) if err != nil { fmt.Printf("schema load error for %q: %+v\n", relpath, err) } return &sourcedSchema{Source: relpath, Schema: s} } func assertSchema(t *testing.T, schema *sourcedSchema, dataLoader gojsonschema.JSONLoader) { res, err := schema.Validate(dataLoader) if assert.Nil(t, err) { if res.Valid() { return } resErrors := res.Errors() valErrors := make([]string, 
0, len(resErrors)) for _, resErr := range resErrors { valErrors = append(valErrors, resErr.String()) } t.Errorf("Schema: %s\n%s", schema.Source, strings.Join(valErrors, "\n")) } } git-lfs-3.6.1/tq/basic_download.go000066400000000000000000000210301472372047300170160ustar00rootroot00000000000000package tq import ( "fmt" "hash" "io" "net/http" "os" "path/filepath" "regexp" "strconv" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) // Adapter for basic HTTP downloads, includes resuming via HTTP Range type basicDownloadAdapter struct { *adapterBase } func (a *basicDownloadAdapter) tempDir() string { // Shared with the SSH adapter. d := filepath.Join(a.fs.LFSStorageDir, "incomplete") if err := tools.MkdirAll(d, a.fs); err != nil { return os.TempDir() } return d } func (a *basicDownloadAdapter) WorkerStarting(workerNum int) (interface{}, error) { return nil, nil } func (a *basicDownloadAdapter) WorkerEnding(workerNum int, ctx interface{}) { } func (a *basicDownloadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCallback, authOkFunc func()) error { // Reserve a temporary filename. We need to make sure nobody operates on the file simultaneously with us. f, err := tools.TempFile(a.tempDir(), t.Oid, a.fs) if err != nil { return err } tmpName := f.Name() defer func() { // Fail-safe: Most implementations of os.File.Close() do a nil check if f != nil { f.Close() } // This will delete the temp file if: // - we failed to fully download the file and move it to its final location, including the case when the final location already // exists because another parallel git-lfs process downloaded the file // - we also failed to move it to a partially-downloaded location os.Remove(tmpName) }() // Close the file because we will attempt to move a partially-downloaded one on top of it if err := f.Close(); err != nil { return err } // Attempt to resume download. No error checking here. If we fail, we'll simply download from the start tools.RobustRename(a.downloadFilename(t), f.Name()) // Open temp file. It is either empty or partially downloaded f, err = os.OpenFile(f.Name(), os.O_RDWR, 0644) if err != nil { return err } // Read any existing data into hash hash := tools.NewLfsContentHash() fromByte, err := io.Copy(hash, f) if err != nil { return err } // Ensure that the partial file seems valid if fromByte > 0 { if fromByte < t.Size-1 { tracerx.Printf("xfer: Attempting to resume download of %q from byte %d", t.Oid, fromByte) } else { // Somehow we have more data than expected. Let's retry from the beginning. if _, err := f.Seek(0, io.SeekStart); err != nil { return err } if err := f.Truncate(0); err != nil { return err } fromByte = 0 hash = nil } } err = a.download(t, cb, authOkFunc, f, fromByte, hash) if err != nil { f.Close() // Rename file so next download can resume from where we stopped. // No error checking here; if the rename fails, the file will be deleted and there will simply be no download resuming tools.RobustRename(f.Name(), a.downloadFilename(t)) } return err } // Returns the path where a partially downloaded file should be stored for download resuming func (a *basicDownloadAdapter) downloadFilename(t *Transfer) string { return filepath.Join(a.tempDir(), t.Oid+".part") } // download starts or resumes a download. 
dlFile is expected to be an existing file open in RW mode func (a *basicDownloadAdapter) download(t *Transfer, cb ProgressCallback, authOkFunc func(), dlFile *os.File, fromByte int64, hash hash.Hash) error { rel, err := t.Rel("download") if err != nil { return err } if rel == nil { return errors.Errorf(tr.Tr.Get("Object %s not found on the server.", t.Oid)) } req, err := a.newHTTPRequest("GET", rel) if err != nil { return err } if fromByte > 0 { // We could just use a start byte, but since we know the length, be specific req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", fromByte, t.Size-1)) } req = a.apiClient.LogRequest(req, "lfs.data.download") res, err := a.makeRequest(t, req) if err != nil { if res == nil { // We encountered a network or similar error which caused us // to not receive a response at all. return errors.NewRetriableError(err) } // Special-case status code 416 (Requested Range Not Satisfiable) - fall back if fromByte > 0 && dlFile != nil && res.StatusCode == 416 { tracerx.Printf("xfer: server rejected resume download request for %q from byte %d; re-downloading from start", t.Oid, fromByte) if _, err := dlFile.Seek(0, io.SeekStart); err != nil { return err } if err := dlFile.Truncate(0); err != nil { return err } return a.download(t, cb, authOkFunc, dlFile, 0, nil) } // Special-case status code 429 - retry after a certain time if res.StatusCode == 429 { retLaterErr := errors.NewRetriableLaterError(err, res.Header.Get("Retry-After")) if retLaterErr != nil { return retLaterErr } } return errors.NewRetriableError(err) } defer res.Body.Close() // Range request must return 206 & content range to confirm if fromByte > 0 { rangeRequestOk := false var failReason string // check 206 and Content-Range, fall back if either is not as expected if res.StatusCode == 206 { // Probably a successful range request, check Content-Range if rangeHdr := res.Header.Get("Content-Range"); rangeHdr != "" { regex := regexp.MustCompile(`bytes (\d+)\-.*`) match := regex.FindStringSubmatch(rangeHdr) if match != nil && len(match) > 1 { contentStart, _ := strconv.ParseInt(match[1], 10, 64) if contentStart == fromByte { rangeRequestOk = true } else { failReason = fmt.Sprintf("Content-Range start byte incorrect: %s expected %d", match[1], fromByte) } } else { failReason = fmt.Sprintf("badly formatted Content-Range header: %q", rangeHdr) } } else { failReason = "missing Content-Range header in response" } } else { failReason = fmt.Sprintf("expected status code 206, received %d", res.StatusCode) } if rangeRequestOk { tracerx.Printf("xfer: server accepted resume download request: %q from byte %d", t.Oid, fromByte) advanceCallbackProgress(cb, t, fromByte) } else { // Abort resume, perform regular download tracerx.Printf("xfer: failed to resume download for %q from byte %d: %s. Re-downloading from start", t.Oid, fromByte, failReason) if _, err := dlFile.Seek(0, io.SeekStart); err != nil { return err } if err := dlFile.Truncate(0); err != nil { return err } fromByte = 0 hash = nil if res.StatusCode == 200 { // If status code was 200 then server just ignored Range header and // sent everything. 
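// (Illustrative exchange when resuming from byte 1000 of a 4000-byte
// object; the values are examples only:
//   request:  GET <href>  with  Range: bytes=1000-3999
//   resume:   HTTP/1.1 206 Partial Content, Content-Range: bytes 1000-3999/4000
//   fallback: HTTP/1.1 200 OK carrying the complete content
// The 200 fallback is the case handled here.)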
Don't re-request, use this one from byte 0 } else { // re-request needed return a.download(t, cb, authOkFunc, dlFile, fromByte, hash) } } } // Signal auth OK on success response, before starting download to free up // other workers immediately if authOkFunc != nil { authOkFunc() } var hasher *tools.HashingReader httpReader := tools.NewRetriableReader(res.Body) if fromByte > 0 && hash != nil { // pre-load hashing reader with previous content hasher = tools.NewHashingReaderPreloadHash(httpReader, hash) } else { hasher = tools.NewHashingReader(httpReader) } dlfilename := dlFile.Name() // Wrap callback to give name context ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error { if cb != nil { return cb(t.Name, totalSize, readSoFar+fromByte, readSinceLast) } return nil } written, err := tools.CopyWithCallback(dlFile, hasher, res.ContentLength, ccb) if err != nil { return errors.Wrapf(err, tr.Tr.Get("cannot write data to temporary file %q", dlfilename)) } if actual := hasher.Hash(); actual != t.Oid { return errors.New(tr.Tr.Get("expected OID %s, got %s after %d bytes written", t.Oid, actual, written)) } if err := dlFile.Close(); err != nil { return errors.New(tr.Tr.Get("can't close temporary file %q: %v", dlfilename, err)) } err = tools.RenameFileCopyPermissions(dlfilename, t.Path) if _, err2 := os.Stat(t.Path); err2 == nil { // Target file already exists, possibly was downloaded by other git-lfs process return nil } return err } func configureBasicDownloadAdapter(m *concreteManifest) { m.RegisterNewAdapterFunc(BasicAdapterName, Download, func(name string, dir Direction) Adapter { switch dir { case Download: bd := &basicDownloadAdapter{newAdapterBase(m.fs, name, dir, nil)} // self implements impl bd.transferImpl = bd return bd case Upload: panic(tr.Tr.Get("Should never ask this function to upload")) } return nil }) } func (a *basicDownloadAdapter) makeRequest(t *Transfer, req *http.Request) (*http.Response, error) { res, err := a.doHTTP(t, req) if errors.IsAuthError(err) && len(req.Header.Get("Authorization")) == 0 { return a.makeRequest(t, req) } return res, err } git-lfs-3.6.1/tq/basic_upload.go000066400000000000000000000147101472372047300165020ustar00rootroot00000000000000package tq import ( "io" "net/http" "os" "path/filepath" "strconv" "strings" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" ) const ( BasicAdapterName = "basic" defaultContentType = "application/octet-stream" ) // Adapter for basic uploads (non resumable) type basicUploadAdapter struct { *adapterBase } func (a *basicUploadAdapter) tempDir() string { // Dedicated to this adapter rather than shared with basic download. 
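// For example (illustrative paths): the download adapter stages and
// resumes partial content at <LFSStorageDir>/incomplete/<oid>.part,
// whereas this adapter's temporary directory is
// $TMPDIR/git-lfs-basic-temp; basic uploads are not resumable, so
// nothing here needs to survive between runs.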
d := filepath.Join(os.TempDir(), "git-lfs-basic-temp") if err := tools.MkdirAll(d, a.fs); err != nil { return os.TempDir() } return d } func (a *basicUploadAdapter) WorkerStarting(workerNum int) (interface{}, error) { return nil, nil } func (a *basicUploadAdapter) WorkerEnding(workerNum int, ctx interface{}) { } func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCallback, authOkFunc func()) error { rel, err := t.Rel("upload") if err != nil { return err } if rel == nil { return errors.Errorf(tr.Tr.Get("No upload action for object: %s", t.Oid)) } req, err := a.newHTTPRequest("PUT", rel) if err != nil { return err } if req.Header.Get("Transfer-Encoding") == "chunked" { req.TransferEncoding = []string{"chunked"} } else { req.Header.Set("Content-Length", strconv.FormatInt(t.Size, 10)) } req.ContentLength = t.Size f, err := os.OpenFile(t.Path, os.O_RDONLY, 0644) if err != nil { return errors.Wrap(err, tr.Tr.Get("basic upload")) } defer f.Close() if err := a.setContentTypeFor(req, f); err != nil { return err } // Ensure progress callbacks made while uploading // Wrap callback to give name context ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error { if cb != nil { return cb(t.Name, totalSize, readSoFar, readSinceLast) } return nil } cbr := tools.NewFileBodyWithCallback(f, t.Size, ccb) var reader lfsapi.ReadSeekCloser = cbr // Signal auth was ok on first read; this frees up other workers to start if authOkFunc != nil { reader = newStartCallbackReader(reader, func() error { authOkFunc() return nil }) } req.Body = reader req = a.apiClient.LogRequest(req, "lfs.data.upload") res, err := a.makeRequest(t, req) if err != nil { if errors.IsUnprocessableEntityError(err) { // If we got an HTTP 422, we do _not_ want to retry the // request later below, because it is likely that the // implementing server does not support non-standard // Content-Type headers. // // Instead, return immediately and wait for the // *tq.TransferQueue to report an error message. return err } // We're about to return a retriable error, meaning that this // transfer will either be retried, or it will fail. // // Either way, let's decrement the number of bytes that we've // read _so far_, so that the next iteration doesn't re-transfer // those bytes, according to the progress meter. if perr := cbr.ResetProgress(); perr != nil { err = errors.Wrap(err, perr.Error()) } if res == nil { // We encountered a network or similar error which caused us // to not receive a response at all. return errors.NewRetriableError(err) } if res.StatusCode == 429 { retLaterErr := errors.NewRetriableLaterError(err, res.Header.Get("Retry-After")) if retLaterErr != nil { return retLaterErr } } return errors.NewRetriableError(err) } // A status code of 403 likely means that an authentication token for the // upload has expired. This can be safely retried. 
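// For example (an illustrative exchange), a PUT whose action credentials
// have passed their "expires_in"/"expires_at" window might yield:
//
//   PUT https://lfs.example.com/objects/<oid>  ->  HTTP/1.1 403 Forbidden
//
// and the retry path below may re-run the batch request, picking up fresh
// action headers.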
if res.StatusCode == 403 { err = errors.New(tr.Tr.Get("Received status %d", res.StatusCode)) return errors.NewRetriableError(err) } if res.StatusCode > 299 { return errors.Wrapf(nil, tr.Tr.Get("Invalid status for %s %s: %d", req.Method, strings.SplitN(req.URL.String(), "?", 2)[0], res.StatusCode, )) } io.Copy(io.Discard, res.Body) res.Body.Close() return verifyUpload(a.apiClient, a.remote, t) } func (a *adapterBase) setContentTypeFor(req *http.Request, r io.ReadSeeker) error { uc := config.NewURLConfig(a.apiClient.GitEnv()) disabled := !uc.Bool("lfs", req.URL.String(), "contenttype", true) if len(req.Header.Get("Content-Type")) != 0 { return nil } var contentType string if !disabled { buffer := make([]byte, 512) n, err := r.Read(buffer) if err != nil && err != io.EOF { return errors.Wrap(err, tr.Tr.Get("content type detection error")) } contentType = http.DetectContentType(buffer[:n]) if _, err := r.Seek(0, io.SeekStart); err != nil { return errors.Wrap(err, tr.Tr.Get("content type rewind failure")) } } if contentType == "" { contentType = defaultContentType } req.Header.Set("Content-Type", contentType) return nil } // startCallbackReader is a reader wrapper which calls a function as soon as the // first Read() call is made. This callback is only made once type startCallbackReader struct { cb func() error cbDone bool lfsapi.ReadSeekCloser } func (s *startCallbackReader) Read(p []byte) (n int, err error) { if !s.cbDone && s.cb != nil { if err := s.cb(); err != nil { return 0, err } s.cbDone = true } return s.ReadSeekCloser.Read(p) } func newStartCallbackReader(r lfsapi.ReadSeekCloser, cb func() error) *startCallbackReader { return &startCallbackReader{ ReadSeekCloser: r, cb: cb, } } func configureBasicUploadAdapter(m *concreteManifest) { m.RegisterNewAdapterFunc(BasicAdapterName, Upload, func(name string, dir Direction) Adapter { switch dir { case Upload: bu := &basicUploadAdapter{newAdapterBase(m.fs, name, dir, nil)} // self implements impl bu.transferImpl = bu return bu case Download: panic(tr.Tr.Get("Should never ask this function to download")) } return nil }) } func (a *basicUploadAdapter) makeRequest(t *Transfer, req *http.Request) (*http.Response, error) { res, err := a.doHTTP(t, req) if errors.IsAuthError(err) && len(req.Header.Get("Authorization")) == 0 { // Construct a new body with just the raw file and no callbacks. 
Since // all progress tracking happens when the net.http code copies our // request body into a new request, we can safely make this request // outside of the flow of the transfer adapter, and if it fails, the // transfer progress will be rewound at the top level f, _ := os.OpenFile(t.Path, os.O_RDONLY, 0644) defer f.Close() req.Body = tools.NewBodyWithCallback(f, t.Size, nil) return a.makeRequest(t, req) } return res, err } git-lfs-3.6.1/tq/custom.go000066400000000000000000000312041472372047300153640ustar00rootroot00000000000000package tq import ( "bufio" "bytes" "encoding/json" "fmt" "io" "path/filepath" "regexp" "strings" "time" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/fs" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/git-lfs/git-lfs/v3/subprocess" "github.com/rubyist/tracerx" ) // Adapter for custom transfer via external process type customAdapter struct { *adapterBase path string args string concurrent bool originalConcurrency int standalone bool } // Struct to capture stderr and write to trace type traceWriter struct { buf bytes.Buffer processName string } func (t *traceWriter) Write(b []byte) (int, error) { n, err := t.buf.Write(b) t.Flush() return n, err } func (t *traceWriter) Flush() { var err error for err == nil { var s string s, err = t.buf.ReadString('\n') if len(s) > 0 { tracerx.Printf("xfer[%v]: %v", t.processName, strings.TrimSpace(s)) } } } type customAdapterWorkerContext struct { workerNum int cmd *subprocess.Cmd stdout io.ReadCloser bufferedOut *bufio.Reader stdin io.WriteCloser errTracer *traceWriter } type customAdapterInitRequest struct { Event string `json:"event"` Operation string `json:"operation"` Remote string `json:"remote"` Concurrent bool `json:"concurrent"` ConcurrentTransfers int `json:"concurrenttransfers"` } func NewCustomAdapterInitRequest( op string, remote string, concurrent bool, concurrentTransfers int, ) *customAdapterInitRequest { return &customAdapterInitRequest{"init", op, remote, concurrent, concurrentTransfers} } type customAdapterTransferRequest struct { // common between upload/download Event string `json:"event"` Oid string `json:"oid"` Size int64 `json:"size"` Path string `json:"path,omitempty"` Action *Action `json:"action"` } func NewCustomAdapterUploadRequest(oid string, size int64, path string, action *Action) *customAdapterTransferRequest { return &customAdapterTransferRequest{"upload", oid, size, path, action} } func NewCustomAdapterDownloadRequest(oid string, size int64, action *Action) *customAdapterTransferRequest { return &customAdapterTransferRequest{"download", oid, size, "", action} } type customAdapterTerminateRequest struct { Event string `json:"event"` } func NewCustomAdapterTerminateRequest() *customAdapterTerminateRequest { return &customAdapterTerminateRequest{"terminate"} } // A common struct that allows all types of response to be identified type customAdapterResponseMessage struct { Event string `json:"event"` Error *ObjectError `json:"error"` Oid string `json:"oid"` Path string `json:"path,omitempty"` // always blank for upload BytesSoFar int64 `json:"bytesSoFar"` BytesSinceLast int `json:"bytesSinceLast"` } func (a *customAdapter) Begin(cfg AdapterConfig, cb ProgressCallback) error { a.originalConcurrency = cfg.ConcurrentTransfers() if a.concurrent { // Use common workers impl, but downgrade workers to number of processes return a.adapterBase.Begin(cfg, cb) } // If config says not to launch multiple processes, downgrade incoming value return 
a.adapterBase.Begin(&customAdapterConfig{AdapterConfig: cfg}, cb) } func (a *customAdapter) WorkerStarting(workerNum int) (interface{}, error) { // Start a process per worker // If concurrent = false we have already dialled back workers to 1 a.Trace("xfer: starting up custom transfer process %q for worker %d", a.name, workerNum) cmdName, cmdArgs := subprocess.FormatForShell(subprocess.ShellQuoteSingle(a.path), a.args) cmd, err := subprocess.ExecCommand(cmdName, cmdArgs...) if err != nil { return nil, errors.New(tr.Tr.Get("failed to find custom transfer command %q remote: %v", a.path, err)) } outp, err := cmd.StdoutPipe() if err != nil { return nil, errors.New(tr.Tr.Get("failed to get stdout for custom transfer command %q remote: %v", a.path, err)) } inp, err := cmd.StdinPipe() if err != nil { return nil, errors.New(tr.Tr.Get("failed to get stdin for custom transfer command %q remote: %v", a.path, err)) } // Capture stderr to trace tracer := &traceWriter{} tracer.processName = filepath.Base(a.path) cmd.Stderr = tracer err = cmd.Start() if err != nil { return nil, errors.New(tr.Tr.Get("failed to start custom transfer command %q remote: %v", a.path, err)) } // Set up buffered reader/writer since we operate on lines ctx := &customAdapterWorkerContext{workerNum, cmd, outp, bufio.NewReader(outp), inp, tracer} // send initiate message initReq := NewCustomAdapterInitRequest( a.getOperationName(), a.remote, a.concurrent, a.originalConcurrency, ) resp, err := a.exchangeMessage(ctx, initReq) if err != nil { a.abortWorkerProcess(ctx) return nil, err } if resp.Error != nil { a.abortWorkerProcess(ctx) return nil, errors.New(tr.Tr.Get("error initializing custom adapter %q worker %d: %v", a.name, workerNum, resp.Error)) } a.Trace("xfer: started custom adapter process %q for worker %d OK", a.path, workerNum) // Save this process context and use in future callbacks return ctx, nil } func (a *customAdapter) getOperationName() string { if a.direction == Download { return "download" } return "upload" } // sendMessage sends a JSON message to the custom adapter process func (a *customAdapter) sendMessage(ctx *customAdapterWorkerContext, req interface{}) error { b, err := json.Marshal(req) if err != nil { return err } a.Trace("xfer: Custom adapter worker %d sending message: %v", ctx.workerNum, string(b)) // Line oriented JSON b = append(b, '\n') _, err = ctx.stdin.Write(b) return err } func (a *customAdapter) readResponse(ctx *customAdapterWorkerContext) (*customAdapterResponseMessage, error) { line, err := ctx.bufferedOut.ReadString('\n') if err != nil { return nil, err } a.Trace("xfer: Custom adapter worker %d received response: %v", ctx.workerNum, strings.TrimSpace(line)) resp := &customAdapterResponseMessage{} err = json.Unmarshal([]byte(line), resp) return resp, err } // exchangeMessage sends a message to a process and reads a response if resp != nil // Only fatal errors to communicate return an error, errors may be embedded in reply func (a *customAdapter) exchangeMessage(ctx *customAdapterWorkerContext, req interface{}) (*customAdapterResponseMessage, error) { err := a.sendMessage(ctx, req) if err != nil { return nil, err } return a.readResponse(ctx) } // shutdownWorkerProcess terminates gracefully a custom adapter process // returns an error if it couldn't shut down gracefully (caller may abortWorkerProcess) func (a *customAdapter) shutdownWorkerProcess(ctx *customAdapterWorkerContext) error { defer ctx.errTracer.Flush() a.Trace("xfer: Shutting down adapter worker %d", ctx.workerNum) finishChan := 
make(chan error, 1) go func() { termReq := NewCustomAdapterTerminateRequest() err := a.sendMessage(ctx, termReq) if err != nil { finishChan <- err } ctx.stdin.Close() ctx.stdout.Close() finishChan <- ctx.cmd.Wait() }() select { case err := <-finishChan: return err case <-time.After(30 * time.Second): return errors.New(tr.Tr.Get("timeout while shutting down worker process %d", ctx.workerNum)) } } // abortWorkerProcess terminates & aborts untidily, most probably breakdown of comms or internal error func (a *customAdapter) abortWorkerProcess(ctx *customAdapterWorkerContext) { a.Trace("xfer: Aborting worker process: %d", ctx.workerNum) ctx.stdin.Close() ctx.stdout.Close() ctx.cmd.Process.Kill() } func (a *customAdapter) WorkerEnding(workerNum int, ctx interface{}) { customCtx, ok := ctx.(*customAdapterWorkerContext) if !ok { tracerx.Printf("Context object for custom transfer %q was of the wrong type", a.name) return } err := a.shutdownWorkerProcess(customCtx) if err != nil { tracerx.Printf("xfer: error finishing up custom transfer process %q worker %d, aborting: %v", a.path, customCtx.workerNum, err) a.abortWorkerProcess(customCtx) } } func (a *customAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCallback, authOkFunc func()) error { if ctx == nil { return errors.New(tr.Tr.Get("custom transfer %q was not properly initialized, see previous errors", a.name)) } customCtx, ok := ctx.(*customAdapterWorkerContext) if !ok { return errors.New(tr.Tr.Get("context object for custom transfer %q was of the wrong type", a.name)) } var authCalled bool rel, err := t.Rel(a.getOperationName()) if err != nil { return err } if rel == nil && !a.standalone { return errors.Errorf(tr.Tr.Get("Object %s not found on the server.", t.Oid)) } var req *customAdapterTransferRequest if a.direction == Upload { req = NewCustomAdapterUploadRequest(t.Oid, t.Size, t.Path, rel) } else { req = NewCustomAdapterDownloadRequest(t.Oid, t.Size, rel) } if err = a.sendMessage(customCtx, req); err != nil { return err } // 1..N replies (including progress & one of download / upload) var complete bool for !complete { resp, err := a.readResponse(customCtx) if err != nil { return err } var wasAuthOk bool switch resp.Event { case "progress": // Progress if resp.Oid != t.Oid { return errors.New(tr.Tr.Get("unexpected OID %q in response, expecting %q", resp.Oid, t.Oid)) } if cb != nil { cb(t.Name, t.Size, resp.BytesSoFar, resp.BytesSinceLast) } wasAuthOk = resp.BytesSoFar > 0 case "complete": // Download/Upload complete if resp.Oid != t.Oid { return errors.New(tr.Tr.Get("unexpected OID %q in response, expecting %q", resp.Oid, t.Oid)) } if resp.Error != nil { return errors.New(tr.Tr.Get("error transferring %q: %v", t.Oid, resp.Error)) } if a.direction == Download { // So we don't have to blindly trust external providers, check SHA if err = tools.VerifyFileHash(t.Oid, resp.Path); err != nil { return errors.New(tr.Tr.Get("downloaded file failed checks: %v", err)) } // Move file to final location if err = tools.RenameFileCopyPermissions(resp.Path, t.Path); err != nil { return errors.New(tr.Tr.Get("failed to copy downloaded file: %v", err)) } } else if a.direction == Upload { if err = verifyUpload(a.apiClient, a.remote, t); err != nil { return err } } wasAuthOk = true complete = true default: return errors.New(tr.Tr.Get("invalid message %q from custom adapter %q", resp.Event, a.name)) } // Fall through from both progress and completion messages // Call auth on first progress or success to free up other workers if wasAuthOk && authOkFunc != 
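// As an illustrative sketch (OIDs, sizes, URLs, and paths are examples
// only), the line-delimited JSON driven by the loop above looks like:
//
//   -> {"event":"init","operation":"download","remote":"origin","concurrent":true,"concurrenttransfers":8}
//   <- {}
//   -> {"event":"download","oid":"bf3e...","size":12345,"action":{"href":"https://example.com/bf3e"}}
//   <- {"event":"progress","oid":"bf3e...","bytesSoFar":4096,"bytesSinceLast":4096}
//   <- {"event":"complete","oid":"bf3e...","path":"/tmp/bf3e.tmp"}
//   -> {"event":"terminate"}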
nil && !authCalled { authOkFunc() authCalled = true } } return nil } func newCustomAdapter(f *fs.Filesystem, name string, dir Direction, path, args string, concurrent, standalone bool) *customAdapter { c := &customAdapter{newAdapterBase(f, name, dir, nil), path, args, concurrent, 3, standalone} // self implements impl c.transferImpl = c return c } const ( standaloneFileName = "lfs-standalone-file" ) func configureDefaultCustomAdapters(git Env, m *concreteManifest) { newfunc := func(name string, dir Direction) Adapter { standalone := m.standaloneTransferAgent != "" return newCustomAdapter(m.fs, standaloneFileName, dir, "git-lfs", "standalone-file", false, standalone) } m.RegisterNewAdapterFunc(standaloneFileName, Download, newfunc) m.RegisterNewAdapterFunc(standaloneFileName, Upload, newfunc) } // Initialise custom adapters based on current config func configureCustomAdapters(git Env, m *concreteManifest) { configureDefaultCustomAdapters(git, m) pathRegex := regexp.MustCompile(`lfs.customtransfer.([^.]+).path`) for k, _ := range git.All() { match := pathRegex.FindStringSubmatch(k) if match == nil { continue } name := match[1] path, _ := git.Get(k) // retrieve other values args, _ := git.Get(fmt.Sprintf("lfs.customtransfer.%s.args", name)) concurrent := git.Bool(fmt.Sprintf("lfs.customtransfer.%s.concurrent", name), true) direction, _ := git.Get(fmt.Sprintf("lfs.customtransfer.%s.direction", name)) if len(direction) == 0 { direction = "both" } else { direction = strings.ToLower(direction) } // Separate closure for each since we need to capture vars above newfunc := func(name string, dir Direction) Adapter { standalone := m.standaloneTransferAgent != "" return newCustomAdapter(m.fs, name, dir, path, args, concurrent, standalone) } if direction == "download" || direction == "both" { m.RegisterNewAdapterFunc(name, Download, newfunc) } if direction == "upload" || direction == "both" { m.RegisterNewAdapterFunc(name, Upload, newfunc) } } } type customAdapterConfig struct { AdapterConfig } func (c *customAdapterConfig) ConcurrentTransfers() int { return 1 } git-lfs-3.6.1/tq/custom_test.go000066400000000000000000000106731472372047300164320ustar00rootroot00000000000000package tq import ( "testing" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestCustomTransferBasicConfig(t *testing.T) { path := "/path/to/binary" cli, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.customtransfer.testsimple.path": path, })) require.Nil(t, err) m := NewManifest(nil, cli, "", "") u := m.NewUploadAdapter("testsimple") assert.NotNil(t, u, "Upload adapter should be present") cu, _ := u.(*customAdapter) assert.NotNil(t, cu, "Upload adapter should be customAdapter") assert.Equal(t, cu.path, path, "Path should be correct") assert.Equal(t, cu.args, "", "args should be blank") assert.Equal(t, cu.concurrent, true, "concurrent should be defaulted") d := m.NewDownloadAdapter("testsimple") assert.NotNil(t, d, "Download adapter should be present") cd, _ := u.(*customAdapter) assert.NotNil(t, cd, "Download adapter should be customAdapter") assert.Equal(t, cd.path, path, "Path should be correct") assert.Equal(t, cd.args, "", "args should be blank") assert.Equal(t, cd.concurrent, true, "concurrent should be defaulted") } func TestCustomTransferDownloadConfig(t *testing.T) { path := "/path/to/binary" args := "-c 1 --whatever" cli, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, 
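// The keys below are the flattened form of a .gitconfig section like the
// following (illustrative):
//
//   [lfs "customtransfer.testdownload"]
//       path = /path/to/binary
//       args = -c 1 --whatever
//       concurrent = false
//       direction = download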
map[string]string{ "lfs.customtransfer.testdownload.path": path, "lfs.customtransfer.testdownload.args": args, "lfs.customtransfer.testdownload.concurrent": "false", "lfs.customtransfer.testdownload.direction": "download", })) require.Nil(t, err) m := NewManifest(nil, cli, "", "") u := m.NewUploadAdapter("testdownload") assert.NotNil(t, u, "Upload adapter should always be created") cu, _ := u.(*customAdapter) assert.Nil(t, cu, "Upload adapter should NOT be custom (default to basic)") d := m.NewDownloadAdapter("testdownload") assert.NotNil(t, d, "Download adapter should be present") cd, _ := d.(*customAdapter) assert.NotNil(t, cd, "Download adapter should be customAdapter") assert.Equal(t, cd.path, path, "Path should be correct") assert.Equal(t, cd.args, args, "args should be correct") assert.Equal(t, cd.concurrent, false, "concurrent should be set") } func TestCustomTransferUploadConfig(t *testing.T) { path := "/path/to/binary" args := "-c 1 --whatever" cli, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.customtransfer.testupload.path": path, "lfs.customtransfer.testupload.args": args, "lfs.customtransfer.testupload.concurrent": "false", "lfs.customtransfer.testupload.direction": "upload", })) require.Nil(t, err) m := NewManifest(nil, cli, "", "") d := m.NewDownloadAdapter("testupload") assert.NotNil(t, d, "Download adapter should always be created") cd, _ := d.(*customAdapter) assert.Nil(t, cd, "Download adapter should NOT be custom (default to basic)") u := m.NewUploadAdapter("testupload") assert.NotNil(t, u, "Upload adapter should be present") cu, _ := u.(*customAdapter) assert.NotNil(t, cu, "Upload adapter should be customAdapter") assert.Equal(t, cu.path, path, "Path should be correct") assert.Equal(t, cu.args, args, "args should be correct") assert.Equal(t, cu.concurrent, false, "concurrent should be set") } func TestCustomTransferBothConfig(t *testing.T) { path := "/path/to/binary" args := "-c 1 --whatever --yeah" cli, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.customtransfer.testboth.path": path, "lfs.customtransfer.testboth.args": args, "lfs.customtransfer.testboth.concurrent": "yes", "lfs.customtransfer.testboth.direction": "both", })) require.Nil(t, err) m := NewManifest(nil, cli, "", "") d := m.NewDownloadAdapter("testboth") assert.NotNil(t, d, "Download adapter should be present") cd, _ := d.(*customAdapter) assert.NotNil(t, cd, "Download adapter should be customAdapter") assert.Equal(t, cd.path, path, "Path should be correct") assert.Equal(t, cd.args, args, "args should be correct") assert.Equal(t, cd.concurrent, true, "concurrent should be set") u := m.NewUploadAdapter("testboth") assert.NotNil(t, u, "Upload adapter should be present") cu, _ := u.(*customAdapter) assert.NotNil(t, cu, "Upload adapter should be customAdapter") assert.Equal(t, cu.path, path, "Path should be correct") assert.Equal(t, cu.args, args, "args should be correct") assert.Equal(t, cu.concurrent, true, "concurrent should be set") } git-lfs-3.6.1/tq/errors.go000066400000000000000000000013051472372047300153650ustar00rootroot00000000000000package tq import "github.com/git-lfs/git-lfs/v3/tr" type MalformedObjectError struct { Name string Oid string missing bool } func newObjectMissingError(name, oid string) error { return &MalformedObjectError{Name: name, Oid: oid, missing: true} } func newCorruptObjectError(name, oid string) error { return &MalformedObjectError{Name: name, Oid: oid, missing: false} } func (e MalformedObjectError) Missing() bool 
{ return e.missing } func (e MalformedObjectError) Corrupt() bool { return !e.Missing() } func (e MalformedObjectError) Error() string { if e.Corrupt() { return tr.Tr.Get("corrupt object: %s (%s)", e.Name, e.Oid) } return tr.Tr.Get("missing object: %s (%s)", e.Name, e.Oid) } git-lfs-3.6.1/tq/errors_test.go000066400000000000000000000011011472372047300164160ustar00rootroot00000000000000package tq import ( "testing" "github.com/stretchr/testify/assert" ) func TestMissingObjectErrorsAreRecognizable(t *testing.T) { err := newObjectMissingError("some-name", "some-oid").(*MalformedObjectError) assert.Equal(t, "some-name", err.Name) assert.Equal(t, "some-oid", err.Oid) assert.True(t, err.Missing()) } func TestCorruptObjectErrorsAreRecognizable(t *testing.T) { err := newCorruptObjectError("some-name", "some-oid").(*MalformedObjectError) assert.Equal(t, "some-name", err.Name) assert.Equal(t, "some-oid", err.Oid) assert.True(t, err.Corrupt()) } git-lfs-3.6.1/tq/manifest.go000066400000000000000000000241001472372047300156550ustar00rootroot00000000000000package tq import ( "strings" "sync" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/fs" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/ssh" "github.com/rubyist/tracerx" ) const ( defaultMaxRetries = 8 defaultMaxRetryDelay = 10 defaultConcurrentTransfers = 8 ) type Manifest interface { APIClient() *lfsapi.Client MaxRetries() int MaxRetryDelay() int ConcurrentTransfers() int IsStandaloneTransfer() bool batchClient() BatchClient GetAdapterNames(dir Direction) []string GetDownloadAdapterNames() []string GetUploadAdapterNames() []string getAdapterNames(adapters map[string]NewAdapterFunc) []string RegisterNewAdapterFunc(name string, dir Direction, f NewAdapterFunc) NewAdapterOrDefault(name string, dir Direction) Adapter NewAdapter(name string, dir Direction) Adapter NewDownloadAdapter(name string) Adapter NewUploadAdapter(name string) Adapter Upgrade() *concreteManifest Upgraded() bool } type lazyManifest struct { f *fs.Filesystem apiClient *lfsapi.Client operation string remote string m *concreteManifest } func newLazyManifest(f *fs.Filesystem, apiClient *lfsapi.Client, operation, remote string) *lazyManifest { return &lazyManifest{ f: f, apiClient: apiClient, operation: operation, remote: remote, m: nil, } } func (m *lazyManifest) APIClient() *lfsapi.Client { return m.Upgrade().APIClient() } func (m *lazyManifest) MaxRetries() int { return m.Upgrade().MaxRetries() } func (m *lazyManifest) MaxRetryDelay() int { return m.Upgrade().MaxRetryDelay() } func (m *lazyManifest) ConcurrentTransfers() int { return m.Upgrade().ConcurrentTransfers() } func (m *lazyManifest) IsStandaloneTransfer() bool { return m.Upgrade().IsStandaloneTransfer() } func (m *lazyManifest) batchClient() BatchClient { return m.Upgrade().batchClient() } func (m *lazyManifest) GetAdapterNames(dir Direction) []string { return m.Upgrade().GetAdapterNames(dir) } func (m *lazyManifest) GetDownloadAdapterNames() []string { return m.Upgrade().GetDownloadAdapterNames() } func (m *lazyManifest) GetUploadAdapterNames() []string { return m.Upgrade().GetUploadAdapterNames() } func (m *lazyManifest) getAdapterNames(adapters map[string]NewAdapterFunc) []string { return m.Upgrade().getAdapterNames(adapters) } func (m *lazyManifest) RegisterNewAdapterFunc(name string, dir Direction, f NewAdapterFunc) { m.Upgrade().RegisterNewAdapterFunc(name, dir, f) } func (m *lazyManifest) NewAdapterOrDefault(name string, dir Direction) Adapter { return 
m.Upgrade().NewAdapterOrDefault(name, dir) } func (m *lazyManifest) NewAdapter(name string, dir Direction) Adapter { return m.Upgrade().NewAdapter(name, dir) } func (m *lazyManifest) NewDownloadAdapter(name string) Adapter { return m.Upgrade().NewDownloadAdapter(name) } func (m *lazyManifest) NewUploadAdapter(name string) Adapter { return m.Upgrade().NewUploadAdapter(name) } func (m *lazyManifest) Upgrade() *concreteManifest { if m.m == nil { m.m = newConcreteManifest(m.f, m.apiClient, m.operation, m.remote) } return m.m } func (m *lazyManifest) Upgraded() bool { return m.m != nil } type concreteManifest struct { // maxRetries is the maximum number of retries a single object can // attempt to make before it will be dropped. maxRetryDelay is the maximum // time in seconds to wait between retry attempts when using backoff. maxRetries int maxRetryDelay int concurrentTransfers int basicTransfersOnly bool standaloneTransferAgent string tusTransfersAllowed bool downloadAdapterFuncs map[string]NewAdapterFunc uploadAdapterFuncs map[string]NewAdapterFunc fs *fs.Filesystem apiClient *lfsapi.Client sshTransfer *ssh.SSHTransfer batchClientAdapter BatchClient mu sync.Mutex } func (m *concreteManifest) APIClient() *lfsapi.Client { return m.apiClient } func (m *concreteManifest) MaxRetries() int { return m.maxRetries } func (m *concreteManifest) MaxRetryDelay() int { return m.maxRetryDelay } func (m *concreteManifest) ConcurrentTransfers() int { return m.concurrentTransfers } func (m *concreteManifest) IsStandaloneTransfer() bool { return m.standaloneTransferAgent != "" } func (m *concreteManifest) batchClient() BatchClient { if r := m.MaxRetries(); r > 0 { m.batchClientAdapter.SetMaxRetries(r) } return m.batchClientAdapter } func (m *concreteManifest) Upgrade() *concreteManifest { return m } func (m *concreteManifest) Upgraded() bool { return true } func NewManifest(f *fs.Filesystem, apiClient *lfsapi.Client, operation, remote string) Manifest { return newLazyManifest(f, apiClient, operation, remote) } func newConcreteManifest(f *fs.Filesystem, apiClient *lfsapi.Client, operation, remote string) *concreteManifest { if apiClient == nil { cli, err := lfsapi.NewClient(nil) if err != nil { tracerx.Printf("unable to init tq.Manifest: %s", err) return nil } apiClient = cli } sshTransfer := apiClient.SSHTransfer(operation, remote) useSSHMultiplexing := false if sshTransfer != nil { useSSHMultiplexing = sshTransfer.IsMultiplexingEnabled() } m := &concreteManifest{ fs: f, apiClient: apiClient, batchClientAdapter: &tqClient{Client: apiClient}, downloadAdapterFuncs: make(map[string]NewAdapterFunc), uploadAdapterFuncs: make(map[string]NewAdapterFunc), sshTransfer: sshTransfer, } var tusAllowed bool if git := apiClient.GitEnv(); git != nil { if v := git.Int("lfs.transfer.maxretries", 0); v > 0 { m.maxRetries = v } if v := git.Int("lfs.transfer.maxretrydelay", -1); v > -1 { m.maxRetryDelay = v } if v := git.Int("lfs.concurrenttransfers", 0); v > 0 { m.concurrentTransfers = v } m.basicTransfersOnly = git.Bool("lfs.basictransfersonly", false) m.standaloneTransferAgent = findStandaloneTransfer( apiClient, operation, remote, ) tusAllowed = git.Bool("lfs.tustransfers", false) configureCustomAdapters(git, m) } if m.maxRetries < 1 { m.maxRetries = defaultMaxRetries } if m.maxRetryDelay < 1 { m.maxRetryDelay = defaultMaxRetryDelay } if m.concurrentTransfers < 1 { m.concurrentTransfers = defaultConcurrentTransfers } if sshTransfer != nil { if !useSSHMultiplexing { m.concurrentTransfers = 1 } // Multiple concurrent transfers 
are not yet supported. m.batchClientAdapter = &SSHBatchClient{ maxRetries: m.maxRetries, transfer: sshTransfer, } } configureBasicDownloadAdapter(m) configureBasicUploadAdapter(m) if tusAllowed { configureTusAdapter(m) } configureSSHAdapter(m) return m } func findDefaultStandaloneTransfer(url string) string { if strings.HasPrefix(url, "file://") { return standaloneFileName } return "" } func findStandaloneTransfer(client *lfsapi.Client, operation, remote string) string { if operation == "" || remote == "" { v, _ := client.GitEnv().Get("lfs.standalonetransferagent") return v } ep := client.Endpoints.Endpoint(operation, remote) uc := config.NewURLConfig(client.GitEnv()) v, ok := uc.Get("lfs", ep.Url, "standalonetransferagent") if !ok { return findDefaultStandaloneTransfer(ep.Url) } return v } // GetAdapterNames returns a list of the names of adapters available to be created func (m *concreteManifest) GetAdapterNames(dir Direction) []string { switch dir { case Upload: return m.GetUploadAdapterNames() case Download: return m.GetDownloadAdapterNames() } return nil } // GetDownloadAdapterNames returns a list of the names of download adapters available to be created func (m *concreteManifest) GetDownloadAdapterNames() []string { return m.getAdapterNames(m.downloadAdapterFuncs) } // GetUploadAdapterNames returns a list of the names of upload adapters available to be created func (m *concreteManifest) GetUploadAdapterNames() []string { return m.getAdapterNames(m.uploadAdapterFuncs) } // getAdapterNames returns a list of the names of adapters available to be created func (m *concreteManifest) getAdapterNames(adapters map[string]NewAdapterFunc) []string { if m.basicTransfersOnly { return []string{BasicAdapterName} } m.mu.Lock() defer m.mu.Unlock() ret := make([]string, 0, len(adapters)) for n, _ := range adapters { ret = append(ret, n) } return ret } // RegisterNewTransferAdapterFunc registers a new function for creating upload // or download adapters. If a function with that name & direction is already // registered, it is overridden func (m *concreteManifest) RegisterNewAdapterFunc(name string, dir Direction, f NewAdapterFunc) { m.mu.Lock() defer m.mu.Unlock() switch dir { case Upload: m.uploadAdapterFuncs[name] = f case Download: m.downloadAdapterFuncs[name] = f } } // Create a new adapter by name and direction; default to BasicAdapterName if doesn't exist func (m *concreteManifest) NewAdapterOrDefault(name string, dir Direction) Adapter { if len(name) == 0 { name = BasicAdapterName } a := m.NewAdapter(name, dir) if a == nil { tracerx.Printf("Defaulting to basic transfer adapter since %q did not exist", name) a = m.NewAdapter(BasicAdapterName, dir) } return a } // Create a new adapter by name and direction, or nil if doesn't exist func (m *concreteManifest) NewAdapter(name string, dir Direction) Adapter { m.mu.Lock() defer m.mu.Unlock() switch dir { case Upload: if u, ok := m.uploadAdapterFuncs[name]; ok { return u(name, dir) } case Download: if d, ok := m.downloadAdapterFuncs[name]; ok { return d(name, dir) } } return nil } // Create a new download adapter by name, or BasicAdapterName if doesn't exist func (m *concreteManifest) NewDownloadAdapter(name string) Adapter { return m.NewAdapterOrDefault(name, Download) } // Create a new upload adapter by name, or BasicAdapterName if doesn't exist func (m *concreteManifest) NewUploadAdapter(name string) Adapter { return m.NewAdapterOrDefault(name, Upload) } // Env is any object with a config.Environment interface. 
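// The concrete manifest above reads its tuning through this interface; in
// .gitconfig form the recognized keys look like the following illustrative
// sketch (values shown are the defaults applied when a key is unset or out
// of range):
//
//   [lfs]
//       concurrenttransfers = 8
//       basictransfersonly = false
//       tustransfers = false
//   [lfs "transfer"]
//       maxretries = 8
//       maxretrydelay = 10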
type Env interface { Get(key string) (val string, ok bool) GetAll(key string) []string Bool(key string, def bool) (val bool) Int(key string, def int) (val int) All() map[string][]string } git-lfs-3.6.1/tq/manifest_test.go000066400000000000000000000017401472372047300167210ustar00rootroot00000000000000package tq import ( "testing" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestManifestIsConfigurable(t *testing.T) { cli, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.transfer.maxretries": "3", })) require.Nil(t, err) m := NewManifest(nil, cli, "", "") assert.Equal(t, 3, m.MaxRetries()) } func TestManifestClampsValidValues(t *testing.T) { cli, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.transfer.maxretries": "-1", })) require.Nil(t, err) m := NewManifest(nil, cli, "", "") assert.Equal(t, 8, m.MaxRetries()) } func TestManifestIgnoresNonInts(t *testing.T) { cli, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.transfer.maxretries": "not_an_int", })) require.Nil(t, err) m := NewManifest(nil, cli, "", "") assert.Equal(t, 8, m.MaxRetries()) } git-lfs-3.6.1/tq/meter.go000066400000000000000000000144301472372047300151700ustar00rootroot00000000000000package tq import ( "fmt" "math" "os" "path/filepath" "sync" "sync/atomic" "time" "github.com/git-lfs/git-lfs/v3/config" "github.com/git-lfs/git-lfs/v3/tasklog" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tools/humanize" "github.com/git-lfs/git-lfs/v3/tr" ) // Meter provides a progress bar type output for the TransferQueue. It // is given an estimated file count and size up front and tracks the number of // files and bytes transferred as well as the number of files and bytes that // get skipped because the transfer is unnecessary. type Meter struct { finishedFiles int64 // int64s must come first for struct alignment transferringFiles int64 estimatedBytes int64 lastBytes int64 currentBytes int64 sampleCount uint64 avgBytes float64 lastAvg time.Time estimatedFiles int32 paused uint32 fileIndex map[string]int64 // Maps a file name to its transfer number fileIndexMutex *sync.Mutex updates chan *tasklog.Update cfg *config.Configuration DryRun bool Logger *tools.SyncWriter Direction Direction } type env interface { Get(key string) (val string, ok bool) } func (m *Meter) LoggerFromEnv(os env) *tools.SyncWriter { name, _ := os.Get("GIT_LFS_PROGRESS") if len(name) < 1 { return nil } return m.LoggerToFile(name) } func (m *Meter) LoggerToFile(name string) *tools.SyncWriter { printErr := func(err string) { fmt.Fprintln(os.Stderr, tr.Tr.Get("Error creating progress logger: %s", err)) } if !filepath.IsAbs(name) { printErr(tr.Tr.Get("GIT_LFS_PROGRESS must be an absolute path")) return nil } if err := tools.MkdirAll(filepath.Dir(name), m.cfg); err != nil { printErr(err.Error()) return nil } file, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) if err != nil { printErr(err.Error()) return nil } return tools.NewSyncWriter(file) } // NewMeter creates a new Meter. func NewMeter(cfg *config.Configuration) *Meter { m := &Meter{ fileIndex: make(map[string]int64), fileIndexMutex: &sync.Mutex{}, updates: make(chan *tasklog.Update), cfg: cfg, } return m } // Start begins sending status updates to the optional log file, and stdout. 
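// The log file is enabled by pointing GIT_LFS_PROGRESS at an absolute
// path, e.g. (illustrative):
//
//   GIT_LFS_PROGRESS=/tmp/lfs-progress.log git lfs pull
//
// Each update appends one line in the form written by logBytes below,
// "<direction> <fileIndex>/<totalFiles> <readSoFar>/<total> <name>", for
// example: download 3/10 1048576/4194304 path/to/file.bin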
func (m *Meter) Start() { if m == nil { return } atomic.StoreUint32(&m.paused, 0) } // Pause stops sending status updates temporarily, until Start() is called again. func (m *Meter) Pause() { if m == nil { return } atomic.StoreUint32(&m.paused, 1) } // Add tells the progress meter that a single file of the given size will // possibly be transferred. If a file doesn't need to be transferred for some // reason, be sure to call Skip(int64) with the same size. func (m *Meter) Add(size int64) { if m == nil { return } defer m.update(false) atomic.AddInt32(&m.estimatedFiles, 1) atomic.AddInt64(&m.estimatedBytes, size) } // Skip tells the progress meter that a file of size `size` is being skipped // because the transfer is unnecessary. func (m *Meter) Skip(size int64) { if m == nil { return } defer m.update(false) atomic.AddInt64(&m.finishedFiles, 1) atomic.AddInt64(&m.currentBytes, size) } // StartTransfer tells the progress meter that a transferring file is being // added to the TransferQueue. func (m *Meter) StartTransfer(name string) { if m == nil { return } defer m.update(false) idx := atomic.AddInt64(&m.transferringFiles, 1) m.fileIndexMutex.Lock() m.fileIndex[name] = idx m.fileIndexMutex.Unlock() } // TransferBytes increments the number of bytes transferred func (m *Meter) TransferBytes(direction, name string, read, total int64, current int) { if m == nil { return } defer m.update(false) now := time.Now() since := now.Sub(m.lastAvg) atomic.AddInt64(&m.currentBytes, int64(current)) atomic.AddInt64(&m.lastBytes, int64(current)) if since > time.Second { m.lastAvg = now bps := float64(m.lastBytes) / since.Seconds() m.avgBytes = (m.avgBytes*float64(m.sampleCount) + bps) / (float64(m.sampleCount) + 1.0) atomic.StoreInt64(&m.lastBytes, 0) atomic.AddUint64(&m.sampleCount, 1) } m.logBytes(direction, name, read, total) } // FinishTransfer increments the finished transfer count func (m *Meter) FinishTransfer(name string) { if m == nil { return } defer m.update(false) atomic.AddInt64(&m.finishedFiles, 1) m.fileIndexMutex.Lock() delete(m.fileIndex, name) m.fileIndexMutex.Unlock() } // Flush sends the latest progress update, while leaving the meter active. func (m *Meter) Flush() { if m == nil { return } m.update(true) } // Finish shuts down the Meter. func (m *Meter) Finish() { if m == nil { return } m.update(false) close(m.updates) } func (m *Meter) Updates() <-chan *tasklog.Update { if m == nil { return nil } return m.updates } func (m *Meter) Throttled() bool { return true } func (m *Meter) update(force bool) { if m.skipUpdate() { return } m.updates <- &tasklog.Update{ S: m.str(), At: time.Now(), Force: force, } } func (m *Meter) skipUpdate() bool { return m.DryRun || m.estimatedFiles == 0 || atomic.LoadUint32(&m.paused) == 1 } func (m *Meter) str() string { // (Uploading|Downloading) LFS objects: 100% (10/10) 100 MiB | 10 MiB/s percentage := 100 * float64(m.finishedFiles) / float64(m.estimatedFiles) return fmt.Sprintf("%s: %3.f%% (%d/%d), %s | %s", m.Direction.Progress(), percentage, m.finishedFiles, m.estimatedFiles, humanize.FormatBytes(clamp(m.currentBytes)), humanize.FormatByteRate(clampf(m.avgBytes), time.Second)) } // clamp clamps the given "x" within the acceptable domain of the uint64 integer // type, so as to prevent over- and underflow. 
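// For example, clamp(-5) == 0 and clamp(42) == 42; a transiently negative
// byte count can occur when a failed transfer's progress is rewound.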
func clamp(x int64) uint64 { if x < 0 { return 0 } if x > math.MaxInt64 { return math.MaxUint64 } return uint64(x) } func clampf(x float64) uint64 { if x < 0 { return 0 } if x > math.MaxUint64 { return math.MaxUint64 } return uint64(x) } func (m *Meter) logBytes(direction, name string, read, total int64) { m.fileIndexMutex.Lock() idx := m.fileIndex[name] logger := m.Logger m.fileIndexMutex.Unlock() if logger == nil { return } line := fmt.Sprintf("%s %d/%d %d/%d %s\n", direction, idx, m.estimatedFiles, read, total, name) if err := m.Logger.Write([]byte(line)); err != nil { m.fileIndexMutex.Lock() m.Logger = nil m.fileIndexMutex.Unlock() } } git-lfs-3.6.1/tq/schemas/000077500000000000000000000000001472372047300151465ustar00rootroot00000000000000git-lfs-3.6.1/tq/schemas/http-batch-request-schema.json000066400000000000000000000013721472372047300230260ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-04/schema", "title": "Git LFS HTTPS Batch API Request", "type": "object", "properties": { "transfers": { "type": "array", "items": { "type": "string" } }, "operation": { "type": "string" }, "objects": { "type": "array", "items": { "type": "object", "properties": { "oid": { "type": "string" }, "size": { "type": "number", "minimum": 0 }, "authenticated": { "type": "boolean" } }, "required": ["oid", "size"], "additionalProperties": false } } }, "required": ["objects", "operation"] } git-lfs-3.6.1/tq/schemas/http-batch-response-schema.json000066400000000000000000000037151472372047300231770ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-04/schema", "title": "Git LFS HTTPS Batch API Response", "type": "object", "definitions": { "action": { "type": "object", "properties": { "href": { "type": "string" }, "header": { "type": "object", "additionalProperties": true }, "expires_in": { "type": "number", "maximum": 2147483647, "minimum": -2147483647 }, "expires_at": { "type": "string" } }, "required": ["href"], "additionalProperties": false } }, "properties": { "transfer": { "type": "string" }, "objects": { "type": "array", "items": { "type": "object", "properties": { "oid": { "type": "string" }, "size": { "type": "number", "minimum": 0 }, "authenticated": { "type": "boolean" }, "actions": { "type": "object", "properties": { "download": { "$ref": "#/definitions/action" }, "upload": { "$ref": "#/definitions/action" }, "verify": { "$ref": "#/definitions/action" } }, "additionalProperties": false }, "error": { "type": "object", "properties": { "code": { "type": "number" }, "message": { "type": "string" } }, "required": ["code", "message"], "additionalProperties": false } }, "required": ["oid", "size"], "additionalProperties": false } }, "message": { "type": "string" }, "request_id": { "type": "string" }, "documentation_url": { "type": "string" } }, "required": ["objects"] } git-lfs-3.6.1/tq/ssh.go000066400000000000000000000301161472372047300146500ustar00rootroot00000000000000package tq import ( "bytes" "fmt" "io" "os" "path/filepath" "sort" "strconv" "strings" "time" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/git-lfs/git-lfs/v3/ssh" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) type SSHBatchClient struct { maxRetries int transfer *ssh.SSHTransfer } func (a *SSHBatchClient) batchInternal(args []string, batchLines []string) (int, []string, []string, error) { conn, err := a.transfer.Connection(0) if err != nil { return 0, nil, nil, errors.Wrap(err, tr.Tr.Get("could not 
get connection for batch request")) } conn.Lock() defer conn.Unlock() err = conn.SendMessageWithLines("batch", args, batchLines) if err != nil { return 0, nil, nil, errors.Wrap(err, tr.Tr.Get("batch request")) } status, args, lines, err := conn.ReadStatusWithLines() if err != nil { return status, nil, nil, errors.Wrap(err, tr.Tr.Get("batch response")) } return status, args, lines, err } func (a *SSHBatchClient) Batch(remote string, bReq *batchRequest) (*BatchResponse, error) { bRes := &BatchResponse{TransferAdapterName: "ssh"} if len(bReq.Objects) == 0 { return bRes, nil } missing := make(map[string]bool) batchLines := make([]string, 0, len(bReq.Objects)) for _, obj := range bReq.Objects { missing[obj.Oid] = obj.Missing batchLines = append(batchLines, fmt.Sprintf("%s %d", obj.Oid, obj.Size)) } tracerx.Printf("api: batch %d files", len(bReq.Objects)) requestedAt := time.Now() args := []string{"transfer=ssh", "hash-algo=sha256"} if bReq.Ref != nil { args = append(args, fmt.Sprintf("refname=%s", bReq.Ref.Name)) } status, args, lines, err := a.batchInternal(args, batchLines) if err != nil { return nil, err } if status != 200 { msg := tr.Tr.Get("no message provided") if len(lines) > 0 { msg = lines[0] } return nil, errors.New(tr.Tr.Get("batch response: status %d from server (%s)", status, msg)) } for _, arg := range args { entries := strings.SplitN(arg, "=", 2) if len(entries) < 2 { continue } if entries[0] == "hash-algo" { bRes.HashAlgorithm = entries[1] if bRes.HashAlgorithm != "sha256" { return nil, errors.New(tr.Tr.Get("batch response: unsupported hash algorithm: %q", entries[1])) } } } sort.Strings(lines) for _, line := range lines { entries := strings.Split(line, " ") if len(entries) < 3 { return nil, errors.New(tr.Tr.Get("batch response: malformed response: %q", line)) } length := len(bRes.Objects) if length == 0 || bRes.Objects[length-1].Oid != entries[0] { bRes.Objects = append(bRes.Objects, &Transfer{Actions: make(map[string]*Action)}) } transfer := bRes.Objects[len(bRes.Objects)-1] transfer.Oid = entries[0] transfer.Size, err = strconv.ParseInt(entries[1], 10, 64) if err != nil { return nil, errors.New(tr.Tr.Get("batch response: invalid size: %s", entries[1])) } if entries[2] == "noop" { continue } transfer.Actions[entries[2]] = &Action{} if len(entries) > 3 { for _, entry := range entries[3:] { if strings.HasPrefix(entry, "id=") { transfer.Actions[entries[2]].Id = entry[3:] } else if strings.HasPrefix(entry, "token=") { transfer.Actions[entries[2]].Token = entry[6:] } else if strings.HasPrefix(entry, "expires-in=") { transfer.Actions[entries[2]].ExpiresIn, err = strconv.Atoi(entry[11:]) if err != nil { return nil, errors.New(tr.Tr.Get("batch response: invalid expires-in: %s", entry)) } } else if strings.HasPrefix(entry, "expires-at=") { transfer.Actions[entries[2]].ExpiresAt, err = time.Parse(time.RFC3339, entry[11:]) if err != nil { return nil, errors.New(tr.Tr.Get("batch response: invalid expires-at: %s", entry)) } } } } } for _, obj := range bRes.Objects { obj.Missing = missing[obj.Oid] for _, a := range obj.Actions { a.createdAt = requestedAt } } return bRes, nil } func (a *SSHBatchClient) MaxRetries() int { return a.maxRetries } func (a *SSHBatchClient) SetMaxRetries(n int) { a.maxRetries = n } type SSHAdapter struct { *adapterBase ctx lfshttp.Context transfer *ssh.SSHTransfer } // WorkerStarting is called when a worker goroutine starts to process jobs // Implementations can run some startup logic here & return some context if needed func (a *SSHAdapter) 
WorkerStarting(workerNum int) (interface{}, error) { a.transfer.SetConnectionCountAtLeast(workerNum + 1) return workerNum, nil } // WorkerEnding is called when a worker goroutine is shutting down // Implementations can clean up per-worker resources here, context is as returned from WorkerStarting func (a *SSHAdapter) WorkerEnding(workerNum int, ctx interface{}) { } func (a *SSHAdapter) tempDir() string { // Shared with the basic download adapter. d := filepath.Join(a.fs.LFSStorageDir, "incomplete") if err := tools.MkdirAll(d, a.fs); err != nil { return os.TempDir() } return d } // DoTransfer performs a single transfer within a worker. ctx is any context returned from WorkerStarting func (a *SSHAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCallback, authOkFunc func()) error { if authOkFunc != nil { authOkFunc() } workerNum := ctx.(int) if a.adapterBase.direction == Upload { return a.upload(t, workerNum, cb) } else { return a.download(t, workerNum, cb) } } func (a *SSHAdapter) download(t *Transfer, workerNum int, cb ProgressCallback) error { rel, err := t.Rel("download") if err != nil { return err } if rel == nil { return errors.Errorf(tr.Tr.Get("No download action for object: %s", t.Oid)) } // Reserve a temporary filename. We need to make sure nobody operates on the file simultaneously with us. f, err := tools.TempFile(a.tempDir(), t.Oid, a.fs) if err != nil { return err } tmpName := f.Name() defer func() { if f != nil { f.Close() } os.Remove(tmpName) }() return a.doDownload(t, workerNum, f, cb) } // doDownload starts a download. f is expected to be an existing file open in RW mode func (a *SSHAdapter) doDownload(t *Transfer, workerNum int, f *os.File, cb ProgressCallback) error { args := a.argumentsForTransfer(t, "download") conn, err := a.transfer.Connection(workerNum) if err != nil { return err } conn.Lock() defer conn.Unlock() err = conn.SendMessage(fmt.Sprintf("get-object %s", t.Oid), args) if err != nil { return err } status, args, data, err := conn.ReadStatusWithData() if err != nil { return err } if status < 200 || status > 299 { buffer := &bytes.Buffer{} if data != nil { io.CopyN(buffer, data, 1024) io.Copy(io.Discard, data) } return errors.NewRetriableError(errors.New(tr.Tr.Get("got status %d when fetching OID %s: %s", status, t.Oid, buffer.String()))) } var actualSize int64 seenSize := false for _, arg := range args { if strings.HasPrefix(arg, "size=") { if seenSize { return errors.NewProtocolError(tr.Tr.Get("unexpected size argument"), nil) } actualSize, err = strconv.ParseInt(arg[5:], 10, 64) if err != nil || actualSize < 0 { return errors.NewProtocolError(tr.Tr.Get("expected valid size, got %q", arg[5:]), err) } seenSize = true } } if !seenSize { return errors.NewProtocolError(tr.Tr.Get("no size argument seen"), nil) } dlfilename := f.Name() // Wrap callback to give name context ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error { if cb != nil { return cb(t.Name, totalSize, readSoFar, readSinceLast) } return nil } hasher := tools.NewHashingReader(data) written, err := tools.CopyWithCallback(f, hasher, t.Size, ccb) if err != nil { return errors.Wrapf(err, tr.Tr.Get("cannot write data to temporary file %q", dlfilename)) } if actual := hasher.Hash(); actual != t.Oid { return errors.New(tr.Tr.Get("expected OID %s, got %s after %d bytes written", t.Oid, actual, written)) } if err := f.Close(); err != nil { return errors.New(tr.Tr.Get("can't close temporary file %q: %v", dlfilename, err)) } err = tools.RenameFileCopyPermissions(dlfilename, t.Path) 
if _, err2 := os.Stat(t.Path); err2 == nil { // Target file already exists; it may have been downloaded by another git-lfs process return nil } return err } func (a *SSHAdapter) verifyUpload(t *Transfer, workerNum int) error { args := a.argumentsForTransfer(t, "upload") conn, err := a.transfer.Connection(workerNum) if err != nil { return err } conn.Lock() defer conn.Unlock() err = conn.SendMessage(fmt.Sprintf("verify-object %s", t.Oid), args) if err != nil { return err } status, _, lines, err := conn.ReadStatusWithLines() if err != nil { return err } if status < 200 || status > 299 { if len(lines) > 0 { return errors.New(tr.Tr.Get("got status %d when verifying upload OID %s: %s", status, t.Oid, lines[0])) } return errors.New(tr.Tr.Get("got status %d when verifying upload OID %s", status, t.Oid)) } return nil } func (a *SSHAdapter) doUpload(t *Transfer, workerNum int, f *os.File, cb ProgressCallback) (int, []string, []string, error) { args := a.argumentsForTransfer(t, "upload") // Ensure progress callbacks are made while uploading // Wrap callback to give name context ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error { if cb != nil { return cb(t.Name, totalSize, readSoFar, readSinceLast) } return nil } cbr := tools.NewFileBodyWithCallback(f, t.Size, ccb) conn, err := a.transfer.Connection(workerNum) if err != nil { return 0, nil, nil, err } conn.Lock() defer conn.Unlock() defer cbr.Close() err = conn.SendMessageWithData(fmt.Sprintf("put-object %s", t.Oid), args, cbr) if err != nil { return 0, nil, nil, err } return conn.ReadStatusWithLines() } // upload starts an upload. func (a *SSHAdapter) upload(t *Transfer, workerNum int, cb ProgressCallback) error { rel, err := t.Rel("upload") if err != nil { return err } if rel == nil { return errors.Errorf(tr.Tr.Get("No upload action for object: %s", t.Oid)) } f, err := os.OpenFile(t.Path, os.O_RDONLY, 0644) if err != nil { return errors.Wrap(err, tr.Tr.Get("SSH upload")) } defer f.Close() status, _, lines, err := a.doUpload(t, workerNum, f, cb) if err != nil { return err } if status < 200 || status > 299 { // A status code of 403 likely means that an authentication token for the // upload has expired. This can be safely retried. if status == 403 { err = errors.New(tr.Tr.Get("Received status %d", status)) return errors.NewRetriableError(err) } if status == 429 { return errors.NewRetriableError(errors.New(tr.Tr.Get("got status %d when uploading OID %s", status, t.Oid))) } if len(lines) > 0 { return errors.New(tr.Tr.Get("got status %d when uploading OID %s: %s", status, t.Oid, lines[0])) } return errors.New(tr.Tr.Get("got status %d when uploading OID %s", status, t.Oid)) } return a.verifyUpload(t, workerNum) } func (a *SSHAdapter) argumentsForTransfer(t *Transfer, action string) []string { args := make([]string, 0, 3) set, ok := t.Actions[action] if !ok { return nil } args = append(args, fmt.Sprintf("size=%d", t.Size)) if set.Id != "" { args = append(args, fmt.Sprintf("id=%s", set.Id)) } if set.Token != "" { args = append(args, fmt.Sprintf("token=%s", set.Token)) } return args } // Begin a new batch of uploads or downloads. Call this first, followed by one // or more Add calls. The passed-in callback will receive updates on progress.
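// An illustrative sketch (not part of the original source) of the adapter lifecycle as driven by the transfer queue, assuming cfg and cb have already been prepared by the caller: // // if err := adapter.Begin(cfg, cb); err != nil { return err } // for res := range adapter.Add(transfers...) { /* inspect res.Transfer and res.Error */ } // adapter.End()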
func (a *SSHAdapter) Begin(cfg AdapterConfig, cb ProgressCallback) error { if err := a.adapterBase.Begin(cfg, cb); err != nil { return err } a.ctx = a.adapterBase.apiClient.Context() a.debugging = a.ctx.OSEnv().Bool("GIT_TRANSFER_TRACE", false) return nil } func (a *SSHAdapter) Trace(format string, args ...interface{}) { if !a.adapterBase.debugging { return } tracerx.Printf(format, args...) } func configureSSHAdapter(m *concreteManifest) { m.RegisterNewAdapterFunc("ssh", Upload, func(name string, dir Direction) Adapter { a := &SSHAdapter{newAdapterBase(m.fs, name, dir, nil), nil, m.sshTransfer} a.transferImpl = a return a }) m.RegisterNewAdapterFunc("ssh", Download, func(name string, dir Direction) Adapter { a := &SSHAdapter{newAdapterBase(m.fs, name, dir, nil), nil, m.sshTransfer} a.transferImpl = a return a }) } git-lfs-3.6.1/tq/transfer.go000066400000000000000000000163001472372047300156760ustar00rootroot00000000000000// Package transfer collects together adapters for uploading and downloading LFS content // NOTE: Subject to change, do not rely on this package from outside git-lfs source package tq import ( "fmt" "time" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" ) type Direction int const ( Upload = Direction(iota) Download = Direction(iota) Checkout = Direction(iota) ) // Progress returns a string containing the operation in progress. func (d Direction) Progress() string { switch d { case Checkout: return tr.Tr.Get("Checking out LFS objects") case Download: return tr.Tr.Get("Downloading LFS objects") case Upload: return tr.Tr.Get("Uploading LFS objects") default: return "" } } func (d Direction) String() string { switch d { case Checkout: return "checkout" case Download: return "download" case Upload: return "upload" default: return "" } } type Transfer struct { Name string `json:"name,omitempty"` Oid string `json:"oid,omitempty"` Size int64 `json:"size"` Authenticated bool `json:"authenticated,omitempty"` Actions ActionSet `json:"actions,omitempty"` Links ActionSet `json:"_links,omitempty"` Error *ObjectError `json:"error,omitempty"` Path string `json:"path,omitempty"` Missing bool `json:"-"` } func (t *Transfer) Rel(name string) (*Action, error) { a, err := t.Actions.Get(name) if a != nil || err != nil { return a, err } if t.Links != nil { a, err := t.Links.Get(name) if a != nil || err != nil { return a, err } } return nil, nil } type ObjectError struct { Code int `json:"code"` Message string `json:"message"` } func (e *ObjectError) Error() string { return fmt.Sprintf("[%d] %s", e.Code, e.Message) } // newTransfer returns a copy of the given Transfer, with the name and path // values set. 
func newTransfer(tr *Transfer, name string, path string) *Transfer { t := &Transfer{ Name: name, Path: path, Oid: tr.Oid, Size: tr.Size, Authenticated: tr.Authenticated, Actions: make(ActionSet), } if tr.Error != nil { t.Error = &ObjectError{ Code: tr.Error.Code, Message: tr.Error.Message, } } for rel, action := range tr.Actions { t.Actions[rel] = &Action{ Href: action.Href, Header: action.Header, ExpiresAt: action.ExpiresAt, ExpiresIn: action.ExpiresIn, Id: action.Id, Token: action.Token, createdAt: action.createdAt, } } if tr.Links != nil { t.Links = make(ActionSet) for rel, link := range tr.Links { t.Links[rel] = &Action{ Href: link.Href, Header: link.Header, ExpiresAt: link.ExpiresAt, ExpiresIn: link.ExpiresIn, Id: link.Id, Token: link.Token, createdAt: link.createdAt, } } } return t } type Action struct { Href string `json:"href"` Header map[string]string `json:"header,omitempty"` ExpiresAt time.Time `json:"expires_at,omitempty"` ExpiresIn int `json:"expires_in,omitempty"` Id string `json:"-"` Token string `json:"-"` createdAt time.Time } func (a *Action) IsExpiredWithin(d time.Duration) (time.Time, bool) { return tools.IsExpiredAtOrIn(a.createdAt, d, a.ExpiresAt, time.Duration(a.ExpiresIn)*time.Second) } type ActionSet map[string]*Action const ( // objectExpirationToTransfer is the duration we expect to have passed // from the time that the object's expires_at (or expires_in) property // is checked to when the transfer is executed. objectExpirationToTransfer = 5 * time.Second ) func (as ActionSet) Get(rel string) (*Action, error) { a, ok := as[rel] if !ok { return nil, nil } if at, expired := a.IsExpiredWithin(objectExpirationToTransfer); expired { return nil, errors.NewRetriableError(&ActionExpiredErr{Rel: rel, At: at}) } return a, nil } type ActionExpiredErr struct { Rel string At time.Time } func (e ActionExpiredErr) Error() string { return tr.Tr.Get("action %q expires at %s", e.Rel, e.At.In(time.Local).Format(time.RFC822)) } func IsActionExpiredError(err error) bool { if _, ok := err.(*ActionExpiredErr); ok { return true } return false } // NewAdapterFunc creates new instances of Adapter. Code that wishes // to provide new Adapter instances should pass an implementation of this // function to RegisterNewAdapterFunc() on a *Manifest. // The name and dir parameters provide context if one func implements many instances. type NewAdapterFunc func(name string, dir Direction) Adapter type ProgressCallback func(name string, totalSize, readSoFar int64, readSinceLast int) error type AdapterConfig interface { APIClient() *lfsapi.Client ConcurrentTransfers() int Remote() string } type adapterConfig struct { apiClient *lfsapi.Client concurrentTransfers int remote string } func (c *adapterConfig) ConcurrentTransfers() int { return c.concurrentTransfers } func (c *adapterConfig) APIClient() *lfsapi.Client { return c.apiClient } func (c *adapterConfig) Remote() string { return c.remote } // Adapter is implemented by types which can upload and/or download LFS // file content to a remote store. Each Adapter accepts one or more requests // which it may schedule and parallelise in whatever way it chooses; clients of // this interface will receive notifications of progress and completion asynchronously. // Adapters support transfers in one direction; if an implementation // provides support for upload and download, it should be instantiated twice, // advertising support for each direction separately.
// Note that Adapter only implements the actual upload/download of content // itself; organising the wider process including calling the API to get URLs, // handling progress reporting and retries is the job of the core TransferQueue. // This keeps the orchestration core and standard while allowing an Adapter // to physically transfer content to different hosts with less code. type Adapter interface { // Name returns the name of this adapter, which is the same for all instances // of this type of adapter Name() string // Direction returns whether this instance is an upload or download instance // Adapter instances can only be one or the other, although the same // type may be instantiated for each direction Direction() Direction // Begin a new batch of uploads or downloads. Call this first, followed by one // or more Add calls. The passed-in callback will receive updates on progress. Begin(cfg AdapterConfig, cb ProgressCallback) error // Add queues a download/upload, which will complete asynchronously and // notify the callbacks given to Begin() Add(transfers ...*Transfer) (results <-chan TransferResult) // Indicate that all transfers have been scheduled and resources can be released // once the queued items have completed. // This call blocks until all items have been processed End() } // TransferResult describes the result of a transfer, returned through the channel provided by Add() type TransferResult struct { Transfer *Transfer // This will be non-nil if there was an error transferring this item Error error } git-lfs-3.6.1/tq/transfer_queue.go000066400000000000000000000705131472372047300171100ustar00rootroot00000000000000package tq import ( "fmt" "os" "sort" "sync" "time" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/git" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" "github.com/rubyist/tracerx" ) const ( defaultBatchSize = 100 baseRetryDelayMs = 250 ) type retryCounter struct { MaxRetries int MaxRetryDelay int // cmu guards count cmu sync.Mutex // count maps OIDs to number of retry attempts count map[string]int } // newRetryCounter instantiates a new *retryCounter. func newRetryCounter() *retryCounter { return &retryCounter{ MaxRetries: defaultMaxRetries, MaxRetryDelay: defaultMaxRetryDelay, count: make(map[string]int), } } // Increment increments the number of retries for a given OID and returns the // new value. It is safe to call across multiple goroutines. func (r *retryCounter) Increment(oid string) int { r.cmu.Lock() defer r.cmu.Unlock() r.count[oid]++ return r.count[oid] } // CountFor returns the current number of retries for a given OID. It is safe to // call across multiple goroutines. func (r *retryCounter) CountFor(oid string) int { r.cmu.Lock() defer r.cmu.Unlock() return r.count[oid] } // CanRetry returns the current number of retries, and whether or not the OID // may still be retried (see: retryCounter.MaxRetries). func (r *retryCounter) CanRetry(oid string) (int, bool) { count := r.CountFor(oid) return count, count < r.MaxRetries } // ReadyTime returns the time from now when the current retry can occur or the // zero time if the retry can occur immediately.
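// As a worked example (illustrative; using baseRetryDelayMs = 250 from this package and the default MaxRetryDelay of 10 seconds asserted in transfer_queue_test.go), the delay before retry attempt n is 250ms * 2^(n-1), capped at 10s: // // attempt 1: 250ms, attempt 2: 500ms, attempt 3: 1s, attempt 4: 2s, // attempt 5: 4s, attempt 6: 8s, attempt 7 and beyond: capped at 10s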
func (r *retryCounter) ReadyTime(oid string) time.Time { count := r.CountFor(oid) if count < 1 { return time.Time{} } maxDelayMs := 1000 * uint64(r.MaxRetryDelay) delay := uint64(baseRetryDelayMs) * (1 << uint(count-1)) if delay == 0 || delay > maxDelayMs { delay = maxDelayMs } return time.Now().Add(time.Duration(delay) * time.Millisecond) } // batch implements sort.Interface and enables sorting a slice // of `*objectTuple`s by object size. // // This interface is implemented here so that the largest objects can be // processed first. Since a new batch cannot be added until the // current batch has finished processing, this reduces the risk of // a single worker getting tied up on a large item at the end of a batch while // all other workers are sitting idle. type batch []*objectTuple // Concat concatenates two batches together, returning a single, clamped batch as // "left", and the remainder of elements as "right". If the union of the // receiver and "other" has cardinality less than "size", "right" will be // returned as nil. Any object tuple that is not currently able to be retried // (i.e., due to a Retry-After response) will also go into the right batch. Also, when // rate-limited objects are returned, the minimum duration required to wait until // an object is ready is returned. func (b batch) Concat(other batch, size int) (left, right batch, minWait time.Duration) { u := batch(append(b, other...)) for _, ot := range u { if time.Now().After(ot.ReadyTime) { // The current time is past the time the object should // be available. left = append(left, ot) } else { // The time hasn't passed for the object. right = append(right, ot) wait := time.Until(ot.ReadyTime) if minWait == 0 { minWait = wait } else if wait < minWait { minWait = wait } } } if len(left) <= size { // If the size of left fits the given size limit, return with no adjustments. return left, right, minWait } // If left is too large, trim left to size and append the rest to right. right = append(right, left[size:]...) left = left[:size] return left, right, minWait } func (b batch) ToTransfers() []*Transfer { transfers := make([]*Transfer, 0, len(b)) for _, t := range b { transfers = append(transfers, &Transfer{Oid: t.Oid, Size: t.Size, Missing: t.Missing}) } return transfers } func (b batch) Len() int { return len(b) } func (b batch) Less(i, j int) bool { return b[i].Size < b[j].Size } func (b batch) Swap(i, j int) { b[i], b[j] = b[j], b[i] } type abortableWaitGroup struct { wq sync.WaitGroup counter int mu sync.Mutex abort bool } func newAbortableWaitGroup() *abortableWaitGroup { return &abortableWaitGroup{} } func (q *abortableWaitGroup) Add(delta int) { q.mu.Lock() defer q.mu.Unlock() if !q.abort { q.counter += delta q.wq.Add(delta) } } func (q *abortableWaitGroup) Done() { q.mu.Lock() defer q.mu.Unlock() if !q.abort { q.counter -= 1 q.wq.Done() } } func (q *abortableWaitGroup) Abort() { q.mu.Lock() defer q.mu.Unlock() q.abort = true q.wq.Add(-q.counter) } func (q *abortableWaitGroup) Wait() { q.wq.Wait() } // TransferQueue organises the wider process of uploading and downloading, // including calling the API, passing the actual transfer request to transfer // adapters, and dealing with progress, errors and retries.
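// An illustrative usage sketch (not part of the original source), assuming the caller already has a Manifest and the object's metadata: // // q := NewTransferQueue(Download, manifest, "origin", WithBatchSize(100)) // q.Add(name, path, oid, size, false, nil) // q.Wait() // for _, err := range q.Errors() { /* handle transfer errors */ }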
type TransferQueue struct { direction Direction client *tqClient remote string ref *git.Ref adapter Adapter adapterInProgress bool adapterInitMutex sync.Mutex dryRun bool cb tools.CopyCallback meter *Meter errors []error transfers map[string]*objects batchSize int bufferDepth int incoming chan *objectTuple // Channel for processing incoming items errorc chan error // Channel for processing errors watchers []chan *Transfer trMutex *sync.Mutex collectorWait sync.WaitGroup errorwait sync.WaitGroup // wait is used to keep track of pending transfers. It is incremented // once per unique OID on Add(), and is decremented when that transfer // is marked as completed or failed, but not retried. wait *abortableWaitGroup manifest Manifest rc *retryCounter // unsupportedContentType indicates whether the transfer queue ever saw // an HTTP 422 response indicating that their upload destination does // not support Content-Type detection. unsupportedContentType bool } // objects holds a set of objects. type objects struct { completed bool objects []*objectTuple } // All returns all *objectTuple's contained in the *objects set. func (s *objects) All() []*objectTuple { return s.objects } // Append returns a new *objects with the given *objectTuple(s) appended to the // end of the known objects. func (s *objects) Append(os ...*objectTuple) *objects { return &objects{ completed: s.completed, objects: append(s.objects, os...), } } // First returns the first *objectTuple in the chain of objects. func (s *objects) First() *objectTuple { if len(s.objects) == 0 { return nil } return s.objects[0] } type objectTuple struct { Name, Path, Oid string Size int64 Missing bool ReadyTime time.Time } func (o *objectTuple) ToTransfer() *Transfer { return &Transfer{ Name: o.Name, Path: o.Path, Oid: o.Oid, Size: o.Size, Missing: o.Missing, } } type Option func(*TransferQueue) func DryRun(dryRun bool) Option { return func(tq *TransferQueue) { tq.dryRun = dryRun } } func WithProgress(m *Meter) Option { return func(tq *TransferQueue) { tq.meter = m } } func RemoteRef(ref *git.Ref) Option { return func(tq *TransferQueue) { tq.ref = ref } } func WithProgressCallback(cb tools.CopyCallback) Option { return func(tq *TransferQueue) { tq.cb = cb } } func WithBatchSize(size int) Option { return func(tq *TransferQueue) { tq.batchSize = size } } func WithBufferDepth(depth int) Option { return func(tq *TransferQueue) { tq.bufferDepth = depth } } // NewTransferQueue builds a TransferQueue, direction and underlying mechanism determined by adapter func NewTransferQueue(dir Direction, manifest Manifest, remote string, options ...Option) *TransferQueue { q := &TransferQueue{ direction: dir, remote: remote, errorc: make(chan error), transfers: make(map[string]*objects), trMutex: &sync.Mutex{}, manifest: manifest, rc: newRetryCounter(), wait: newAbortableWaitGroup(), } for _, opt := range options { opt(q) } if q.batchSize <= 0 { q.batchSize = defaultBatchSize } if q.bufferDepth <= 0 { q.bufferDepth = q.batchSize } if q.meter != nil { q.meter.Direction = q.direction } q.incoming = make(chan *objectTuple, q.bufferDepth) q.collectorWait.Add(1) q.errorwait.Add(1) q.run() return q } // Ensure we have a concrete manifest and that certain delayed variables are set // properly. 
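// Upgrade is idempotent: the first call resolves the concrete manifest and configures the client and retry limits; subsequent calls are no-ops.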
func (q *TransferQueue) Upgrade() { if q.client == nil { manifest := q.manifest.Upgrade() q.client = &tqClient{Client: manifest.APIClient()} q.rc.MaxRetries = manifest.maxRetries q.rc.MaxRetryDelay = manifest.maxRetryDelay q.client.SetMaxRetries(manifest.maxRetries) } } // Add adds a *Transfer to the transfer queue. It only increments the amount // of waiting the TransferQueue has to do if the *Transfer "t" is new. // // If another transfer with the same OID has already been added to the // *TransferQueue, the given transfer will not be enqueued, but will be sent to any // channel created by Watch() once the oldest transfer has completed. // // Only one file will be transferred to/from the Path element of the first // transfer. func (q *TransferQueue) Add(name, path, oid string, size int64, missing bool, err error) { q.Upgrade() if err != nil { q.errorc <- err return } t := &objectTuple{ Name: name, Path: path, Oid: oid, Size: size, Missing: missing, } if objs := q.remember(t); len(objs.objects) > 1 { if objs.completed { // If there is already a completed transfer chain for // this OID, then this object is already "done", and can // be sent through as completed to the watchers. for _, w := range q.watchers { w <- t.ToTransfer() } } // If the chain is not done, there is no reason to enqueue this // transfer into 'q.incoming'. tracerx.Printf("already transferring %q, skipping duplicate", t.Oid) return } q.incoming <- t } // remember remembers the *Transfer "t" if the *TransferQueue doesn't already // know about a Transfer with the same OID. // // It returns the set of transfers recorded for that OID, which has exactly // one element when "t" is new. func (q *TransferQueue) remember(t *objectTuple) objects { q.Upgrade() q.trMutex.Lock() defer q.trMutex.Unlock() if _, ok := q.transfers[t.Oid]; !ok { q.wait.Add(1) q.transfers[t.Oid] = &objects{ objects: []*objectTuple{t}, } return *q.transfers[t.Oid] } q.transfers[t.Oid] = q.transfers[t.Oid].Append(t) return *q.transfers[t.Oid] } // collectBatches collects batches in a loop, prioritizing failed items from the // previous batch before adding new items. The process works as follows: // // 1. Create a new batch, of size `q.batchSize`, and containing no items // 2. While the batch contains fewer items than `q.batchSize` AND the channel // is open, read one item from the `q.incoming` channel. // a. If the read was a channel close, go to step 4. // b. If the read was a transferable item, go to step 3. // 3. Append the item to the batch. // 4. Sort the batch by descending object size, make a batch API call, send // the items to the `*adapterBase`. // 5. In a separate goroutine, process the worker results, incrementing and // appending retries if possible. On the main goroutine, accept new items // into "pending". // 6. Concat() the "next" and "pending" batches such that no more items than // the maximum allowed per batch are in next, and the rest are in pending. // 7. If the `q.incoming` channel is open, go to step 2. // 8. If the next batch is empty AND the `q.incoming` channel is closed, // terminate immediately. // // collectBatches runs in its own goroutine. func (q *TransferQueue) collectBatches() { defer q.collectorWait.Done() var closing bool next := q.makeBatch() pending := q.makeBatch() for { for !closing && (len(next) < q.batchSize) { t, ok := <-q.incoming if !ok { closing = true break } next = append(next, t) } // Before enqueuing the next batch, sort by descending object // size.
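// batch.Less orders tuples by ascending size, so sort.Reverse yields the descending order described above and lets the largest objects begin transferring first.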
sort.Sort(sort.Reverse(next)) done := make(chan struct{}) var retries batch var err error go func() { defer close(done) if len(next) == 0 { return } retries, err = q.enqueueAndCollectRetriesFor(next) if err != nil { q.errorc <- err } }() var collected batch collected, closing = q.collectPendingUntil(done) // If we've encountered a serious error here, abort immediately; // don't process further batches. Abort the wait queue so that // we don't deadlock waiting for objects to complete when they // never will. if err != nil && !errors.IsRetriableError(err) { q.wait.Abort() break } // Ensure the next batch is filled with, in order: // // - retries from the previous batch, // - new additions that were enqueued behind retries, & // - items collected while the batch was processing. var minWaitTime time.Duration next, pending, minWaitTime = retries.Concat(append(pending, collected...), q.batchSize) if len(next) == 0 && len(pending) != 0 { // There are some pending that could not be queued. // Wait the requested time before resuming loop. time.Sleep(minWaitTime) } else if len(next) == 0 && len(pending) == 0 && closing { // There are no items remaining, it is safe to break break } } } // collectPendingUntil collects items from q.incoming into a "pending" batch // until the given "done" channel is written to, or is closed. // // A "pending" batch is returned, along with whether or not "q.incoming" is // closed. func (q *TransferQueue) collectPendingUntil(done <-chan struct{}) (pending batch, closing bool) { q.Upgrade() for { select { case t, ok := <-q.incoming: if !ok { closing = true <-done return } pending = append(pending, t) case <-done: return } } } // enqueueAndCollectRetriesFor makes a Batch API call and returns a "next" batch // containing all of the objects that failed from the previous batch and had // retries available to them. // // If an error was encountered while making the API request, _all_ of the items // from the previous batch (that have retries available to them) will be // returned immediately, along with the error that was encountered. // // enqueueAndCollectRetriesFor blocks until the entire Batch "batch" has been // processed. func (q *TransferQueue) enqueueAndCollectRetriesFor(batch batch) (batch, error) { q.Upgrade() next := q.makeBatch() tracerx.Printf("tq: sending batch of size %d", len(batch)) enqueueRetry := func(t *objectTuple, err error, readyTime *time.Time) { count := q.rc.Increment(t.Oid) if readyTime == nil { t.ReadyTime = q.rc.ReadyTime(t.Oid) } else { t.ReadyTime = *readyTime } delay := time.Until(t.ReadyTime).Seconds() var errMsg string if err != nil { errMsg = fmt.Sprintf(": %s", err) } tracerx.Printf("tq: enqueue retry #%d after %.2fs for %q (size: %d)%s", count, delay, t.Oid, t.Size, errMsg) next = append(next, t) } q.meter.Pause() var bRes *BatchResponse manifest := q.manifest.Upgrade() if manifest.standaloneTransferAgent != "" { // Trust the external transfer agent can do everything by itself. objects := make([]*Transfer, 0, len(batch)) for _, t := range batch { objects = append(objects, &Transfer{Oid: t.Oid, Size: t.Size, Path: t.Path, Missing: t.Missing}) } bRes = &BatchResponse{ Objects: objects, TransferAdapterName: manifest.standaloneTransferAgent, } } else { // Query the Git LFS server for what transfer method to use and // details such as URLs, authentication, etc. 
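// As a rough sketch of the wire format (abbreviated; the field names mirror the json tags on the Transfer and Action structs in transfer.go, with full details in the Git LFS Batch API specification): // // request: {"operation": "download", "objects": [{"oid": "...", "size": 123}]} // response: {"objects": [{"oid": "...", "size": 123, "actions": {"download": {"href": "...", "header": {...}, "expires_at": "..."}}}]}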
var err error bRes, err = Batch(q.manifest, q.direction, q.remote, q.ref, batch.ToTransfers()) if err != nil { var hasNonRetriableObjects = false // If there was an error making the batch API call, mark all of // the objects for retry if possible. If any should not be retried, // they will be marked as failed. for _, t := range batch { if q.canRetryObject(t.Oid, err) { enqueueRetry(t, err, nil) } else if readyTime, canRetry := q.canRetryObjectLater(t.Oid, err); canRetry { enqueueRetry(t, err, &readyTime) } else { hasNonRetriableObjects = true q.wait.Done() } } // Only return error and mark operation as failure if at least one object // was not enqueued for retrial at a later point. // Make sure to return an error which causes all other objects to be retried. if hasNonRetriableObjects { return next, errors.NewRetriableError(err) } else { return next, nil } } } if len(bRes.Objects) == 0 { return next, nil } // We check first that all of the objects we want to upload are present, // and abort if any are missing. We'll never have any objects marked as // missing except possibly on upload, so just skip iterating over the // objects in that case. if q.direction == Upload { for _, o := range bRes.Objects { // If the server already has the object, the list of // actions will be empty. It's fine if the file is // missing in that case, since we don't need to upload // it. if o.Missing && len(o.Actions) != 0 { return nil, errors.New(tr.Tr.Get("Unable to find source for object %v (try running `git lfs fetch --all`)", o.Oid)) } } } q.useAdapter(bRes.TransferAdapterName) q.meter.Start() toTransfer := make([]*Transfer, 0, len(bRes.Objects)) for _, o := range bRes.Objects { if o.Error != nil { q.errorc <- errors.Wrapf(o.Error, "[%v] %v", o.Oid, o.Error.Message) q.Skip(o.Size) q.wait.Done() continue } q.trMutex.Lock() objects, ok := q.transfers[o.Oid] q.trMutex.Unlock() if !ok { // If we couldn't find any associated // Transfer object, then we give up on the // transfer by telling the progress meter to // skip the number of bytes in "o". q.errorc <- errors.Errorf(tr.Tr.Get("[%v] The server returned an unknown OID.", o.Oid)) q.Skip(o.Size) q.wait.Done() } else { // Pick t[0], since it will cover all transfers with the // same OID. tr := newTransfer(o, objects.First().Name, objects.First().Path) if a, err := tr.Rel(q.direction.String()); err != nil { if q.canRetryObject(tr.Oid, err) { enqueueRetry(objects.First(), err, nil) } else { q.errorc <- errors.Errorf("[%v] %v", tr.Name, err) q.Skip(o.Size) q.wait.Done() } } else if a == nil && manifest.standaloneTransferAgent == "" { q.Skip(o.Size) q.wait.Done() } else { q.meter.StartTransfer(objects.First().Name) toTransfer = append(toTransfer, tr) } } } retries := q.addToAdapter(bRes.endpoint, toTransfer) for t := range retries { enqueueRetry(t, nil, nil) } return next, nil } // makeBatch returns a new, empty batch, with a capacity equal to the maximum // batch size designated by the `*TransferQueue`. func (q *TransferQueue) makeBatch() batch { return make(batch, 0, q.batchSize) } // addToAdapter adds the given "pending" transfers to the transfer adapters and // returns a channel of Transfers that are to be retried in the next batch. // After all of the items in the batch have been processed, the channel is // closed. // // addToAdapter returns immediately, and does not block. 
func (q *TransferQueue) addToAdapter(e lfshttp.Endpoint, pending []*Transfer) <-chan *objectTuple { q.Upgrade() retries := make(chan *objectTuple, len(pending)) if err := q.ensureAdapterBegun(e); err != nil { close(retries) q.errorc <- err for _, t := range pending { q.Skip(t.Size) q.wait.Done() } return retries } present, missingResults := q.partitionTransfers(pending) go func() { defer close(retries) var results <-chan TransferResult if q.dryRun { results = q.makeDryRunResults(present) } else { results = q.adapter.Add(present...) } for _, res := range missingResults { q.handleTransferResult(res, retries) } for res := range results { q.handleTransferResult(res, retries) } }() return retries } func (q *TransferQueue) partitionTransfers(transfers []*Transfer) (present []*Transfer, results []TransferResult) { q.Upgrade() if q.direction != Upload { return transfers, nil } present = make([]*Transfer, 0, len(transfers)) results = make([]TransferResult, 0, len(transfers)) for _, t := range transfers { var err error if t.Size < 0 { err = errors.Errorf(tr.Tr.Get("object %q has invalid size (got: %d)", t.Oid, t.Size)) } else { fd, serr := os.Stat(t.Path) if serr != nil { if os.IsNotExist(serr) { err = newObjectMissingError(t.Name, t.Oid) } else { err = serr } } else if t.Size != fd.Size() { err = newCorruptObjectError(t.Name, t.Oid) } } if err != nil { results = append(results, TransferResult{ Transfer: t, Error: err, }) } else { present = append(present, t) } } return } // makeDryRunResults returns a channel populated immediately with "successful" // results for all of the given transfers in "ts". func (q *TransferQueue) makeDryRunResults(ts []*Transfer) <-chan TransferResult { results := make(chan TransferResult, len(ts)) for _, t := range ts { results <- TransferResult{t, nil} } close(results) return results } // handleTransferResult observes the transfer result, sending it on the retries // channel if it was able to be retried. func (q *TransferQueue) handleTransferResult( res TransferResult, retries chan<- *objectTuple, ) { oid := res.Transfer.Oid if res.Error != nil { // If there was an error encountered when processing the // transfer (res.Transfer), handle the error as appropriate: if readyTime, canRetry := q.canRetryObjectLater(oid, res.Error); canRetry { // If the object can't be retried now, but can be // after a certain period of time, send it to // the retry channel with a time when it's ready. tracerx.Printf("tq: retrying object %s after %.2f seconds.", oid, time.Until(readyTime).Seconds()) q.trMutex.Lock() objects, ok := q.transfers[oid] q.trMutex.Unlock() if ok { t := objects.First() t.ReadyTime = readyTime retries <- t } else { q.errorc <- res.Error } } else if q.canRetryObject(oid, res.Error) { // If the object can be retried, send it on the retries // channel, where it will be read at the call-site and // its retry count will be incremented. tracerx.Printf("tq: retrying object %s: %s", oid, res.Error) q.trMutex.Lock() objects, ok := q.transfers[oid] q.trMutex.Unlock() if ok { retries <- objects.First() } else { q.errorc <- res.Error } } else { // If the error wasn't retriable, OR the object has // exceeded its retry budget, it will NOT be sent to // the retry channel, and the error will be reported // immediately (unless the error is in response to an // HTTP 422).
if errors.IsUnprocessableEntityError(res.Error) { q.unsupportedContentType = true } else { q.errorc <- res.Error } q.wait.Done() } } else { q.trMutex.Lock() objects := q.transfers[oid] objects.completed = true // Otherwise, if the transfer was successful, notify all of the // watchers, and mark it as finished. for _, c := range q.watchers { // Send one update for each transfer with the // same OID. for _, t := range objects.All() { c <- &Transfer{ Name: t.Name, Path: t.Path, Oid: t.Oid, Size: t.Size, } } } q.trMutex.Unlock() q.meter.FinishTransfer(res.Transfer.Name) q.wait.Done() } } func (q *TransferQueue) useAdapter(name string) { q.adapterInitMutex.Lock() defer q.adapterInitMutex.Unlock() if q.adapter != nil { if q.adapter.Name() == name { // re-use, this is the normal path return } // If the adapter we're using isn't the same as the one we've been // told to use now, must wait for the current one to finish then switch // This will probably never happen but is just in case server starts // changing adapter support in between batches q.finishAdapter() } q.adapter = q.manifest.NewAdapterOrDefault(name, q.direction) } func (q *TransferQueue) finishAdapter() { if q.adapterInProgress { q.adapter.End() q.adapterInProgress = false q.adapter = nil } } // BatchSize returns the batch size of the receiving *TransferQueue, or, the // number of transfers to accept before beginning work on them. func (q *TransferQueue) BatchSize() int { return q.batchSize } func (q *TransferQueue) Skip(size int64) { q.meter.Skip(size) } func (q *TransferQueue) ensureAdapterBegun(e lfshttp.Endpoint) error { q.Upgrade() q.adapterInitMutex.Lock() defer q.adapterInitMutex.Unlock() if q.adapterInProgress { return nil } // Progress callback - receives byte updates cb := func(name string, total, read int64, current int) error { q.meter.TransferBytes(q.direction.String(), name, read, total, current) if q.cb != nil { // NOTE: this is the mechanism by which the logpath // specified by GIT_LFS_PROGRESS is written to. // // See: lfs.downloadFile() for more. q.cb(total, read, current) } return nil } tracerx.Printf("tq: starting transfer adapter %q", q.adapter.Name()) err := q.adapter.Begin(q.toAdapterCfg(e), cb) if err != nil { return err } q.adapterInProgress = true return nil } func (q *TransferQueue) toAdapterCfg(e lfshttp.Endpoint) AdapterConfig { apiClient := q.manifest.APIClient() concurrency := q.manifest.ConcurrentTransfers() return &adapterConfig{ concurrentTransfers: concurrency, apiClient: apiClient, remote: q.remote, } } // Wait waits for the queue to finish processing all transfers. Once Wait is // called, Add will no longer add transfers to the queue. Any failed // transfers will be automatically retried once. func (q *TransferQueue) Wait() { close(q.incoming) q.wait.Wait() q.collectorWait.Wait() q.finishAdapter() close(q.errorc) for _, watcher := range q.watchers { close(watcher) } q.meter.Flush() q.errorwait.Wait() if q.manifest.Upgraded() { manifest := q.manifest.Upgrade() if manifest.sshTransfer != nil { manifest.sshTransfer.Shutdown() manifest.sshTransfer = nil } } if q.unsupportedContentType { fmt.Fprintln(os.Stderr, tr.Tr.Get(`info: Uploading failed due to unsupported Content-Type header(s). info: Consider disabling Content-Type detection with: info: info: $ git config lfs.contenttype false`)) } } // Watch returns a channel where the queue will write the value of each transfer // as it completes. 
If multiple transfers exist with the same OID, they will all // be recorded here, even though only one actual transfer took place. The // channel will be closed when the queue finishes processing. func (q *TransferQueue) Watch() chan *Transfer { c := make(chan *Transfer, q.batchSize) q.watchers = append(q.watchers, c) return c } // This goroutine collects errors returned from transfers func (q *TransferQueue) errorCollector() { for err := range q.errorc { q.errors = append(q.errors, err) } q.errorwait.Done() } // run begins the transfer queue. It transfers files sequentially or // concurrently depending on the Config.ConcurrentTransfers() value. func (q *TransferQueue) run() { tracerx.Printf("tq: running as batched queue, batch size of %d", q.batchSize) go q.errorCollector() go q.collectBatches() } // canRetry returns whether or not the given error "err" is retriable. func (q *TransferQueue) canRetry(err error) bool { return errors.IsRetriableError(err) } // canRetryLater returns the time at which an error can be retried and whether the error // is a delayed-retriable error. func (q *TransferQueue) canRetryLater(err error) (time.Time, bool) { return errors.IsRetriableLaterError(err) } // canRetryObject returns whether the given error is retriable for the object // given by "oid". If an OID has met its retry limit, then it cannot be // retried again. Otherwise, canRetryObject returns whether or not the // given error "err" is retriable. func (q *TransferQueue) canRetryObject(oid string, err error) bool { if count, ok := q.rc.CanRetry(oid); !ok { tracerx.Printf("tq: refusing to retry %q, too many retries (%d)", oid, count) return false } return q.canRetry(err) } func (q *TransferQueue) canRetryObjectLater(oid string, err error) (time.Time, bool) { if count, ok := q.rc.CanRetry(oid); !ok { tracerx.Printf("tq: refusing to retry %q, too many retries (%d)", oid, count) return time.Time{}, false } return q.canRetryLater(err) } // Errors returns any errors encountered during transfer.
func (q *TransferQueue) Errors() []error { return q.errors } git-lfs-3.6.1/tq/transfer_queue_test.go000066400000000000000000000037561472372047300201520ustar00rootroot00000000000000package tq import ( "testing" "time" "github.com/stretchr/testify/assert" ) func TestManifestDefaultsToFixedRetries(t *testing.T) { assert.Equal(t, 8, NewManifest(nil, nil, "", "").MaxRetries()) } func TestManifestDefaultsToFixedRetryDelay(t *testing.T) { assert.Equal(t, 10, NewManifest(nil, nil, "", "").MaxRetryDelay()) } func TestRetryCounterDefaultsToFixedRetries(t *testing.T) { rc := newRetryCounter() assert.Equal(t, 8, rc.MaxRetries) } func TestRetryCounterDefaultsToFixedRetryDelay(t *testing.T) { rc := newRetryCounter() assert.Equal(t, 10, rc.MaxRetryDelay) } func TestRetryCounterIncrementsObjects(t *testing.T) { rc := newRetryCounter() assert.Equal(t, 1, rc.Increment("oid")) assert.Equal(t, 1, rc.CountFor("oid")) assert.Equal(t, 2, rc.Increment("oid")) assert.Equal(t, 2, rc.CountFor("oid")) } func TestRetryCounterCanNotRetryAfterExceedingRetryCount(t *testing.T) { rc := newRetryCounter() rc.MaxRetries = 1 rc.Increment("oid") count, canRetry := rc.CanRetry("oid") assert.Equal(t, 1, count) assert.False(t, canRetry) } func TestRetryCounterDoesNotDelayFirstAttempt(t *testing.T) { rc := newRetryCounter() assert.Equal(t, time.Time{}, rc.ReadyTime("oid")) } func TestRetryCounterDelaysExponentially(t *testing.T) { rc := newRetryCounter() start := time.Now() rc.Increment("oid") ready1 := rc.ReadyTime("oid") assert.GreaterOrEqual(t, int64(ready1.Sub(start)/time.Millisecond), int64(baseRetryDelayMs)) rc.Increment("oid") ready2 := rc.ReadyTime("oid") assert.GreaterOrEqual(t, int64(ready2.Sub(start)/time.Millisecond), int64(2*baseRetryDelayMs)) } func TestRetryCounterLimitsDelay(t *testing.T) { rc := newRetryCounter() rc.MaxRetryDelay = 1 for i := 0; i < 4; i++ { rc.Increment("oid") } rt := rc.ReadyTime("oid") assert.WithinDuration(t, time.Now(), rt, 1*time.Second) } func TestBatchSizeReturnsBatchSize(t *testing.T) { q := NewTransferQueue( Upload, NewManifest(nil, nil, "", ""), "origin", WithBatchSize(3)) assert.Equal(t, 3, q.BatchSize()) } git-lfs-3.6.1/tq/transfer_test.go000066400000000000000000000101451472372047300167360ustar00rootroot00000000000000package tq import ( "testing" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type testAdapter struct { name string dir Direction } func (a *testAdapter) Name() string { return a.name } func (a *testAdapter) Direction() Direction { return a.dir } func (a *testAdapter) Begin(cfg AdapterConfig, cb ProgressCallback) error { return nil } func (a *testAdapter) Add(ts ...*Transfer) (retries <-chan TransferResult) { return nil } func (a *testAdapter) End() { } func newTestAdapter(name string, dir Direction) Adapter { return &testAdapter{name, dir} } func newRenamedTestAdapter(name string, dir Direction) Adapter { return &testAdapter{"RENAMED", dir} } func TestBasicAdapterExists(t *testing.T) { m := NewManifest(nil, nil, "", "") assert := assert.New(t) dls := m.GetDownloadAdapterNames() if assert.NotNil(dls) { assert.ElementsMatch([]string{"basic", "lfs-standalone-file", "ssh"}, dls) } uls := m.GetUploadAdapterNames() if assert.NotNil(uls) { assert.ElementsMatch([]string{"basic", "lfs-standalone-file", "ssh"}, uls) } da := m.NewDownloadAdapter("basic") if assert.NotNil(da) { assert.Equal("basic", da.Name()) assert.Equal(Download, da.Direction()) } ua :=
m.NewUploadAdapter("basic") if assert.NotNil(ua) { assert.Equal("basic", ua.Name()) assert.Equal(Upload, ua.Direction()) } } func TestAdapterRegAndOverride(t *testing.T) { m := NewManifest(nil, nil, "", "") assert := assert.New(t) assert.Nil(m.NewAdapter("test", Download)) assert.Nil(m.NewAdapter("test", Upload)) da := m.NewDownloadAdapter("test") if assert.NotNil(da) { assert.Equal("basic", da.Name()) assert.Equal(Download, da.Direction()) } ua := m.NewUploadAdapter("test") if assert.NotNil(ua) { assert.Equal("basic", ua.Name()) assert.Equal(Upload, ua.Direction()) } m.RegisterNewAdapterFunc("test", Upload, newTestAdapter) assert.Nil(m.NewAdapter("test", Download)) assert.NotNil(m.NewAdapter("test", Upload)) da = m.NewDownloadAdapter("test") if assert.NotNil(da) { assert.Equal("basic", da.Name()) assert.Equal(Download, da.Direction()) } ua = m.NewUploadAdapter("test") if assert.NotNil(ua) { assert.Equal("test", ua.Name()) assert.Equal(Upload, ua.Direction()) } m.RegisterNewAdapterFunc("test", Download, newTestAdapter) assert.NotNil(m.NewAdapter("test", Download)) assert.NotNil(m.NewAdapter("test", Upload)) da = m.NewDownloadAdapter("test") if assert.NotNil(da) { assert.Equal("test", da.Name()) assert.Equal(Download, da.Direction()) } ua = m.NewUploadAdapter("test") if assert.NotNil(ua) { assert.Equal("test", ua.Name()) assert.Equal(Upload, ua.Direction()) } // Test override m.RegisterNewAdapterFunc("test", Upload, newRenamedTestAdapter) ua = m.NewUploadAdapter("test") if assert.NotNil(ua) { assert.Equal("RENAMED", ua.Name()) assert.Equal(Upload, ua.Direction()) } da = m.NewDownloadAdapter("test") if assert.NotNil(da) { assert.Equal("test", da.Name()) assert.Equal(Download, da.Direction()) } m.RegisterNewAdapterFunc("test", Download, newRenamedTestAdapter) da = m.NewDownloadAdapter("test") if assert.NotNil(da) { assert.Equal("RENAMED", da.Name()) assert.Equal(Download, da.Direction()) } } func TestAdapterRegButBasicOnly(t *testing.T) { cli, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.basictransfersonly": "yes", })) require.Nil(t, err) m := NewManifest(nil, cli, "", "") assert := assert.New(t) m.RegisterNewAdapterFunc("test", Upload, newTestAdapter) m.RegisterNewAdapterFunc("test", Download, newTestAdapter) // Will still be created if we ask for them da := m.NewDownloadAdapter("test") if assert.NotNil(da) { assert.Equal("test", da.Name()) assert.Equal(Download, da.Direction()) } ua := m.NewUploadAdapter("test") if assert.NotNil(ua) { assert.Equal("test", ua.Name()) assert.Equal(Upload, ua.Direction()) } // But list will exclude ld := m.GetDownloadAdapterNames() assert.Equal([]string{"basic"}, ld) lu := m.GetUploadAdapterNames() assert.Equal([]string{"basic"}, lu) } git-lfs-3.6.1/tq/tus_upload.go000066400000000000000000000117511472372047300162360ustar00rootroot00000000000000package tq import ( "io" "os" "strconv" "strings" "github.com/git-lfs/git-lfs/v3/errors" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/tools" "github.com/git-lfs/git-lfs/v3/tr" ) const ( TusAdapterName = "tus" TusVersion = "1.0.0" ) // Adapter for tus.io protocol resumaable uploads type tusUploadAdapter struct { *adapterBase } func (a *tusUploadAdapter) WorkerStarting(workerNum int) (interface{}, error) { return nil, nil } func (a *tusUploadAdapter) WorkerEnding(workerNum int, ctx interface{}) { } func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCallback, authOkFunc func()) error { rel, err := t.Rel("upload") if err != nil { return err 
} if rel == nil { return errors.Errorf(tr.Tr.Get("No upload action for object: %s", t.Oid)) } // Note not supporting the Creation extension since the batch API generates URLs // Also not supporting Concatenation to support parallel uploads of chunks; forward only // 1. Send HEAD request to determine upload start point // Request must include Tus-Resumable header (version) a.Trace("xfer: sending tus.io HEAD request for %q", t.Oid) req, err := a.newHTTPRequest("HEAD", rel) if err != nil { return err } req.Header.Set("Tus-Resumable", TusVersion) res, err := a.doHTTP(t, req) if err != nil { return errors.NewRetriableError(err) } // Response will contain Upload-Offset if supported offHdr := res.Header.Get("Upload-Offset") if len(offHdr) == 0 { return errors.New(tr.Tr.Get("missing Upload-Offset header from tus.io HEAD response at %q, contact server admin", rel.Href)) } offset, err := strconv.ParseInt(offHdr, 10, 64) if err != nil || offset < 0 { return errors.New(tr.Tr.Get("invalid Upload-Offset value %q in response from tus.io HEAD at %q, contact server admin", offHdr, rel.Href)) } // Upload-Offset=size means already completed (skip) // Batch API will probably already detect this, but handle just in case if offset >= t.Size { a.Trace("xfer: tus.io HEAD offset %d indicates %q is already fully uploaded, skipping", offset, t.Oid) advanceCallbackProgress(cb, t, t.Size) return nil } // Open file for uploading f, err := os.OpenFile(t.Path, os.O_RDONLY, 0644) if err != nil { return errors.Wrap(err, tr.Tr.Get("tus.io upload")) } defer f.Close() // Upload-Offset=0 means start from scratch, but still send PATCH if offset == 0 { a.Trace("xfer: tus.io uploading %q from start", t.Oid) } else { a.Trace("xfer: tus.io resuming upload %q from %d", t.Oid, offset) advanceCallbackProgress(cb, t, offset) } // 2. Send PATCH request with byte start point (even if 0) in Upload-Offset // Response status must be 204 // Response Upload-Offset must be request Upload-Offset plus sent bytes // Response may include Upload-Expires header in which case check not passed a.Trace("xfer: sending tus.io PATCH request for %q", t.Oid) req, err = a.newHTTPRequest("PATCH", rel) if err != nil { return err } req.Header.Set("Tus-Resumable", TusVersion) req.Header.Set("Upload-Offset", strconv.FormatInt(offset, 10)) req.Header.Set("Content-Type", "application/offset+octet-stream") req.Header.Set("Content-Length", strconv.FormatInt(t.Size-offset, 10)) req.ContentLength = t.Size - offset // Ensure progress callbacks made while uploading // Wrap callback to give name context ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error { if cb != nil { return cb(t.Name, totalSize, readSoFar, readSinceLast) } return nil } var reader lfsapi.ReadSeekCloser = tools.NewBodyWithCallback(f, t.Size, ccb) reader = newStartCallbackReader(reader, func() error { // seek to the offset since lfsapi.Client rewinds the body if _, err := f.Seek(offset, io.SeekCurrent); err != nil { return err } // Signal auth was ok on first read; this frees up other workers to start if authOkFunc != nil { authOkFunc() } return nil }) req.Body = reader req = a.apiClient.LogRequest(req, "lfs.data.upload") res, err = a.doHTTP(t, req) if err != nil { return errors.NewRetriableError(err) } // A status code of 403 likely means that an authentication token for the // upload has expired. This can be safely retried. 
if res.StatusCode == 403 { err = errors.New(tr.Tr.Get("Received status %d", res.StatusCode)) return errors.NewRetriableError(err) } if res.StatusCode > 299 { return errors.Wrapf(nil, tr.Tr.Get("Invalid status for %s %s: %d", req.Method, strings.SplitN(req.URL.String(), "?", 2)[0], res.StatusCode, )) } io.Copy(io.Discard, res.Body) res.Body.Close() return verifyUpload(a.apiClient, a.remote, t) } func configureTusAdapter(m *concreteManifest) { m.RegisterNewAdapterFunc(TusAdapterName, Upload, func(name string, dir Direction) Adapter { switch dir { case Upload: bu := &tusUploadAdapter{newAdapterBase(m.fs, name, dir, nil)} // self implements impl bu.transferImpl = bu return bu case Download: panic(tr.Tr.Get("Should never ask this function to download")) } return nil }) } git-lfs-3.6.1/tq/verify.go000066400000000000000000000026361472372047300153650ustar00rootroot00000000000000package tq import ( "net/http" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/tools" "github.com/rubyist/tracerx" ) const ( maxVerifiesConfigKey = "lfs.transfer.maxverifies" defaultMaxVerifyAttempts = 3 ) func verifyUpload(c *lfsapi.Client, remote string, t *Transfer) error { action, err := t.Actions.Get("verify") if err != nil { return err } if action == nil { return nil } req, err := http.NewRequest("POST", action.Href, nil) if err != nil { return err } err = lfsapi.MarshalToRequest(req, struct { Oid string `json:"oid"` Size int64 `json:"size"` }{Oid: t.Oid, Size: t.Size}) if err != nil { return err } req.Header.Set("Content-Type", "application/vnd.git-lfs+json") req.Header.Set("Accept", "application/vnd.git-lfs+json") for key, value := range action.Header { req.Header.Set(key, value) } mv := c.GitEnv().Int(maxVerifiesConfigKey, defaultMaxVerifyAttempts) mv = tools.MaxInt(defaultMaxVerifyAttempts, mv) req = c.LogRequest(req, "lfs.verify") for i := 1; i <= mv; i++ { tracerx.Printf("tq: verify %s attempt #%d (max: %d)", t.Oid[:7], i, mv) var res *http.Response if t.Authenticated { res, err = c.Do(req) } else { res, err = c.DoWithAuth(remote, c.Endpoints.AccessFor(action.Href), req) } if err != nil { tracerx.Printf("tq: verify err: %+v", err.Error()) } else { err = res.Body.Close() break } } return err } git-lfs-3.6.1/tq/verify_test.go000066400000000000000000000034311472372047300164160ustar00rootroot00000000000000package tq import ( "encoding/json" "net/http" "net/http/httptest" "sync/atomic" "testing" "github.com/git-lfs/git-lfs/v3/lfsapi" "github.com/git-lfs/git-lfs/v3/lfshttp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestVerifyWithoutAction(t *testing.T) { c, _ := lfsapi.NewClient(nil) tr := &Transfer{ Oid: "abc", Size: 123, } assert.Nil(t, verifyUpload(c, "origin", tr)) } func TestVerifySuccess(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/verify" { w.WriteHeader(http.StatusNotFound) return } atomic.AddUint32(&called, 1) assert.Equal(t, "POST", r.Method) assert.Equal(t, "bar", r.Header.Get("Foo")) assert.Equal(t, "29", r.Header.Get("Content-Length")) assert.Equal(t, "application/vnd.git-lfs+json", r.Header.Get("Content-Type")) var tr Transfer assert.Nil(t, json.NewDecoder(r.Body).Decode(&tr)) assert.Equal(t, "abcd1234", tr.Oid) assert.EqualValues(t, 123, tr.Size) })) defer srv.Close() // Set auth on the server URL but not on the /verify endpoint. 
Since auth // will cause the request to fail, this will test that the correct access // mode is being passed to `DoWithAuth()` c, err := lfsapi.NewClient(lfshttp.NewContext(nil, nil, map[string]string{ "lfs.transfer.maxverifies": "1", "lfs." + srv.URL + ".access": "Basic", "lfs." + srv.URL + "/verify.access": "None", })) require.Nil(t, err) tr := &Transfer{ Oid: "abcd1234", Size: 123, Actions: map[string]*Action{ "verify": &Action{ Href: srv.URL + "/verify", Header: map[string]string{ "foo": "bar", }, }, }, } assert.Nil(t, verifyUpload(c, "origin", tr)) assert.EqualValues(t, 1, called) } git-lfs-3.6.1/tr/000077500000000000000000000000001472372047300135245ustar00rootroot00000000000000git-lfs-3.6.1/tr/tr.go000066400000000000000000000023561472372047300145060ustar00rootroot00000000000000package tr import ( "encoding/base64" "os" "strings" "github.com/leonelquinteros/gotext" ) //go:generate go run ../tr/trgen/trgen.go var Tr = gotext.NewLocale("/usr/share/locale", "en") var locales = make(map[string]string) func findLocale() string { vars := []string{"LC_ALL", "LC_MESSAGES", "LANG"} for _, varname := range vars { if val, ok := os.LookupEnv(varname); ok { return val } } return "" } func processLocale(locale string) []string { options := make([]string, 0, 2) // For example, split "en_DK.UTF-8" into "en_DK" and "UTF-8". pieces := strings.Split(locale, ".") options = append(options, pieces[0]) // For example, split "en_DK" into "en" and "DK". pieces = strings.Split(pieces[0], "_") if len(pieces) > 1 { options = append(options, pieces[0]) } return options } func InitializeLocale() { locale := findLocale() if len(locale) == 0 { return } Tr = gotext.NewLocale("/usr/share/locale", locale) Tr.AddDomain("git-lfs") for _, loc := range processLocale(locale) { if moData, ok := locales[loc]; ok { mo := gotext.NewMo() decodedData, err := base64.StdEncoding.DecodeString(moData) if err != nil { continue } mo.Parse(decodedData) Tr.AddTranslator("git-lfs", mo) return } } } git-lfs-3.6.1/tr/trgen/000077500000000000000000000000001472372047300146435ustar00rootroot00000000000000git-lfs-3.6.1/tr/trgen/trgen.go000066400000000000000000000037431472372047300163200ustar00rootroot00000000000000package main import ( "encoding/base64" "flag" "fmt" "io" "os" "path/filepath" "regexp" ) func infof(w io.Writer, format string, a ...interface{}) { if !*verbose { return } fmt.Fprintf(w, format, a...) } func warnf(w io.Writer, format string, a ...interface{}) { fmt.Fprintf(w, format, a...) } func readPoDir() (string, []os.DirEntry) { rootDirs := []string{ ".", "..", "../..", } var err error for _, rootDir := range rootDirs { var fs []os.DirEntry fs, err = os.ReadDir(filepath.Join(rootDir, "po", "build")) if err == nil { return rootDir, fs } } // In this case, we don't care about the fact that the build dir doesn't // exist since that just means there are no translations built. That's // fine for us, so just exit successfully.
infof(os.Stderr, "Failed to open po dir: %v\n", err) os.Exit(0) return "", nil } var ( verbose = flag.Bool("verbose", false, "Show verbose output.") ) func main() { flag.Parse() infof(os.Stderr, "Converting po files into translations...\n") rootDir, fs := readPoDir() poDir := filepath.Join(rootDir, "po", "build") out, err := os.Create(filepath.Join(rootDir, "tr", "tr_gen.go")) if err != nil { warnf(os.Stderr, "Failed to create go file: %v\n", err) os.Exit(2) } out.WriteString("package tr\n\nfunc init() {\n") out.WriteString("\t// THIS FILE IS GENERATED, DO NOT EDIT\n") out.WriteString("\t// Use 'go generate ./tr/trgen' to update\n") fileregex := regexp.MustCompile(`([A-Za-z\-_]+).mo`) count := 0 for _, f := range fs { if match := fileregex.FindStringSubmatch(f.Name()); match != nil { infof(os.Stderr, "%v\n", f.Name()) cmd := match[1] content, err := os.ReadFile(filepath.Join(poDir, f.Name())) if err != nil { warnf(os.Stderr, "Failed to open %v: %v\n", f.Name(), err) os.Exit(2) } fmt.Fprintf(out, "\tlocales[\"%s\"] = \"%s\"\n", cmd, base64.StdEncoding.EncodeToString(content)) count++ } } out.WriteString("}\n") infof(os.Stderr, "Successfully processed %d translations.\n", count) } git-lfs-3.6.1/versioninfo.json000066400000000000000000000005541472372047300163370ustar00rootroot00000000000000{ "FixedFileInfo": { "FileVersion": { "Major": 3, "Minor": 6, "Patch": 1, "Build": 0 } }, "StringFileInfo": { "FileDescription": "Git LFS", "LegalCopyright": "GitHub, Inc. and Git LFS contributors", "ProductName": "Git Large File Storage (LFS)", "ProductVersion": "3.6.1" }, "IconPath": "script/windows-installer/git-lfs-logo.ico" }