git-lfs-2.3.4/.gitattributes

* text=auto
* eol=lf
*.bat eol=crlf

git-lfs-2.3.4/.gitignore

bin/
benchmark/
out/
resource.syso

# only allow man/*.\d.ronn files
man/*

*.test
tmp
test/remote
debian/git-lfs/
debian/*.log
debian/files
debian/*.substvars
debian/debhelper-build-stamp
debian/.debhelper
/.pc
obj-*
rpm/BUILD*
rpm/*RPMS
rpm/*.log
rpm/SOURCES
repos
docker/*.key
src
commands/mancontent_gen.go
lfstest-*
!lfstest-*.go

git-lfs-2.3.4/.mailmap

Andy Neff
Artem V. Navrotskiy a.navrotskiy
Artem V. Navrotskiy
Brandon Keepers
David Pursehouse
Evan Priestley
Josh Vera
Lars Schneider
Lee Reilly
Noam Y. Tenne noamt
Rick Olson rick
Rick Olson risk danger olson
Rick Olson Your Name
Riku Lääkkölä
Ryan Simmen
Scott Barron rubyist
Scott Barron
Scott Barron
Scott Richmond
Sebastian Schuberth
Taylor Blau
William Hipschman Will

git-lfs-2.3.4/.travis.yml

# http://docs.travis-ci.com/user/languages/go/
language: go

go: 1.8.3

os:
  - linux

env:
  global:
    - GIT_LFS_TEST_DIR="$HOME/git-lfs-tests"
    - GIT_SOURCE_REPO="https://github.com/git/git.git"
    - GIT_EARLIEST_SUPPORTED_VERSION="v2.0.0"
    - GIT_LATEST_SOURCE_BRANCH="master"
    - GIT_ASKPASS=""

matrix:
  fast_finish: true
  include:
    - env: git-latest-master-from-source
      os: linux
      before_script:
        - >
          git clone $GIT_SOURCE_REPO git-source;
          cd git-source;
          git checkout $GIT_LATEST_SOURCE_BRANCH;
          make --jobs=2;
          make install;
          cd ..;
    - env: git-earliest-supported-version-from-source
      os: linux
      before_script:
        - >
          git clone $GIT_SOURCE_REPO git-source;
          cd git-source;
          git checkout $GIT_EARLIEST_SUPPORTED_VERSION;
          make --jobs=2;
          make install;
          cd ..;
    - env: git-latest
      os: linux
      addons:
        apt:
          sources:
            - git-core
          packages:
            - git

before_install:
  - >
    repo=`basename $PWD`;
    localDir=`dirname $PWD`;
    cfDir="`dirname $localDir`/git-lfs";
    if [[ "$localDir" != "$cfDir" ]]; then
      mv "$localDir" "$cfDir";
      cd ../../git-lfs/$repo;
      export TRAVIS_BUILD_DIR=`dirname $TRAVIS_BUILD_DIR`/$repo;
    fi;

install: true

script: script/cibuild

notifications:
  email: false

git-lfs-2.3.4/CHANGELOG.md

# Git LFS Changelog

## 2.3.4 (18 October, 2017)

### Features

* 'git lfs install' updates filters with 'skip-smudge' option #2673 (@technoweenie)

### Bugs

* FastWalkGitRepo: limit number of concurrent goroutines #2672 (@technoweenie)
* handle scenario where multiple configuration values exist in ~/.gitconfig #2659 (@shiftkey)

## 2.3.3 (9 October, 2017)

### Bugs

* invoke lfs for 'git update-index', fixing 'status' issues #2647 (@technoweenie)
* cache http credential helper output by default #2648 (@technoweenie)

## 2.3.2 (3 October, 2017)

### Features

* bump default activity timeout from 10s -> 30s #2632 (@technoweenie)

### Bugs

* ensure files are marked readonly after unlocking by ID #2642 (@technoweenie)
* add files to index with path relative to current dir #2641 (@technoweenie)
* better Netrc errors #2633 (@technoweenie)
* only use askpass if credential.helper is not configured #2637 (@technoweenie)
* convert backslash to slash when writing to .gitattributes #2625 (@technoweenie)

### Misc

* only copy req headers if there are git-configured extra
  headers #2622 (@technoweenie)
* update tracerx to add timestamps #2620 (@rubyist)

## 2.3.1 (27 September, 2017)

### Features

* add support for SSH_ASKPASS #2609 (@technoweenie)
* `git lfs migrate --verbose` option #2610 (@technoweenie)
* Support standalone custom transfer based on API URL prefix match #2590 (@sprohaska)

### Bugs

* Improve invalid URL error messages #2614 (@technoweenie)
* Fix double counting progress bug #2608 (@technoweenie)
* trim whitespace from GIT_ASKPASS provided passwords #2607 (@technoweenie)
* remove mmap usage in Packfile reader #2600 (@technoweenie)
* `git lfs clone`: don't fetch for unborn repositories #2598 (@shiftkey)

### Misc

* Windows Installer fixes:
  * Show proper icon in add/remove programs list #2585 (@shiftkey)
  * Make the Inno Setup installer script explicitly check for the binaries #2588 (@sschuberth)
  * Improve compile-win-installer-unsigned.bat a bit #2586 (@sschuberth)
* Update migrate docs example for multiple file types #2596 (@technoweenie)

## 2.3.0 (14 September, 2017)

Git LFS v2.3.0 includes performance optimizations for the `git-lfs-migrate(1)` and `git-clone(1)` commands, new features, bug-fixes, and more.

This release was made possible by contributors to Git LFS. Specifically:

- @aleb: added support for "standalone" transfer agents, for using `rsync(1)` and similar with Git LFS.
- @bozaro: added support for custom `.git/lfs/objects` directories via the `lfs.storage` configuration option.
- @larsxschneider: fixed a recursive process leak when shelling out to Git, added new features to `git lfs ls-files`, extra information in error messages used for debugging, documentation changes and more.
- @mathstuf: contributed a documentation change clarifying LFS's handling of empty pointer files.
- @rudineirk and @andyneff: updated our release process to build packages for fedora/26.
- @ssgelm: ensured that LFS is able to be released on Ubuntu Universe.

To everyone who has contributed to this or previous releases of Git LFS: Thank you!
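As a quick sketch of the new `lfs.storage` option mentioned above (the path here is hypothetical; check the output of `git lfs env` on your own machine to confirm the effect):

```
$ git config --local lfs.storage /mnt/big-disk/lfs
$ git lfs env | grep LocalMediaDir
LocalMediaDir=/mnt/big-disk/lfs/objects
```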
### Features

* git/odb/pack: improve `git lfs migrate` performance
  * git/odb/pack: introduce packed object reassembly #2550 #2551 #2552 #2553 #2554 (@ttaylorr)
  * git/odb/pack: teach packfile index entry lookups #2420 #2421 #2422 #2423 #2437 #2441 #2461 (@ttaylorr)
* git/{odb,githistory}: don't write unchanged objects #2541 (@ttaylorr)
* commands: improve `git clone` performance with 'delay' capability #2511 #2469 #2468 #2471 #2467 #2476 #2483 (@ttaylorr)
* commands: mark `git lfs clone` as deprecated #2526 (@ttaylorr)
* commands: enable `lfs.allowincompletepush` by default #2574 (@technoweenie)
* commands: teach '--everything' to `git lfs migrate` #2558 (@ttaylorr)
* commands: teach `git lfs ls-files` a '--debug' option #2540 (@larsxschneider)
* commands,lfs: warn on 4gb size conversion during clean #2510 #2507 #2459 (@ttaylorr)
* lfsapi/creds: teach about GIT_ASKPASS and core.askpass #2500 #2578 (@ttaylorr)
* commands/status: indicate missing objects #2438 (@ttaylorr)
* Allow using custom transfer agents directly #2429 (@aleb)
* Add `lfs.storage` parameter for overriding LFS storage location #2023 (@bozaro)
* lfsapi: enable credential caching by default #2508 (@ttaylorr)
* commands/install: teach `--manual` to `git-lfs-install(1)` #2410 (@ttaylorr)

### Bugs

* migrate: fix migrations with subdirectories in '--include' or '--exclude' #2485 (@ttaylorr)
* commands/migrate: fix hardlinking issue when different filesystem is mounted at `/tmp` #2566 (@ttaylorr)
* commands: make `git lfs migrate` fetch ref updates before migrating #2538 (@ttaylorr)
* commands: remove '--above=1mb' default from `git lfs migrate info` #2460 (@ttaylorr)
* filepathfilter: fix `HasPrefix()` when no '--include' filters present #2579 (@technoweenie)
* git/githistory/log: fix race condition with `git/githistory/log` tests #2495 (@ttaylorr)
* git/odb: fix closing object database test #2457 (@ttaylorr)
* git/githistory: only update local refs after migrations #2559 (@ttaylorr)
* locking: fix unlocking files not removing write flag #2514 (@ttaylorr)
* locks: fix unlocking files in a symlinked directory #2505 (@ttaylorr)
* commands: teach `git lfs unlock` to ignore status errs in appropriate conditions #2475 (@ttaylorr)
* git: expand `GetAttributePaths` check to include non-LFS lockables #2528 (@ttaylorr)
* fix multiple `git update-index` invocations #2531 (@larsxschneider)
* tools: fix SSH credential cacher expiration #2530 (@ttaylorr)
* lfsapi: fix read/write race condition in credential cacher #2493 (@ttaylorr)
* lfs: fix cleaning contents larger than 1024 bytes over stdin #2488 (@ttaylorr)
* fsck only scans current version of objects #2049 (@TheJare)
* progress: fix writing updates to `$GIT_LFS_PROGRESS` #2465 (@ttaylorr)
* commands/track: resolve symlinks before comparing attr paths #2463 (@ttaylorr)
* test: ensure that empty pointers are empty #2458 (@ttaylorr)
* git/githistory/log: prevent 'NaN' showing up in `*PercentageTask` #2455 (@ttaylorr)
* tq: teach Batch() API to retry itself after io.EOF's #2516 (@ttaylorr)

### Misc

* script/packagecloud: release LFS on Fedora/26 #2443 #2509 (@rudineirk, @andyneff)
* git/githistory: change "Rewriting commits" when not updating refs #2577 (@ttaylorr)
* commands: print IP addresses in error logs #2570 (@larsxschneider)
* commands: print current time in UTC to error logs #2571 (@larsxschneider)
* commands: Disable lock verification when using a standalone custom-tr… #2499 (@aleb)
* docs/man: update `git lfs migrate` documentation with EXAMPLES #2580 (@technoweenie)
* docs/man:
  recommend global per-host locking config #2546 (@larsxschneider)
* commands: use transfer queue's batch size instead of constant #2529 (@ttaylorr)
* add function to invoke Git with disabled LFS filters #2453 (@larsxschneider)
* config: warn on unsafe keys in `.lfsconfig` #2502 (@ttaylorr)
* glide: remove unused dependencies #2501 (@ttaylorr)
* script/build: pass '-{ld,gc}flags' to compiler, if given #2462 (@ttaylorr)
* spec: mention that an empty file is its own LFS pointer #2449 (@mathstuf)
* Update to latest version of github.com/pkg/errors #2426 (@ssgelm)
* Update gitignore to add some temp files that get created when building debs #2425 (@ssgelm)
* lfs: indent contents of `git lfs install`, `update` #2392 (@ttaylorr)
* tq: increase default `lfs.concurrenttransfers` to 8 #2506 (@ttaylorr)

## 2.2.1 (10 July, 2017)

### Bugs

* git lfs status --json only includes lfs files #2374 (@asottile)
* git/odb: remove temporary files after migration #2388 (@ttaylorr)
* git/githistory: fix hanging on empty set of commits #2383 (@ttaylorr)
* migrate: don't checkout HEAD on bare repositories #2389 (@ttaylorr)
* git/odb: prevent cross-volume link error when saving objects #2382 (@ttaylorr)
* commands: only pass --jobs to `git clone` if set #2369 (@technoweenie)

### Misc

* lfs: trace hook install, uninstall, upgrade #2393 (@ttaylorr)
* vendor: remove github.com/cheggaaa/pb #2386 (@ttaylorr)
* Use FormatBytes from git-lfs/tools/humanize instead of cheggaaa/pb #2377 (@ssgelm)

## 2.2.0 (27 June, 2017)

Git LFS v2.2.0 includes bug fixes, minor features, and a brand new `migrate` command. The `migrate` command rewrites commits, converting large files from Git blobs to LFS objects. The most common use case will fix a git push rejected for having large blobs:

```
$ git push origin master
# ...
remote: error: file a.psd is 1.2 gb; this exceeds github's file size limit of 100.00 mb
To github.com:ttaylorr/demo.git
 ! [remote rejected] master -> master (pre-receive hook declined)
error: failed to push some refs to 'git@github.com:ttaylorr/demo.git'

$ git lfs migrate info
*.psd   1.2 GB   27/27 files(s)  100%

$ git lfs migrate import --include="*.psd"
migrate: Sorting commits: ..., done
migrate: Rewriting commits: 100% (810/810), done
  master  f18bb746d44e8ea5065fc779bb1acdf3cdae7ed8 -> 35b0fe0a7bf3ae6952ec9584895a7fb6ebcd498b
migrate: Updating refs: ..., done

$ git push origin
Git LFS: (1 of 1 files) 1.2 GB / 1.2 GB
# ...
To github.com:ttaylorr/demo.git
 * [new branch]      master -> master
```

The `migrate` command has detailed options described in the `git-lfs-migrate(1)` man page. Keep in mind that this is the first pass at such a command, so we expect there to be bugs and performance issues (especially on long git histories). Future updates to the command will be focused on improvements to allow full LFS transitions on large repositories.
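Beyond the basic invocation shown above, a couple of option combinations from the man page are worth knowing (a sketch; the size threshold, patterns, and ref name are illustrative):

```
# summarize only files above a given size threshold
$ git lfs migrate info --above=1mb

# rewrite a single branch, converting two file types at once
$ git lfs migrate import --include="*.psd,*.zip" --include-ref=refs/heads/master
```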
### Features

* commands: add git-lfs-migrate(1) 'import' subcommand #2353 (@ttaylorr)
* commands: add git-lfs-migrate(1) 'info' subcommand #2313 (@ttaylorr)
* Implement status --json #2311 (@asottile)
* commands/uploader: allow incomplete pushes #2199 (@ttaylorr)

### Bugs

* Retry on timeout or temporary errors #2312 (@jakub-m)
* commands/uploader: don't verify locks if verification is disabled #2278 (@ttaylorr)
* Fix tools.TranslateCygwinPath() on MSYS #2277 (@raleksandar)
* commands/clone: add new flags since Git 2.9 #2251, #2252 (@ttaylorr)
* Make pull return non-zero error code when some downloads failed #2237 (@seth2810)
* tq/basic_download: guard against nil HTTP response #2227 (@ttaylorr)
* Bugfix: cannot push to scp style URL #2198 (@jiangxin)
* support `lfs.<url>.*` values where url does not include .git #2192 (@technoweenie)
* commands: fix logged error not interpolating format qualifiers #2228 (@ttaylorr)
* commands/help: print helptext to stdout for consistency with Git #2210 (@ttaylorr)

### Misc

* Minor cleanups in help index #2248 (@dpursehouse)
* Add git-lfs-lock and git-lfs-unlock to help index #2232 (@dpursehouse)
* packagecloud: add Debian 9 entry to formatted list #2211 (@ttaylorr)
* Update Xenial to use stretch packages #2212 (@andyneff)

## 2.1.1 (19 May, 2017)

Git LFS v2.1.1 ships with bug fixes and a security patch fixing a remote code execution vulnerability exploitable by setting an SSH remote via your repository's `.lfsconfig` to contain the string "-oProxyCommand". This vulnerability is only exploitable if an attacker has write access to your repository, or you clone a repository with a `.lfsconfig` file containing that string.

### Bugs

* Make pull return non-zero error code when some downloads failed #2245 (@seth2810, @technoweenie)
* lfsapi: support cross-scheme redirection #2243 (@ttaylorr)
* sanitize ssh options parsed from ssh:// url #2242 (@technoweenie)
* filepathfilter: interpret as .gitignore syntax #2238 (@technoweenie)
* tq/basic_download: guard against nil HTTP response #2229 (@ttaylorr)
* commands: fix logged error not interpolating format qualifiers #2230 (@ttaylorr)

### Misc

* release: backport Debian 9-related changes #2244 (@ssgelm, @andyneff, @ttaylorr)
* Add git-lfs-lock and git-lfs-unlock to help index #2240 (@dpursehouse)
* config: allow multiple environments when calling config.Unmarshal #2224 (@ttaylorr)

## 2.1.0 (28 April, 2017)

### Features

* commands/track: teach --no-modify-attrs #2175 (@ttaylorr)
* commands/status: add blob info to each entry #2070 (@ttaylorr)
* lfsapi: improve HTTP request/response stats #2184 (@technoweenie)
* all: support URL-style configuration lookups (@ttaylorr)
* commands: support URL-style lookups for `lfs.{url}.locksverify` #2162 (@ttaylorr)
* lfsapi: support URL-style lookups for `lfs.{url}.access` #2161 (@ttaylorr)
* lfsapi/certs: use `*config.URLConfig` to do per-host config lookup #2160 (@ttaylorr)
* lfsapi: support for `http.<url>.extraHeader` #2159 (@ttaylorr)
* config: add prefix to URLConfig type #2158 (@ttaylorr)
* config: remove dependency on lfsapi package #2156 (@ttaylorr)
* config: support multi-value lookup on URLConfig #2154 (@ttaylorr)
* lfsapi: initial httpconfig type #1912 (@technoweenie, @ttaylorr)
* lfsapi,tq: relative expiration support #2130 (@ttaylorr)

### Bugs

* commands: include error in `LoggedError()` #2179 (@ttaylorr)
* commands: cross-platform log formatting to files #2178 (@ttaylorr)
* locks: cross-platform path normalization #2139 (@ttaylorr)
* commands,locking: don't disable locking for auth
  errors during verify #2110 (@ttaylorr)
* commands/status: show partially staged files twice #2067 (@ttaylorr)

### Misc

* all: build on Go 1.8.1 #2145 (@ttaylorr)
* Polish custom-transfers.md #2171 (@sprohaska)
* commands/push: Fix typo in comment #2170 (@sprohaska)
* config: support multi-valued config entries #2152 (@ttaylorr)
* smudge: use localstorage temp directory, not system #2140 (@ttaylorr)
* locking: send locks limit to server #2107 (@ttaylorr)
* lfs: extract `DiffIndexScanner` #2035 (@ttaylorr)
* status: use DiffIndexScanner to populate results #2042 (@ttaylorr)

## 2.0.2 (29 March, 2017)

### Features

* ssh auth and credential helper caching #2094 (@ttaylorr)
* commands,tq: specialized logging for missing/corrupt objects #2085 (@ttaylorr)
* commands/clone: install repo-level hooks after `git lfs clone` #2074 (@ttaylorr)
* debian: Support building on armhf and arm64 #2089 (@p12tic)

### Bugs

* commands,locking: don't disable locking for auth errors during verify #2111 (@ttaylorr)
* commands: show real error while cleaning #2096 (@ttaylorr)
* lfsapi/auth: optionally prepend an empty scheme to Git remote URLs #2092 (@ttaylorr)
* tq/verify: authenticate verify requests if required #2084 (@ttaylorr)
* commands/{,un}track: correctly escape '#' and ' ' characters #2079 (@ttaylorr)
* tq: use initialized lfsapi.Client instances in transfer adapters #2048 (@ttaylorr)

### Misc

* locking: send locks limit to server #2109 (@ttaylorr)
* docs: update configuration documentation #2097 #2019 #2102 (@terrorobe)
* docs: update locking API documentation #2099 #2101 (@dpursehouse)
* fixed table markdown in README.md #2095 (@ZaninAndrea)
* remove the duplicate work #2098 (@grimreaper)

## 2.0.1 (6 March, 2017)

### Misc

* tq: fallback to `_links` if present #2007 (@ttaylorr)

## 2.0.0 (1 March, 2017)

Git LFS v2.0.0 brings a number of important bug fixes, some new features, and a lot of internal refactoring. It also completely removes old APIs that were deprecated in Git LFS v0.6.

### Locking

File Locking is a brand new feature that lets teams communicate when they are working on files that are difficult to merge. Users are not able to edit or push changes to any files that are locked by other users. While the feature has been in discussion for a year, we are releasing a basic Locking implementation to solicit feedback from the community.

### Transfer Queue

LFS 2.0 introduces a new Git Scanner, which walks a range of Git commits looking for LFS objects to transfer. The Git Scanner is now asynchronous, initiating large uploads or downloads in the Transfer Queue immediately once an LFS object is found. Previously, the Transfer Queue waited until all of the Git commits had been scanned before initiating the transfer. The Transfer Queue also automatically retries failed uploads and downloads more often.

### Deprecations

Git LFS v2.0.0 also drops support for the legacy API in v0.5.0. If you're still using LFS servers on the old API, you'll have to stick to v1.5.6.
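In practice, the basic locking flow introduced above looks roughly like this (a sketch assuming a remote LFS server with locking support; the path, user, and lock ID shown are illustrative):

```
$ git lfs lock images/banner.psd
Locked images/banner.psd

$ git lfs locks
images/banner.psd	alice	ID:123

$ git lfs unlock images/banner.psd
```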
### Features

* Mid-stage locking support #1769 (@sinbad)
* Define lockable files, make read-only in working copy #1870 (@sinbad)
* Check that files are not uncommitted before unlock #1896 (@sinbad)
* Fix `lfs unlock --force` on a missing file #1927 (@technoweenie)
* locking: teach pre-push hook to check for locks #1815 (@ttaylorr)
* locking: add `--json` flag #1814 (@ttaylorr)
* Implement local lock cache, support querying it #1760 (@sinbad)
* support for client certificates pt 2 #1893 (@technoweenie)
* Fix clash between progress meter and credential helper #1886 (@technoweenie)
* Teach uninstall cmd about --local and --system #1887 (@technoweenie)
* Add `--skip-repo` option to `git lfs install` & use in tests #1868 (@sinbad)
* commands: convert push, pre-push to use async gitscanner #1812 (@ttaylorr)
* tq: prioritize transferring retries before new items #1758 (@ttaylorr)

### Bugs

* ensure you're in the correct directory when installing #1793 (@technoweenie)
* locking: make API requests relative to repository, not root #1818 (@ttaylorr)
* Teach 'track' about CRLF #1914 (@technoweenie)
* Teach 'track' how to handle empty lines in .gitattributes #1921 (@technoweenie)
* Closing stdout pipe before function return #1861 (@monitorjbl)
* Custom transfer terminate #1847 (@sinbad)
* Fix Install in root problems #1727 (@technoweenie)
* cat-file batch: read all of the bytes #1680 (@technoweenie)
* Fixed file paths on cygwin. #1820, #1965 (@creste, @ttaylorr)
* tq: decrement uploaded bytes in basic_upload before retry #1958 (@ttaylorr)
* progress: fix never reading bytes with sufficiently small files #1955 (@ttaylorr)
* tools: fix truncating string fields between balanced quotes in GIT_SSH_COMMAND #1962 (@ttaylorr)
* commands/smudge: treat empty pointers as empty files #1954 (@ttaylorr)

### Misc

* all: build using Go 1.8 #1952 (@ttaylorr)
* Embed the version information into the Windows executable #1689 (@sschuberth)
* Add more meta-data to the Windows installer executable #1752 (@sschuberth)
* docs/api: object size must be positive #1779 (@ttaylorr)
* build: omit DWARF tables by default #1937 (@ttaylorr)
* Add test to prove set operator [] works in filter matching #1768 (@sinbad)
* test: add ntlm integration test #1840 (@technoweenie)
* lfs/tq: completely remove legacy support #1686 (@ttaylorr)
* remove deprecated features #1679 (@technoweenie)
* remove legacy api support #1629 (@technoweenie)

## 1.5.6 (16 February, 2017)

### Bugs

* Spool malformed pointers to avoid deadlock #1932 (@ttaylorr)

## 1.5.5 (12 January, 2017)

### Bugs

* lfs: only buffer first 1k when creating a CleanPointerError #1856 (@ttaylorr)

## 1.5.4 (27 December, 2016)

### Bugs

* progress: guard negative padding width, panic in `strings.Repeat` #1807 (@ttaylorr)
* commands,lfs: handle malformed pointers #1805 (@ttaylorr)

### Misc

* script/packagecloud: release LFS on fedora/25 #1798 (@ttaylorr)
* backport filepathfilter to v1.5.x #1782 (@technoweenie)

## 1.5.3 (5 December, 2016)

### Bugs

* Support LFS installations at filesystem root #1732 (@technoweenie)
* git: parse filter process header values containing '=' properly #1733 (@larsxschneider)
* Fix SSH endpoint parsing #1738 (@technoweenie)

### Misc

* build: release on Go 1.7.4 #1741 (@ttaylorr)

## 1.5.2 (22 November, 2016)

### Features

* Release LFS on Fedora 24 #1685 (@technoweenie)

### Bugs

* filter-process: fix reading 1024 byte files #1708 (@ttaylorr)
* Support long paths on Windows #1705 (@technoweenie)

### Misc

* filter-process: exit with error if we detect an unknown command
  from Git #1707 (@ttaylorr)
* vendor: remove contentaddressable lib #1706 (@technoweenie)

## 1.5.1 (18 November, 2016)

### Bugs

* cat-file --batch parser errors on non-lfs git blobs #1680 (@technoweenie)

## 1.5.0 (17 November, 2016)

### Features

* Filter Protocol Support #1617 (@ttaylorr, @larsxschneider)
* Fast directory walk #1616 (@sinbad)
* Allow usage of proxies even when contacting localhost #1605 (@chalstrick)

### Bugs

* start reading off the Watch() channel before sending any input #1671 (@technoweenie)
* wait for remote ref commands to exit before returning #1656 (@jjgod, @technoweenie)

### Misc

* rewrite new catfilebatch implementation for upcoming gitscanner pkg #1650 (@technoweenie)
* refactor testutils.FileInput so it's a little more clear #1666 (@technoweenie)
* Update the lfs track docs #1642 (@technoweenie)
* Pre push tracing #1638 (@technoweenie)
* Remove `AllGitConfig()` #1634 (@technoweenie)
* README: set minimal required Git version to 1.8.5 #1636 (@larsxschneider)
* 'smudge --info' is deprecated in favor of 'ls-files' #1631 (@technoweenie)
* travis-ci: test GitLFS with ancient Git version #1626 (@larsxschneider)

## 1.4.4 (24 October, 2016)

### Bugs

* transfer: more descriptive "expired at" errors #1603 (@ttaylorr)
* commands,lfs/tq: Only send unique OIDs to the Transfer Queue #1600 (@ttaylorr)
* Expose the result message in case of an SSH authentication error #1599 (@sschuberth)

### Misc

* AppVeyor: Do not build branches with open pull requests #1594 (@sschuberth)
* Update .mailmap #1593 (@dpursehouse)

## 1.4.3 (17 October, 2016)

### Bugs

* lfs/tq: use extra arguments given to tracerx.Printf #1583 (@ttaylorr)
* api: correctly print legacy API warning to Stderr #1582 (@ttaylorr)

### Misc

* Test storage retries #1585 (@ttaylorr)
* Test legacy check retries behavior #1584 (@ttaylorr)
* docs: Fix a link to the legacy API #1579 (@sschuberth)
* Add a .mailmap file #1577 (@sschuberth)
* Add a large wizard image to the Windows installer #1575 (@sschuberth)
* Appveyor badge #1574 (@ttaylorr)

## 1.4.2 (10 October, 2016)

v1.4.2 brings a number of bug fixes and usability improvements to LFS. This release also adds support for multiple retries within the transfer queue, making transfers much more reliable. To enable this feature, see the documentation for `lfs.transfer.maxretries` in `git-lfs-config(5)`.

We'd also like to extend a special thank-you to @sschuberth who undertook the process of making LFS's tests run on Windows through AppVeyor. Now all pull requests run tests on macOS, Linux, and Windows.
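For example, the retry ceiling mentioned above can be raised for a single repository (a sketch; the value is arbitrary):

```
$ git config lfs.transfer.maxretries 10
```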
### Features

* lfs: warn on usage of the legacy API #1564 (@ttaylorr)
* use filepath.Clean() when comparing filenames to include/exclude patterns #1565 (@technoweenie)
* lfs/transfer_queue: support multiple retries per object #1505, #1528, #1535, #1545 (@ttaylorr)
* Automatically upgrade old filters instead of requiring --force #1497 (@sinbad)
* Allow lfs.pushurl in .lfsconfig #1489 (@technoweenie)

### Bugs

* Use "sha256sum" on Windows #1566 (@sschuberth)
* git: ignore non-root wildcards #1563 (@ttaylorr)
* Teach status to recognize multiple files with identical contents #1550 (@ttaylorr)
* Status initial commit #1540 (@sinbad)
* Make path comparison robust against Windows short / long path issues #1523 (@sschuberth)
* Allow fetch to run without a remote configured #1507 (@sschuberth)

### Misc

* travis: run tests on Go 1.7.1 #1568 (@ttaylorr)
* Enable running tests on AppVeyor CI #1567 (@sschuberth)
* Travis: Only install git if not installed yet #1557 (@sschuberth)
* Windows test framework fixes #1522 (@sschuberth)
* Simplify getting the absolute Git root directory #1518 (@sschuberth)
* Add icons to the Windows installer #1504 (@sschuberth)
* docs/man: reference git-lfs-pointer(1) in clean documentation #1503 (@ttaylorr)
* Make AppVeyor CI for Windows work again #1506 (@sschuberth)
* commands: try out RegisterCommand() #1495 (@technoweenie)

## 1.4.1 (26 August, 2016)

### Features

* retry if file download failed #1454 (@larsxschneider)
* Support wrapped clone in current directory #1478 (@ttaylorr)

### Misc

* Test `RetriableReader` #1482 (@ttaylorr)

## 1.4.0 (19 August, 2016)

### Features

* Install LFS at the system level when packaged #1460 (@javabrett)
* Fetch remote urls #1451 (@technoweenie)
* add object Authenticated property #1452 (@technoweenie)
* add support for `url.*.insteadof` in git config #1117, #1443 (@artagnon, @technoweenie)

### Bugs

* fix --include bug when multiple files have same lfs content #1458 (@technoweenie)
* check the git version is ok in some key commands #1461 (@technoweenie)
* fix duplicate error reporting #1445, #1453 (@dpursehouse, @technoweenie)
* transfer/custom: encode "event" as lowercase #1441 (@ttaylorr)

### Misc

* docs/man: note GIT_LFS_PROGRESS #1469 (@ttaylorr)
* Reword the description of HTTP 509 status #1467 (@dpursehouse)
* Update fetch include/exclude docs for pattern matching #1455 (@ralfthewise)
* config-next: API changes to the `config` package #1425 (@ttaylorr)
* errors-next: Contextualize error messages #1463 (@ttaylorr, @technoweenie)
* scope commands to not leak instances of themselves #1434 (@technoweenie)
* Transfer manifest #1430 (@technoweenie)

## 1.3.1 (2 August 2016)

### Features

* lfs/hook: teach `lfs.Hook` about `core.hooksPath` #1409 (@ttaylorr)

### Bugs

* distinguish between empty include/exclude paths #1411 (@technoweenie)
* Fix sslCAInfo config lookup when host in config doesn't have a trailing slash #1404 (@dakotahawkins)

### Misc

* Use commands.Config instead of config.Config #1390 (@technoweenie)

## 1.3.0 (21 July 2016)

### Features

* use proxy from git config #1173, #1358 (@jonmagic, @LizzHale, @technoweenie)
* Enhanced upload/download of LFS content: #1265 #1279 #1297 #1303 #1367 (@sinbad)
  * Resumable downloads using HTTP range headers
  * Resumable uploads using [tus.io protocol](http://tus.io)
  * Pluggable [custom transfer adapters](https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md)
* In git 2.9+, run "git lfs pull" in submodules after "git lfs clone" #1373 (@sinbad)
* cmd,doc,test: teach `git lfs track
  --{no-touch,verbose,dry-run}` #1344 (@ttaylorr)
* ⏳ Retry transfers with expired actions #1350 (@ttaylorr)
* Safe track patterns #1346 (@ttaylorr)
* Add checkout --unstaged flag #1262 (@orivej)
* cmd/clone: add include/exclude via flags and config #1321 (@ttaylorr)
* Improve progress reporting when files skipped #1296 (@sinbad)
* Experimental file locking commands #1236, #1259, #1256, #1386 (@ttaylorr)
* Implement support for GIT_SSH_COMMAND #1260 (@pdf)
* Recognize include/exclude filters from config #1257 (@ttaylorr)

### Bugs

* Fix bug in Windows installer under Win32. #1200 (@teo-tsirpanis)
* Updated request.GetAuthType to handle multi-value auth headers #1379 (@VladimirKhvostov)
* Windows fixes #1374 (@sinbad)
* Handle artifactory responses #1371 (@ttaylorr)
* use `git rev-list --stdin` instead of passing each remote ref #1359 (@technoweenie)
* docs/man: move "logs" subcommands from OPTIONS to COMMANDS #1335 (@ttaylorr)
* test/zero-len: update test for git v2.9.1 #1369 (@ttaylorr)
* Unbreak building httputil on OpenBSD #1360 (@jasperla)
* WIP transferqueue race fix #1255 (@technoweenie)
* Safety check to `commands.requireStdin` #1349 (@ttaylorr)
* Removed CentOS 5 from dockers. Fixed #1295. #1298 (@javabrett)
* Fix 'git lfs fetch' with a sha1 ref #1323 (@omonnier)
* Ignore HEAD ref when fetching with --all #1310 (@ttaylorr)
* Return a fully remote ref to reduce chances of ref clashes #1248 (@technoweenie)
* Fix reporting of `git update-index` errors in `git lfs checkout` and `git lfs pull` #1400 (@technoweenie)

### Misc

* Added Linux Mint Sarah to package cloud script #1384 (@andyneff)
* travis-ci: require successful tests against upcoming Git core release #1372 (@larsxschneider)
* travis-ci: add a build job to test against upcoming versions of Git #1361 (@larsxschneider)
* Create Makefiles for building with gccgo #1222 (@zeldin)
* README: add @ttaylorr to core team #1332 (@ttaylorr)
* Enforced a minimum gem version of 1.0.4 for packagecloud-ruby #1292 (@javabrett)
* I think this should be "Once installed" and not "One installed", but … #1305 (@GabLeRoux)
* script/test: propagate extra args to go test #1324 (@omonnier)
* Add `lfs.basictransfersonly` option to disable non-basic transfer adapters #1299 (@sinbad)
* Debian build vendor test excludes #1291 (@javabrett)
* gitignore: ignore lfstest-\* files #1271 (@ttaylorr)
* Disable gojsonschema test, causes failures when firewalls block it #1274 (@sinbad)
* test: use noop credential helper for auth tests #1267 (@ttaylorr)
* get git tests passing when run outside of repository #1229 (@technoweenie)
* Package refactor no.1 #1226 (@sinbad)
* vendor: vendor dependencies in vendor/ using Glide #1243 (@ttaylorr)

## 1.2.1 (2 June 2016)

### Features

* Add missing config details to `env` command #1217 (@sinbad)
* Allow smudge filter to return 0 on download failure #1213 (@sinbad)
* Add `git lfs update --manual` option & promote it on hook install fail #1182 (@sinbad)
* Pass `git lfs clone` flags through to `git clone` correctly, respect some options #1160 (@sinbad)

### Bugs

* Clean trailing `/` from include/exclude paths #1278 (@ttaylorr)
* Fix problems with user prompts in `git lfs clone` #1185 (@sinbad)
* Fix failure to return non-zero exit code when lfs install/update fails to install hooks #1178 (@sinbad)
* Fix missing man page #1149 (@javabrett)
* fix concurrent map read and map write #1179 (@technoweenie)

### Misc

* Allow additional fields on request & response schema #1276 (@sinbad)
* Fix installer error on win32.
  #1198 (@teo-tsirpanis)
* Applied same -ldflags -X name value -> name=value fix #1193 (@javabrett)
* add instructions to install from MacPorts #1186 (@skymoo)
* Add xenial repo #1170 (@graingert)

## 1.2.0 (14 April 2016)

### Features

* netrc support #715 (@rubyist)
* `git lfs clone` command #988 (@sinbad)
* Support self-signed certs #1067 (@sinbad)
* Support sslverify option for specific hosts #1081 (@sinbad)
* Stop transferring duplicate objects on major push or fetch operations on multiple refs. #1128 (@technoweenie)
* Touch existing git tracked files when tracked in LFS so they are flagged as modified #1104 (@sinbad)
* Support for git reference clones #1007 (@jlehtnie)

### Bugs

* Fix clean/smudge filter string for files starting with - #1083 (@epriestley)
* Fix silent failure to push LFS objects when ref matches a filename in the working copy #1096 (@epriestley)
* Fix problems with using LFS in symlinked folders #818 (@sinbad)
* Fix git lfs push silently misbehaving on ambiguous refs; fail like git push instead #1118 (@sinbad)
* Whitelist `lfs.*.access` config in local ~/.lfsconfig #1122 (@rjbell4)
* Only write the encoded pointer information to Stdout #1105 (@sschuberth)
* Use hardcoded auth from remote or lfs config when accessing the storage api #1136 (@technoweenie, @jonmagic)
* SSH should be called more strictly with command as one argument #1134 (@sinbad)

## 1.1.2 (1 March, 2016)

* Fix Base64 issues with `?` #989 (@technoweenie)
* Fix zombie git proc issue #1012 (@rlaakkol)
* Fix problems with files containing unicode characters #1016 (@technoweenie)
* Fix panic in `git cat-file` parser #1006 (@technoweenie)
* Display error messages in non-fatal errors #1028 #1039 #1042 (@technoweenie)
* Fix concurrent map access in progress meter (@technoweenie)

## 1.1.1 (4 February, 2016)

### Features

* Add copy-on-write support for Linux BTRFS filesystem #952 (@bozaro)
* convert `git://` remotes to LFS servers automatically #964 (@technoweenie)
* Fix `git lfs track` handling of absolute paths. #975 (@technoweenie)
* Allow tunable http client timeouts #977 (@technoweenie)

### Bugs

* Suppress git config warnings for non-LFS keys #861 (@technoweenie)
* Fix fallthrough when `git-lfs-authenticate` returns an error #909 (@sinbad)
* Fix progress bar issue #883 (@pokehanai)
* Support `remote.name.pushurl` config #949 (@sinbad)
* Fix handling of `GIT_DIR` and `GIT_WORK_TREE` #963, #971 (@technoweenie)
* Fix handling of zero length files #966 (@nathanhi)
* Guard against invalid remotes passed to `push` and `pre-push` #974 (@technoweenie)
* Fix race condition in `git lfs pull` #972 (@technoweenie)

### Extra

* Add server API test tool #868 (@sinbad)
* Redo windows installer with innosetup #875 (@strich)
* Pre-built packages are built with Go v1.5.3

## 1.1.0 (18 November, 2015)

* NTLM auth support #820 (@WillHipschman, @technoweenie)
* Add `prune` command #742 (@sinbad)
* Use .lfsconfig instead of .gitconfig #837 (@technoweenie)
* Rename "init" command #838 (@technoweenie)
* Raise error if credentials are needed #842 (@technoweenie)
* Support git repos in symlinked directories #818 (@sinbad, @difro, @jiangxin)
* Fix "git lfs env" to show correct SSH remote info #828 (@jiangxin)

## 1.0.2 (28 October, 2015)

* Fix issue with 'git lfs smudge' and the batch API. #795 (@technoweenie)
* Fix race condition in the git scanning code.
  #801 (@technoweenie)

## 1.0.1 (23 October, 2015)

* Downcase git config keys (prevents Auth loop) #690 (@WillHipschman)
* Show more info for unexpected http responses #710 (@rubyist)
* Use separate stdout/stderr buffers for `git-lfs-authenticate` #718 (@bozaro)
* Use LoggedError instead of Panic if update-index fails in checkout #735 (@sinbad)
* `smudge` command exits with non-zero if the download fails #732 (@rubyist)
* Use `git rev-parse` to find the git working dir #692 (@sinbad)
* Improved default remote behaviour & validation for fetch/pull #713 (@sinbad)
* Make fetch return error code when 1+ downloads failed #734 (@sinbad)
* Improve lfs.InRepo() detection in `init`/`update` #756 (@technoweenie)
* Teach smudge to use the batch api #711 (@rubyist)
* Fix not setting global attribute when needed to b/c of local state #765 (@sinbad)
* Fix clone fail when fetch is excluded globally #770 (@sinbad)
* Fix for partial downloads problem #763 (@technoweenie)
* Get integration tests passing on Windows #771 (@sinbad)

### Security

* Whitelist the valid keys read from .gitconfig #760 (@technoweenie)

This prevents unsafe git configuration values from being used by Git LFS.

## v1.0 (1 October, 2015)

* Manual reference is integrated into the "help" options #665 @sinbad
* Fix `ls-files` when run from an empty repository #668 @Aorjoa
* Support listing duplicate files in `ls-files` #681 @Aorjoa @technoweenie
* `update` and `init` commands can install the pre-push hook in bare repositories #671 @technoweenie
* Add `GIT_LFS_SKIP_SMUDGE` and `init --skip-smudge` #679 @technoweenie

## v0.6.0 (10 September, 2015)

This is the first release that uses the new Batch API by default, while still falling back to the Legacy API automatically. Also, new fetch/checkout/push commands have been added.

Run `git lfs update` in any local repositories to make sure all config settings are updated.

### Fetch

* Rename old `git lfs fetch` command to `git lfs pull`. #527 (@sinbad)
* Add `git lfs checkout` #527 #543 #551 #566 (@sinbad)
* Add `git lfs fetch` for _just_ downloading objects. #527 (@sinbad)
* Add `remote` arg, and default to tracking remote instead of "origin". #583 (@sinbad)
* Support fetching multiple refs #542 (@sinbad)
* Add `--include` and `--exclude` flag for `git lfs fetch` #573 (@sinbad)
* Add `--recent` flag for downloading recent files outside of the current checkout. #610 (@sinbad)
* Add `--all` option for downloading all objects from the server. #633 (@sinbad)
* Fix error handling while `git update-index` is running. #570 (@rubyist)

See [git-lfs-fetch(1)](https://github.com/git-lfs/git-lfs/blob/v0.6.0/docs/man/git-lfs-fetch.1.ronn), [git-lfs-checkout(1)](https://github.com/git-lfs/git-lfs/blob/v0.6.0/docs/man/git-lfs-checkout.1.ronn), and [git-lfs-pull(1)](https://github.com/git-lfs/git-lfs/blob/v0.6.0/docs/man/git-lfs-pull.1.ronn) for details.

### Push

* Support pushing multiple branches in the pre-push hook. #635 (@sinbad)
* Fix pushing objects from a branch that's not HEAD. #608 (@sinbad)
* Check server for objects before failing push because local is missing. #581 (@sinbad)
* Filter out commits from remote refs when pushing. #578 (@billygor)
* Support pushing all objects to the server, regardless of the remote ref. #646 (@technoweenie)
* Fix case where pre-push git hook exits with 0. #582 (@sinbad)

See [git-lfs-push(1)](https://github.com/git-lfs/git-lfs/blob/v0.6.0/docs/man/git-lfs-push.1.ronn) for details.

### API Clients

* Fix some race conditions in the Batch API client.
  #577 #637 (@sinbad, @rubyist)
* Support retries in the Batch API client. #595 (@rubyist)
* Fix hanging batch client in certain error conditions. #594 (@rubyist)
* Treat 401 responses as errors in the Legacy API client. #634 (@rubyist)
* Fix bug in the Legacy API client when the object already exists on the server. #572 (@billygor)

### Credentials

* Fix how git credentials are checked in certain edge cases. #611 #650 #652 (@technoweenie)
* Send URI user to git credentials. #626 (@sinbad)
* Support git credentials with useHttpPath enabled. #554 (@clareliguori)

### Installation

* Docker images and scripts for building and testing linux packages. #511 #526 #555 #603 (@andyneff, @ssgelm)
* Create Windows GUI installer. #642 (@technoweenie)
* Binary releases use Go 1.5, which includes fix for Authorization when the request URL includes just the username. [golang/go#11399](https://github.com/golang/go/issues/11399)

### Misc

* Documented Git config values used by Git LFS in [git-lfs-config(5)](https://github.com/git-lfs/git-lfs/blob/v0.6.0/docs/man/git-lfs-config.5.ronn). #610 (@sinbad)
* Experimental support for Git worktrees (in Git 2.5+) #546 (@sinbad)
* Experimental extension support. #486 (@ryansimmen)

## v0.5.4 (30 July, 2015)

* Ensure `git lfs uninit` cleans your git config thoroughly. #530 (@technoweenie)
* Fix issue with asking `git-credentials` for auth details after getting them from the SSH command. #534 (@technoweenie)

## v0.5.3 (23 July, 2015)

* `git lfs fetch` bugs #429 (@rubyist)
* Push can crash on 32 bit architectures #450 (@rubyist)
* Improved SSH support #404, #464 (@sinbad, @technoweenie)
* Support 307 redirects with relative url #442 (@sinbad)
* Fix `init` issues when upgrading #446 #451 #452 #465 (@technoweenie, @rubyist)
* Support chunked Transfer-Encoding #386 (@ryansimmen)
* Fix issue with pushing deleted objects #461 (@technoweenie)
* Teach `git lfs push` how to send specific objects #449 (@larsxschneider)
* Update error message when attempting to push objects that don't exist in `.git/lfs/objects` #447 (@technoweenie)
* Fix bug in HTTP client when response body is nil #472 #488 (@rubyist, @technoweenie)
* `-crlf` flag in gitattributes is deprecated #475 (@technoweenie)
* Improvements to the CentOS and Debian build and package scripts (@andyneff, @ssgelm)

## v0.5.2 (19 June, 2015)

* Add `git lfs fetch` command for downloading objects.
  #285 (@rubyist)
* Fix `git lfs track` issues when run outside of a git repository #312, #323 (@michael-k, @Aorjoa)
* Fix `git lfs track` for paths with spaces in them #327 (@technoweenie)
* Fix `git lfs track` by writing relative paths to .gitattributes #356 (@michael-k)
* Fix `git lfs untrack` so it doesn't remove entries incorrectly from .gitattributes #398 (@michael-k)
* Fix `git lfs clean` bug with zero length files #346 (@technoweenie)
* Add `git lfs fsck` #373 (@zeroshirts, @michael-k)
* The Git pre-push warns if Git LFS is not installed #339 (@rubyist)
* Fix Content-Type header sent by the HTTP client #329 (@joerg)
* Improve performance tracing while scanning refs #311 (@michael-k)
* Fix detection of LocalGitDir and LocalWorkingDir #312 #354 #361 (@michael-k)
* Fix inconsistent file mode bits for directories created by Git LFS #364 (@michael-k)
* Optimize shell execs #377, #382, #391 (@bozaro)
* Collect HTTP transfer stats #366, #400 (@rubyist)
* Support GIT_DIR and GIT_WORK_TREE #370 (@michael-k)
* Hide Git application window in Windows #381 (@bozaro)
* Add support for configured URLs containing credentials per RFC1738 #408 (@ewbankkit, @technoweenie)
* Add experimental support for batch API calls #285 (@rubyist)
* Improve linux build instructions for CentOS and Debian. #299 #309 #313 #332 (@jsh, @ssgelm, @andyneff)

## v0.5.1 (30 April, 2015)

* Fix Windows install.bat script. #223 (@PeterDaveHello)
* Fix bug where `git lfs clean` will clean Git LFS pointers too #271 (@technoweenie)
* Better timeouts for the HTTP client #215 (@Mistobaan)
* Concurrent uploads through `git lfs push` #258 (@rubyist)
* Fix `git lfs smudge` behavior with zero-length file in `.git/lfs/objects` #267 (@technoweenie)
* Separate out pre-push hook behavior from `git lfs push` #263 (@technoweenie)
* Add diff/merge properties to .gitattributes #265 (@technoweenie)
* Respect `GIT_TERMINAL_PROMPT` #257 (@technoweenie)
* Fix CLI progress bar output #185 (@technoweenie)
* Fail fast in `clean` and `smudge` commands when run without STDIN #264 (@technoweenie)
* Fix shell quoting in pre-push hook. #235 (@mhagger)
* Fix progress bar output during file uploads. #185 (@technoweenie)
* Change `remote.{name}.lfs_url` to `remote.{name}.lfsurl` #237 (@technoweenie)
* Swap `git config` order. #245 (@technoweenie)
* New `git lfs pointer` command for generating and comparing pointers #246 (@technoweenie)
* Follow optional "href" property from git-lfs-authenticate SSH command #247 (@technoweenie)
* `.git/lfs/objects` spec clarifications: #212 (@rtyley), #244 (@technoweenie)
* man page updates: #228 (@mhagger)
* pointer spec clarifications: #246 (@technoweenie)
* Code comments for the untrack command: #225 (@thekafkaf)

## v0.5.0 (10 April, 2015)

* Initial public release

git-lfs-2.3.4/CODE-OF-CONDUCT.md

# Git LFS Code of Conduct

This code of conduct outlines our expectations for participants within the Git LFS community, as well as steps for reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all and expect our code of conduct to be honored. Anyone who violates this code of conduct may be banned from the community.

Our open source community strives to:

* **Be friendly and patient.**
* **Be welcoming**: We strive to be a community that welcomes and supports people of all backgrounds and identities.
  This includes, but is not limited to members of any race, ethnicity, culture, national origin, colour, immigration status, social and economic class, educational level, sex, sexual orientation, gender identity and expression, age, size, family status, political belief, religion, and mental and physical ability.
* **Be considerate**: Your work will be used by other people, and you in turn will depend on the work of others. Any decision you take will affect users and colleagues, and you should take those consequences into account when making decisions. Remember that we're a world-wide community, so you might not be communicating in someone else's primary language.
* **Be respectful**: Not all of us will agree all the time, but disagreement is no excuse for poor behavior and poor manners. We might all experience some frustration now and then, but we cannot allow that frustration to turn into a personal attack. It’s important to remember that a community where people feel uncomfortable or threatened is not a productive one.
* **Be careful in the words that we choose**: we are a community of professionals, and we conduct ourselves professionally. Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behavior aren't acceptable.
* **Try to understand why we disagree**: Disagreements, both social and technical, happen all the time. It is important that we resolve disagreements and differing views constructively. Remember that we’re different. The strength of our community comes from its diversity, people from a wide range of backgrounds. Different people have different perspectives on issues. Being unable to understand why someone holds a viewpoint doesn’t mean that they’re wrong. Don’t forget that it is human to err and blaming each other doesn’t get us anywhere. Instead, focus on helping to resolve issues and learning from mistakes.

## Definitions

Harassment includes, but is not limited to:

- Offensive comments related to gender, gender identity and expression, sexual orientation, disability, mental illness, neuro(a)typicality, physical appearance, body size, race, age, regional discrimination, political or religious affiliation
- Unwelcome comments regarding a person’s lifestyle choices and practices, including those related to food, health, parenting, drugs, and employment
- Deliberate misgendering. This includes deadnaming or persistently using a pronoun that does not correctly reflect a person's gender identity.
  You must address people by the name they give you when not addressing them by their username or handle
- Physical contact and simulated physical contact (e.g., textual descriptions like “*hug*” or “*backrub*”) without consent or after a request to stop
- Threats of violence, both physical and psychological
- Incitement of violence towards any individual, including encouraging a person to commit suicide or to engage in self-harm
- Deliberate intimidation
- Stalking or following
- Harassing photography or recording, including logging online activity for harassment purposes
- Sustained disruption of discussion
- Unwelcome sexual attention, including gratuitous or off-topic sexual images or behaviour
- Pattern of inappropriate social contact, such as requesting/assuming inappropriate levels of intimacy with others
- Continued one-on-one communication after requests to cease
- Deliberate “outing” of any aspect of a person’s identity without their consent except as necessary to protect others from intentional abuse
- Publication of non-harassing private communication

Our open source community prioritizes marginalized people’s safety over privileged people’s comfort. We will not act on complaints regarding:

- ‘Reverse’ -isms, including ‘reverse racism,’ ‘reverse sexism,’ and ‘cisphobia’
- Reasonable communication of boundaries, such as “leave me alone,” “go away,” or “I’m not discussing this with you”
- Refusal to explain or debate social justice concepts
- Communicating in a ‘tone’ you don’t find congenial
- Criticizing racist, sexist, cissexist, or otherwise oppressive behavior or assumptions

### Diversity Statement

We encourage everyone to participate and are committed to building a community for all. Although we will fail at times, we seek to treat everyone both as fairly and equally as possible. Whenever a participant has made a mistake, we expect them to take responsibility for it. If someone has been harmed or offended, it is our responsibility to listen carefully and respectfully, and do our best to right the wrong.

Although this list cannot be exhaustive, we explicitly honor diversity in age, gender, gender identity or expression, culture, ethnicity, language, national origin, political beliefs, profession, race, religion, sexual orientation, socioeconomic status, and technical ability. We will not tolerate discrimination based on any of the protected characteristics above, including participants with disabilities.

### Reporting Issues

If you experience or witness unacceptable behavior—or have any other concerns—please report it by contacting us via opensource@github.com. All reports will be handled with discretion. In your report please include:

- Your contact information.
- Names (real, nicknames, or pseudonyms) of any individuals involved. If there are additional witnesses, please include them as well.
- Your account of what occurred, and if you believe the incident is ongoing. If there is a publicly available record (e.g. a mailing list archive or a public IRC logger), please include a link.
- Any additional information that may be helpful.

After filing a report, a representative will contact you personally, review the incident, follow up with any additional questions, and make a decision as to how to respond. If the person who is harassing you is part of the response team, they will recuse themselves from handling your incident. If the complaint originates from a member of the response team, it will be handled by a different member of the response team.
We will respect confidentiality requests for the purpose of protecting victims of abuse.

### Attribution & Acknowledgements

We all stand on the shoulders of giants across many open source communities. We'd like to thank the communities and projects that established code of conducts and diversity statements as our inspiration:

* [Django](https://www.djangoproject.com/conduct/reporting/)
* [Python](https://www.python.org/community/diversity/)
* [Ubuntu](http://www.ubuntu.com/about/about-ubuntu/conduct)
* [Contributor Covenant](http://contributor-covenant.org/)
* [Geek Feminism](http://geekfeminism.org/about/code-of-conduct/)
* [Citizen Code of Conduct](http://citizencodeofconduct.org/)

git-lfs-2.3.4/CONTRIBUTING.md

## Contributing to Git Large File Storage

Hi there! We're thrilled that you'd like to contribute to this project. Your help is essential for keeping it great.

This project adheres to the [Open Code of Conduct](./CODE-OF-CONDUCT.md). By participating, you are expected to uphold this code.

## Feature Requests

Feature requests are welcome, but will have a much better chance of being accepted if they meet the first principles for the project. Git LFS is intended for end users, not Git experts. It should fit into the standard workflow as much as possible, and require little client configuration.

* Large objects are pushed to Git LFS servers during git push.
* Large objects are downloaded during git checkout.
* Git LFS servers are linked to Git remotes by default. Git hosts can support users without requiring them to set up anything extra. Users can access different Git LFS servers like they can with different Git remotes.
* Upload and download requests should use the same form of authentication built into Git: SSH through public keys, and HTTPS through Git credential helpers.
* Git LFS servers use a JSON API designed around progressive enhancement. Servers can simply host off cloud storage, or implement more efficient methods of transferring data.

You can see what the Git LFS team is prioritizing work on in the [roadmap](./ROADMAP.md).

## Project Management

The Git LFS project is managed completely through this open source project and its [chat room][chat]. The [roadmap][] shows the high level items that are prioritized for future work.

Suggestions for major features should be submitted as a pull request that adds a markdown file to `docs/proposals` discussing the feature. This gives the community time to discuss it before a lot of code has been written. Roadmap items are linked to one or more Issue task lists ([example][roadmap-items]), with the `roadmap` label, that go into more detail.

[chat]: https://gitter.im/git-lfs/git-lfs
[roadmap]: ./ROADMAP.md
[roadmap-items]: https://github.com/git-lfs/git-lfs/issues/490

The Git LFS teams mark issues and pull requests with the following labels:

* `bug` - An issue describing a bug.
* `core-team` - An issue relating to the governance of the project.
* `enhancement` - An issue for a possible new feature.
* `review` - A pull request ready to be reviewed.
* `release` - A checklist issue showing items marked for an upcoming release.
* `roadmap` - A checklist issue with tasks to fulfill something from the [roadmap](./ROADMAP.md)

## Branching strategy

In general, contributors should develop on branches based off of `master` and pull requests should be to `master`.

## Submitting a pull request

0. [Fork][] and clone the repository
0. Configure and install the dependencies: `script/bootstrap`
0. Make sure the tests pass on your machine: `script/test`
0. Create a new branch based on `master`: `git checkout -b <branch-name> master`
0. Make your change, add tests, and make sure the tests still pass
0. Push to your fork and [submit a pull request][pr] from your branch to `master`
0. Accept the [GitHub CLA][cla]
0. Pat yourself on the back and wait for your pull request to be reviewed

Here are a few things you can do that will increase the likelihood of your pull request being accepted:

* Follow the [style guide][style] where possible.
* Write tests.
* Update documentation as necessary. Commands have [man pages](./docs/man).
* Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests.
* Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).

## Building

### Prerequisites

Git LFS depends on having a working Go 1.7.3+ environment, with your standard `$GOROOT` and `$GOPATH` environment variables set.

On RHEL and similar distributions, e.g. Red Hat Enterprise Linux Server release 7.2 (Maipo), you will need these minimum packages installed to build Git LFS:

```
$ sudo yum install gcc
$ sudo yum install perl-Digest-SHA
```

In order to run the RPM build `rpm/build_rpms.bsh` you will also need to:

`$ sudo yum install ruby-devel`

(note on an AWS instance you may first need to `sudo yum-config-manager --enable rhui-REGION-rhel-server-optional`)

### Building Git LFS

The easiest way to download Git LFS for making changes is `go get`:

    $ go get github.com/git-lfs/git-lfs

This clones the Git LFS repository to your `$GOPATH`. If you typically keep your projects in a specific directory, you can symlink it from `$GOPATH`:

    $ cd ~/path/to/your/projects
    $ ln -s $GOPATH/src/github.com/git-lfs/git-lfs

From here, run `script/bootstrap` to build Git LFS in the `./bin` directory. Before submitting changes, be sure to run the Go tests and the shell integration tests:

    $ script/test          # runs just the Go tests
    $ script/integration   # runs the shell tests in ./test
    $ script/cibuild       # runs everything, with verbose debug output

## Updating 3rd party packages

0. Update `glide.yaml`.
0. Run `script/vendor` to update the code in the `vendor` directory.
0. Commit the change. Git LFS vendors the full source code in the repository.
0. Submit a pull request.

## Releasing

If you are the current maintainer:

* Create a [new draft Release](https://github.com/git-lfs/git-lfs/releases/new). List any changes with links to related PRs.
* Make sure your local dependencies are up to date: `script/bootstrap`
* Ensure that tests are green: `script/cibuild`
* Bump the version in `lfs/lfs.go`, [like this](https://github.com/git-lfs/git-lfs/commit/dd17828e4a6f2394cbba8621037199dc28f046e8).
* Add the new version to the top of CHANGELOG.md
* Build for all platforms with `script/bootstrap -all` (you need Go setup for cross compiling with Mac, Linux, FreeBSD, and Windows support).
* Test the command locally. The compiled version will be in `bin/releases/{os}-{arch}/git-lfs-{version}/git-lfs`
* Get the draft Release ID from the GitHub API: `curl -in https://api.github.com/repos/git-lfs/git-lfs/releases`
* Run `script/release -id {id}` to upload all of the compiled binaries to the release.
* Publish the Release on GitHub.
* Update the [Git LFS website](https://github.com/git-lfs/git-lfs.github.com/blob/gh-pages/_config.yml#L4) (release engineer access rights required).
* Ping external teams on GitHub:
  * @github/desktop
* Build packages:
  * rpm
  * apt
* Bump the Homebrew version and generate the Homebrew hash with `curl --location https://github.com/git-lfs/git-lfs/archive/vx.y.z.tar.gz | shasum -a 256` ([example](https://github.com/Homebrew/homebrew-core/pull/413/commits/dc0eb1f62514f48f3f5a8d01ad3bea06f78bd566))
* Create a release branch for bug fixes, such as `release-1.5`.
* Increment the version in `config/version.go` to the next expected version. If v1.5 just shipped, set the version in master to `1.6-pre`, for example.

## Resources

- [Contributing to Open Source on GitHub](https://guides.github.com/activities/contributing-to-open-source/)
- [Using Pull Requests](https://help.github.com/articles/using-pull-requests/)
- [GitHub Help](https://help.github.com)

[fork]: https://github.com/git-lfs/git-lfs/fork
[pr]: https://github.com/git-lfs/git-lfs/compare
[style]: https://github.com/golang/go/wiki/CodeReviewComments
[cla]: https://cla.github.com/git-lfs/git-lfs/accept

git-lfs-2.3.4/INSTALLING.md000066400000000000000000000055321317167762300150750ustar00rootroot00000000000000# Installing on Linux using packagecloud

[packagecloud](https://packagecloud.io) hosts [`git-lfs` packages](https://packagecloud.io/github/git-lfs) for popular Linux distributions with Apt/deb and Yum/rpm based package managers. Installing from packagecloud is reasonably straightforward and involves:

* Adding the packagecloud repo that best matches your Linux distribution and version, then
* Running your package manager's install command

## Adding the packagecloud repository

packagecloud provides scripts to automate the process of configuring the package repository on your system, importing signing keys, etc. These scripts must be run as root via `sudo`, and you should review them first. The scripts are:

* Apt/deb repositories: https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh
* Yum/rpm repositories: https://packagecloud.io/install/repositories/github/git-lfs/script.rpm.sh

The scripts check your Linux distribution and version, and use those parameters to create the best repository URL. If you are running one of the distributions listed for the latest version of Git LFS at [packagecloud](https://packagecloud.io/github/git-lfs), e.g. `debian/jessie` or `el/7`, you can run the script without parameters:

Apt/deb repos: `curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo bash`

Yum/rpm repos: `curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.rpm.sh | sudo bash`

If you are running a distribution which does not exactly match a repository uploaded for Git LFS, but for which there is a repository for a compatible upstream distribution, you can either run the script with some additional parameters, or run it and then manually correct the resulting repository URLs. See [#1074](https://github.com/git-lfs/git-lfs/issues/1074) for details.

If you are running LinuxMint 17.1 Rebecca, which is downstream of Ubuntu Trusty and Debian Jessie, you can run:

`curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | os=debian dist=jessie sudo -E bash`

The `os` and `dist` variables passed in will override what would be detected for your system and force the selection of the upstream distribution's repository.
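The override mechanic is simple: any `os`/`dist` you supply wins over detection. As a minimal illustration of that pattern, here is a hypothetical Go sketch (not the actual packagecloud script, which is shell; the detection logic and the printed repository shape are invented for the example):

```go
package main

import (
	"fmt"
	"os"
)

// detectDistro stands in for the script's real detection logic, which
// inspects files such as /etc/os-release. Hard-coded here for brevity.
func detectDistro() (osName, dist string) {
	return "linuxmint", "rebecca"
}

func main() {
	osName, dist := detectDistro()
	// Explicit overrides, if present in the environment, take precedence.
	if v := os.Getenv("os"); v != "" {
		osName = v // e.g. os=debian
	}
	if v := os.Getenv("dist"); v != "" {
		dist = v // e.g. dist=jessie
	}
	// Illustrative only; the real script assembles a packagecloud repo URL.
	fmt.Printf("configuring repository for %s/%s\n", osName, dist)
}
```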
## Installing packages

With the packagecloud repository configured for your system, you can install Git LFS:

* Apt/deb: `sudo apt-get install git-lfs`
* Yum/rpm: `sudo yum install git-lfs`

## A note about proxies

Several of the commands above assume internet access and use `sudo`. If your host is behind a proxy server that is required for internet access, you may depend on the environment variables `http_proxy` or `https_proxy` being set, and these might not survive the switch to root with `sudo`, which resets the environment by default. To get around this, you can run `sudo` with the `-E` switch, `sudo -E ...`, which retains environment variables.

git-lfs-2.3.4/LICENSE.md000066400000000000000000000021131317167762300145030ustar00rootroot00000000000000MIT License

Copyright (c) 2014-2016 GitHub, Inc. and Git LFS contributors

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
git-lfs-2.3.4/Makefile000066400000000000000000000020571317167762300145460ustar00rootroot00000000000000GOC ?= gccgo AR ?= ar SRCDIR := $(dir $(lastword $(MAKEFILE_LIST))) LIBDIR := out/github.com/git-lfs/git-lfs GOFLAGS := -Iout ifeq ($(MAKEFILE_GEN),) MAKEFILE_GEN := out/Makefile.gen all: $(MAKEFILE_GEN) @$(MAKE) -f $(lastword $(MAKEFILE_LIST)) $(MAKEFLAGS) MAKEFILE_GEN=$(MAKEFILE_GEN) $@ $(MAKEFILE_GEN) : out/genmakefile $(SRCDIR)commands/mancontent_gen.go @mkdir -p $(dir $@) $< "$(SRCDIR)" github.com/git-lfs/git-lfs/ > $@ else all : bin/git-lfs include $(MAKEFILE_GEN) $(LIBDIR)/git-lfs.o : $(SRC_main) $(DEPS_main) @mkdir -p $(dir $@) $(GOC) $(GOFLAGS) -c -o $@ $(SRC_main) bin/git-lfs : $(LIBDIR)/git-lfs.o $(DEPS_main) @mkdir -p $(dir $@) $(GOC) $(GOFLAGS) -o $@ $^ %.a : %.o $(AR) rc $@ $< endif $(SRCDIR)commands/mancontent_gen.go : out/mangen cd $(SRCDIR)commands && $(CURDIR)/out/mangen out/mangen : $(SRCDIR)docs/man/mangen.go @mkdir -p $(dir $@) $(GOC) -o $@ $< out/genmakefile : $(SRCDIR)script/genmakefile/genmakefile.go @mkdir -p $(dir $@) $(GOC) -o $@ $< clean : rm -rf out bin rm -f $(SRCDIR)commands/mancontent_gen.go git-lfs-2.3.4/README.md000066400000000000000000000130701317167762300143620ustar00rootroot00000000000000# Git Large File Storage | Linux | macOS | Windows | | :---- | :------ | :---- | [ ![Linux build status][1]][2] | [![macOS build status][3]][4] | [![Windows build status][5]][6] | [1]: https://travis-ci.org/git-lfs/git-lfs.svg?branch=master [2]: https://travis-ci.org/git-lfs/git-lfs [3]: https://circleci.com/gh/git-lfs/git-lfs.svg?style=shield&circle-token=856152c2b02bfd236f54d21e1f581f3e4ebf47ad [4]: https://circleci.com/gh/git-lfs/git-lfs [5]: https://ci.appveyor.com/api/projects/status/46a5yoqc3hk59bl5/branch/master?svg=true [6]: https://ci.appveyor.com/project/git-lfs/git-lfs/branch/master Git LFS is a command line extension and [specification](docs/spec.md) for managing large files with Git. The client is written in Go, with pre-compiled binaries available for Mac, Windows, Linux, and FreeBSD. Check out the [Git LFS website][page] for an overview of features. [page]: https://git-lfs.github.com/ ## Getting Started By default, the Git LFS client needs a Git LFS server to sync the large files it manages. This works out of the box when using popular git repository hosting providers like GitHub, Atlassian, etc. When you host your own vanilla git server, for example, you need to either use a separate [Git LFS server instance](https://github.com/git-lfs/git-lfs/wiki/Implementations), or use the [custom transfer adapter](docs/custom-transfers.md) with a transfer agent in blind mode, without having to use a Git LFS server instance. You can install the Git LFS client in several different ways, depending on your setup and preferences. * Linux users can install Debian or RPM packages from [PackageCloud](https://packagecloud.io/github/git-lfs/install). See the [Installation Guide](./INSTALLING.md) for details. * Mac users can install from [Homebrew](https://github.com/Homebrew/homebrew) with `brew install git-lfs`, or from [MacPorts](https://www.macports.org) with `port install git-lfs`. * Windows users can install from [Chocolatey](https://chocolatey.org/) with `choco install git-lfs`. * [Binary packages are available][rel] for Windows, Mac, Linux, and FreeBSD. * You can build it with Go 1.8.1+. See the [Contributing Guide](./CONTRIBUTING.md) for instructions. [rel]: https://github.com/git-lfs/git-lfs/releases Note: Git LFS requires Git v1.8.5 or higher. 
Once installed, you need to set up the global Git hooks for Git LFS. This only needs to be done once per machine.

```bash
$ git lfs install
```

Now, it's time to add some large files to a repository. The first step is to specify file patterns to store with Git LFS. These file patterns are stored in `.gitattributes`.

```bash
$ mkdir large-repo
$ cd large-repo
$ git init

# Add all zip files through Git LFS
$ git lfs track "*.zip"
```

Now you're ready to push some commits:

```bash
$ git add .gitattributes
$ git add my.zip
$ git commit -m "add zip"
```

You can confirm that Git LFS is managing your zip file:

```bash
$ git lfs ls-files
my.zip
```

Once you've made your commits, push your files to the Git remote:

```bash
$ git push origin master
Sending my.zip
LFS: 12.58 MB / 12.58 MB  100.00 %
Counting objects: 2, done.
Delta compression using up to 8 threads.
Compressing objects: 100% (5/5), done.
Writing objects: 100% (5/5), 548 bytes | 0 bytes/s, done.
Total 5 (delta 1), reused 0 (delta 0)
To https://github.com/git-lfs/git-lfs-test
   67fcf6a..47b2002  master -> master
```

## Need Help?

You can get help on specific commands directly:

```bash
$ git lfs help <subcommand>
```

The [official documentation](docs) has command references and specifications for the tool.

You can ask questions in the [Git LFS chat room][chat], or [file a new issue][ish]. Be sure to include details about the problem so we can troubleshoot it.

1. Include the output of `git lfs env`, which shows how your Git environment is set up.
2. Include `GIT_TRACE=1` in any bad Git commands to enable debug messages.
3. If the output includes a message like `Errors logged to /path/to/.git/lfs/objects/logs/*.log`, include the contents in the issue, either inline or as a link to a Gist or paste site.

[chat]: https://gitter.im/git-lfs/git-lfs
[ish]: https://github.com/git-lfs/git-lfs/issues

## Contributing

See [CONTRIBUTING.md](CONTRIBUTING.md) for info on working on Git LFS and sending patches. Related projects are listed on the [Implementations wiki page][impl]. You can also join [the project's chat room][chat].

[impl]: https://github.com/git-lfs/git-lfs/wiki/Implementations

### Using LFS from other Go code

At the moment git-lfs is only focused on the stability of its command line interface, and the [server APIs](docs/api/README.md). The contents of the source packages are subject to change. We therefore currently discourage other Go code from depending on the git-lfs packages directly; an API to be used by external Go code may be provided in the future.

## Core Team

These are the humans that form the Git LFS core team, which runs the project. In alphabetical order:

| [@andyneff](https://github.com/andyneff) | [@rubyist](https://github.com/rubyist) | [@sinbad](https://github.com/sinbad) | [@technoweenie](https://github.com/technoweenie) | [@ttaylorr](https://github.com/ttaylorr) |
|---|---|---|---|---|
| [![](https://avatars1.githubusercontent.com/u/7596961?v=3&s=100)](https://github.com/andyneff) | [![](https://avatars1.githubusercontent.com/u/143?v=3&s=100)](https://github.com/rubyist) | [![](https://avatars1.githubusercontent.com/u/142735?v=3&s=100)](https://github.com/sinbad) | [![](https://avatars3.githubusercontent.com/u/21?v=3&s=100)](https://github.com/technoweenie) | [![](https://avatars3.githubusercontent.com/u/443245?v=3&s=100)](https://github.com/ttaylorr) |

git-lfs-2.3.4/ROADMAP.md000066400000000000000000000122261317167762300145120ustar00rootroot00000000000000# Git LFS Roadmap

This is a high-level overview of some of the big changes we want to make for Git LFS.
If you have an idea for a new feature, open an issue for discussion. ## Bugs/Features | | Name | Ref | | ------ | ---- | --- | | | git index issues | [#937](https://github.com/git-lfs/git-lfs/issues/937) | | :soon: | Add ref information to upload request | [#969](https://github.com/git-lfs/git-lfs/issues/969) | | :soon: | Socks proxy support | [#1424](https://github.com/git-lfs/git-lfs/issues/1424) | | :no_entry_sign: | Not following 301 redirect | [#1129](https://github.com/git-lfs/git-lfs/issues/1129) | | | add all lfs.\* git config keys to git lfs env output | | | | credential output hidden while transferring files | [#387](https://github.com/git-lfs/git-lfs/pull/387) | | | Support multiple git alternates | | | | Investigate `git lfs checkout` hardlinking instead of copying files. | | | | Investigate `--shared` and `--dissociate` options for `git clone` (similar to `--references`) | | | | Investigate `GIT_SSH_COMMAND` | [#1142](https://github.com/git-lfs/git-lfs/issues/1142) | | | | Teach `git lfs install` to use `git config --system` instead of `git config --global` by default | [#1177](https://github.com/git-lfs/git-lfs/pull/1177) | | | Investigate `git -c lfs.url=... lfs clone` usage | | | | Test that manpages are built and included | [#1149](https://github.com/git-lfs/git-lfs/pull/1149) | | | Update CI to build from source outside of git repo | [#1156](https://github.com/git-lfs/git-lfs/issues/1156#issuecomment-211574343) | | :soon: | Teach `git lfs track` and others to warn when `git lfs install` hasn't been run (or auto-install) | [#1167](https://github.com/git-lfs/git-lfs/issues/1167) | | | Investigate hanging pushes/pulls when git credential helper is not set | [#197](https://github.com/git-lfs/git-lfs/issues/197) | | | Support git ssh shorthands | [#278](https://github.com/git-lfs/git-lfs/issues/278) | | | Support `GIT_CONFIG` | [#318](https://github.com/git-lfs/git-lfs/issues/318) | | | Warn when Git version is unsupported | [#410](https://github.com/git-lfs/git-lfs/issues/410) | | | Detect when credential cacher is not setup | [#523](https://github.com/git-lfs/git-lfs/issues/523) | | | Fix error logging from `git clone` errors | [#513](https://github.com/git-lfs/git-lfs/issues/513) | | | Investigate cherry picking issues | [#438](https://github.com/git-lfs/git-lfs/issues/438) | | | dynamic blob size cutoff for pointers | [#524](https://github.com/git-lfs/git-lfs/issues/524) | | | windows `--help` support | [#394](https://github.com/git-lfs/git-lfs/issues/394) | | | Investigate git hook installs within git worktree | [#1385](https://github.com/git-lfs/git-lfs/issues/1385) | | | Support ssh username in ssh config | [#754](https://github.com/git-lfs/git-lfs/issues/754) | | | Investigate `autocrlf` for lfs objects | [#723](https://github.com/git-lfs/git-lfs/issues/723) | ## Upcoming Features | | Name | Ref | | ------ | ---- | --- | | :construction: | File locking | [#666](https://github.com/git-lfs/git-lfs/pull/666) | | :ship: | Resumable uploads and downloads | [#414](https://github.com/git-lfs/git-lfs/issues/414) | | :construction: | Wrapped versions of `git pull` & `git checkout` that optimize without filters like `git lfs clone` | | | | Remove non-batch API route in client | | ## Possible Features | | Name | Ref | | ------ | ---- | --- | | | Support tracking files by size | [#282](https://github.com/git-lfs/git-lfs/issues/282) | | Binary diffing - reduce the amount of content sent over the wire. 
| |
| | Client side metrics reporting, so the Git LFS server can optionally track how clients are performing. | |
| | Pure SSH: full API & transfer support for SSH without redirect to HTTP | |
| | Compression of files in `.git/lfs/objects` | [#260](https://github.com/git-lfs/git-lfs/issues/260) |
| | LFS Migration tool | [#326](https://github.com/git-lfs/git-lfs/issues/326) |
| | Automatic upgrades | [#531](https://github.com/git-lfs/git-lfs/issues/531) |
| | Investigate `git add` hash caching | [#574](https://github.com/git-lfs/git-lfs/issues/574) |
| | `git lfs archive` command | [#1322](https://github.com/git-lfs/git-lfs/issues/1322) |
| | Support 507 http responses | [#1327](https://github.com/git-lfs/git-lfs/issues/1327) |
| | Investigate shared object directory | [#766](https://github.com/git-lfs/git-lfs/issues/766) |

## Project Related

These are items that don't affect Git LFS end users.

| | Name | Ref |
| ------ | ---- | --- |
| :ship: | CI builds for Windows. | [#1567](https://github.com/git-lfs/git-lfs/pull/1567) |
| | Automated build servers that build Git LFS on native platforms. | |
| | Automated QA test suite for running release candidates through a gauntlet of open source and proprietary Git LFS environments. | |
| | Automatic updates of the Git LFS client. | [#531](https://github.com/git-lfs/git-lfs/issues/531) |

## Legend

* :ship: - Completed
* :construction: - In Progress
* :soon: - Up next
* :no_entry_sign: - Blocked

## How this works

0. Roadmap items are listed within their category in order of priority.
0. Roadmap items are kept up-to-date with the above legend.
0. Roadmap items are pruned once a release of LFS has been published.

git-lfs-2.3.4/appveyor.yml000066400000000000000000000023071317167762300154740ustar00rootroot00000000000000skip_branch_with_pr: true

environment:
  GOPATH: $(HOMEDRIVE)$(HOMEPATH)\go
  MSYSTEM: MINGW64

clone_folder: $(GOPATH)\src\github.com\git-lfs\git-lfs

install:
  - rd C:\Go /s /q
  - cinst golang --version 1.8.3 -y
  - cinst InnoSetup -y
  - refreshenv
  - ps: |
      echo "Go directories in machine PATH environment:"
      [environment]::GetEnvironmentVariable("PATH","Machine").split(";") | Select-String -Pattern "\\go\\"
      echo "Go directories in user PATH environment:"
      [environment]::GetEnvironmentVariable("PATH","User").split(";") | Select-String -Pattern "\\go\\"
      echo "Go directories in process PATH environment:"
      [environment]::GetEnvironmentVariable("PATH","Process").split(";") | Select-String -Pattern "\\go\\"
      echo "Go version information:"
      go version

build_script:
  - bash --login -c 'GOARCH=386 script/bootstrap'
  - mv bin\git-lfs.exe git-lfs-x86.exe
  - bash --login -c 'GOARCH=amd64 script/bootstrap'
  - mv bin\git-lfs.exe git-lfs-x64.exe

after_build:
  - iscc script\windows-installer\inno-setup-git-lfs-installer.iss

test_script:
  - bash --login script/cibuild

artifacts:
  - path: git-lfs-x86.exe
  - path: git-lfs-x64.exe
  - path: git-lfs-windows-*.exe

git-lfs-2.3.4/circle.yml000066400000000000000000000024441317167762300150720ustar00rootroot00000000000000machine:
  environment:
    GIT_LFS_TEST_DIR: $HOME/git-lfs-tests
    GIT_SOURCE_REPO: https://github.com/git/git.git
    GIT_EARLIEST_SUPPORTED_VERSION: v2.0.0
    GIT_LATEST_SOURCE_BRANCH: master
    XCODE_SCHEME: test
    XCODE_WORKSPACE: test
    XCODE_PROJECT: test
    GOPATH: $HOME/git-lfs/.go
    NO_OPENSSL: YesPlease
    APPLE_COMMON_CRYPTO: YesPlease
  xcode:
    version: 8.2

general:
  build_dir: .go/src/github.com/git-lfs/git-lfs

checkout:
  post:
    - mkdir -p ~/git-lfs/.go/src/github.com/git-lfs
    - ln -s ~/git-lfs ~/git-lfs/.go/src/github.com/git-lfs

dependencies:
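  # The 'pre' steps below install an up-to-date toolchain (Go, Git, gettext)
  # via Homebrew; 'override' then runs script/bootstrap and fetches the extra
  # packages needed by git-lfs-test-server-api.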
pre: - brew update - brew prune - brew upgrade go || brew install go - brew upgrade git || brew install git - brew upgrade gettext || brew install gettext - brew link --force gettext - git clone $GIT_SOURCE_REPO git-source override: - script/bootstrap # needed for git-lfs-test-server-api - go get -d -v github.com/spf13/cobra - go get -d -v github.com/ThomsonReutersEikon/go-ntlm/ntlm test: override: - script/cibuild - script/install-git-source "$GIT_EARLIEST_SUPPORTED_VERSION" - PATH="$HOME/bin:$PATH" SKIPCOMPILE=1 script/integration - script/install-git-source "$GIT_LATEST_SOURCE_BRANCH" - PATH="$HOME/bin:$PATH" SKIPCOMPILE=1 script/integration git-lfs-2.3.4/commands/000077500000000000000000000000001317167762300147035ustar00rootroot00000000000000git-lfs-2.3.4/commands/command_checkout.go000066400000000000000000000034661317167762300205460ustar00rootroot00000000000000package commands import ( "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/progress" "github.com/spf13/cobra" ) func checkoutCommand(cmd *cobra.Command, args []string) { requireInRepo() ref, err := git.CurrentRef() if err != nil { Panic(err, "Could not checkout") } var totalBytes int64 var pointers []*lfs.WrappedPointer meter := progress.NewMeter(progress.WithOSEnv(cfg.Os)) chgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { if err != nil { LoggedError(err, "Scanner error: %s", err) return } totalBytes += p.Size meter.Add(p.Size) meter.StartTransfer(p.Name) pointers = append(pointers, p) }) chgitscanner.Filter = filepathfilter.New(rootedPaths(args), nil) if err := chgitscanner.ScanTree(ref.Sha); err != nil { ExitWithError(err) } chgitscanner.Close() singleCheckout := newSingleCheckout() meter.Start() for _, p := range pointers { singleCheckout.Run(p) // not strictly correct (parallel) but we don't have a callback & it's just local // plus only 1 slot in channel so it'll block & be close meter.TransferBytes("checkout", p.Name, p.Size, totalBytes, int(p.Size)) meter.FinishTransfer(p.Name) } meter.Finish() singleCheckout.Close() } // Parameters are filters // firstly convert any pathspecs to the root of the repo, in case this is being // executed in a sub-folder func rootedPaths(args []string) []string { pathConverter, err := lfs.NewCurrentToRepoPathConverter() if err != nil { Panic(err, "Could not checkout") } rootedpaths := make([]string, 0, len(args)) for _, arg := range args { rootedpaths = append(rootedpaths, pathConverter.Convert(arg)) } return rootedpaths } func init() { RegisterCommand("checkout", checkoutCommand, nil) } git-lfs-2.3.4/commands/command_clean.go000066400000000000000000000057601317167762300200220ustar00rootroot00000000000000package commands import ( "io" "os" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/progress" "github.com/spf13/cobra" ) // clean cleans an object read from the given `io.Reader`, "from", and writes // out a corresponding pointer to the `io.Writer`, "to". If there were any // errors encountered along the way, they will be returned immediately if the // error is non-fatal, otherwise they will halt using the built in // `commands.Panic`. // // If fileSize is given as a non-negative (>= 0) integer, that value is used // with preference to os.Stat(fileName).Size(). If it is given as negative, the // value from the `stat(1)` call will be used instead. 
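//
// For reference, the pointer that clean writes to "to" is a small text blob
// of the following shape (the oid and size shown are example values):
//
//	version https://git-lfs.github.com/spec/v1
//	oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393
//	size 12345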
// // If the object read from "from" is _already_ a clean pointer, then it will be // written out verbatim to "to", without trying to make it a pointer again. func clean(to io.Writer, from io.Reader, fileName string, fileSize int64) (*lfs.Pointer, error) { var cb progress.CopyCallback var file *os.File if len(fileName) > 0 { stat, err := os.Stat(fileName) if err == nil && stat != nil { if fileSize < 0 { fileSize = stat.Size() } localCb, localFile, err := lfs.CopyCallbackFile("clean", fileName, 1, 1) if err != nil { Error(err.Error()) } else { cb = localCb file = localFile } } } cleaned, err := lfs.PointerClean(from, fileName, fileSize, cb) if file != nil { file.Close() } if cleaned != nil { defer cleaned.Teardown() } if errors.IsCleanPointerError(err) { // If the contents read from the working directory was _already_ // a pointer, we'll get a `CleanPointerError`, with the context // containing the bytes that we should write back out to Git. _, err = to.Write(errors.GetContext(err, "bytes").([]byte)) return nil, err } if err != nil { ExitWithError(errors.Wrap(err, "Error cleaning LFS object")) } tmpfile := cleaned.Filename mediafile, err := lfs.LocalMediaPath(cleaned.Oid) if err != nil { Panic(err, "Unable to get local media path.") } if stat, _ := os.Stat(mediafile); stat != nil { if stat.Size() != cleaned.Size && len(cleaned.Pointer.Extensions) == 0 { Exit("Files don't match:\n%s\n%s", mediafile, tmpfile) } Debug("%s exists", mediafile) } else { if err := os.Rename(tmpfile, mediafile); err != nil { Panic(err, "Unable to move %s to %s\n", tmpfile, mediafile) } Debug("Writing %s", mediafile) } _, err = lfs.EncodePointer(to, cleaned.Pointer) return cleaned.Pointer, err } func cleanCommand(cmd *cobra.Command, args []string) { requireStdin("This command should be run by the Git 'clean' filter") lfs.InstallHooks(false) var fileName string if len(args) > 0 { fileName = args[0] } ptr, err := clean(os.Stdout, os.Stdin, fileName, -1) if err != nil { Error(err.Error()) } if ptr != nil && possiblyMalformedObjectSize(ptr.Size) { Error("Possibly malformed conversion on Windows, see `git lfs help smudge` for more details.") } } func init() { RegisterCommand("clean", cleanCommand, nil) } git-lfs-2.3.4/commands/command_clone.go000066400000000000000000000154371317167762300200420ustar00rootroot00000000000000package commands import ( "fmt" "os" "path" "path/filepath" "strings" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/localstorage" "github.com/git-lfs/git-lfs/subprocess" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/tools" "github.com/spf13/cobra" ) var ( cloneFlags git.CloneFlags cloneSkipRepoInstall bool ) func cloneCommand(cmd *cobra.Command, args []string) { requireGitVersion() if git.Config.IsGitVersionAtLeast("2.15.0") { msg := []string{ "WARNING: 'git lfs clone' is deprecated and will not be updated", " with new flags from 'git clone'", "", "'git clone' has been updated in upstream Git to have comparable", "speeds to 'git lfs clone'.", } fmt.Fprintln(os.Stderr, strings.Join(msg, "\n")) } // We pass all args to git clone err := git.CloneWithoutFilters(cloneFlags, args) if err != nil { Exit("Error(s) during clone:\n%v", err) } // now execute pull (need to be inside dir) cwd, err := tools.Getwd() if err != nil { Exit("Unable to derive current working dir: %v", err) } // Either the last argument was a relative or local dir, or we have to // derive it from the clone URL clonedir, err := filepath.Abs(args[len(args)-1]) if err != nil || !tools.DirExists(clonedir) { // Derive 
from clone URL instead base := path.Base(args[len(args)-1]) if strings.HasSuffix(base, ".git") { base = base[:len(base)-4] } clonedir, _ = filepath.Abs(base) if !tools.DirExists(clonedir) { Exit("Unable to find clone dir at %q", clonedir) } } err = os.Chdir(clonedir) if err != nil { Exit("Unable to change directory to clone dir %q: %v", clonedir, err) } // Make sure we pop back to dir we started in at the end defer os.Chdir(cwd) // Also need to derive dirs now localstorage.ResolveDirs() requireInRepo() // Now just call pull with default args // Support --origin option to clone var remote string if len(cloneFlags.Origin) > 0 { remote = cloneFlags.Origin } else { remote = "origin" } if ref, err := git.CurrentRef(); err == nil { includeArg, excludeArg := getIncludeExcludeArgs(cmd) filter := buildFilepathFilter(cfg, includeArg, excludeArg) if cloneFlags.NoCheckout || cloneFlags.Bare { // If --no-checkout or --bare then we shouldn't check out, just fetch instead cfg.CurrentRemote = remote fetchRef(ref.Name, filter) } else { pull(remote, filter) err := postCloneSubmodules(args) if err != nil { Exit("Error performing 'git lfs pull' for submodules: %v", err) } } } if !cloneSkipRepoInstall { // If --skip-repo wasn't given, install repo-level hooks while // we're still in the checkout directory. if err := lfs.InstallHooks(false); err != nil { ExitWithError(err) } } } func postCloneSubmodules(args []string) error { // In git 2.9+ the filter option will have been passed through to submodules // So we need to lfs pull inside each if !git.Config.IsGitVersionAtLeast("2.9.0") { // In earlier versions submodules would have used smudge filter return nil } // Also we only do this if --recursive or --recurse-submodules was provided if !cloneFlags.Recursive && !cloneFlags.RecurseSubmodules { return nil } // Use `git submodule foreach --recursive` to cascade into nested submodules // Also good to call a new instance of git-lfs rather than do things // inside this instance, since that way we get a clean env in that subrepo cmd := subprocess.ExecCommand("git", "submodule", "foreach", "--recursive", "git lfs pull") cmd.Stderr = os.Stderr cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout return cmd.Run() } func init() { RegisterCommand("clone", cloneCommand, func(cmd *cobra.Command) { cmd.PreRun = nil // Mirror all git clone flags cmd.Flags().StringVarP(&cloneFlags.TemplateDirectory, "template", "", "", "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Local, "local", "l", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Shared, "shared", "s", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.NoHardlinks, "no-hardlinks", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Quiet, "quiet", "q", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.NoCheckout, "no-checkout", "n", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Progress, "progress", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Bare, "bare", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Mirror, "mirror", "", false, "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.Origin, "origin", "o", "", "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.Branch, "branch", "b", "", "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.Upload, "upload-pack", "u", "", "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.Reference, "reference", "", "", "See 'git clone --help'") 
cmd.Flags().StringVarP(&cloneFlags.ReferenceIfAble, "reference-if-able", "", "", "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Dissociate, "dissociate", "", false, "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.SeparateGit, "separate-git-dir", "", "", "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.Depth, "depth", "", "", "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Recursive, "recursive", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.RecurseSubmodules, "recurse-submodules", "", false, "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.Config, "config", "c", "", "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.SingleBranch, "single-branch", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.NoSingleBranch, "no-single-branch", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Verbose, "verbose", "v", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Ipv4, "ipv4", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.Ipv6, "ipv6", "", false, "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.ShallowSince, "shallow-since", "", "", "See 'git clone --help'") cmd.Flags().StringVarP(&cloneFlags.ShallowExclude, "shallow-exclude", "", "", "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.ShallowSubmodules, "shallow-submodules", "", false, "See 'git clone --help'") cmd.Flags().BoolVarP(&cloneFlags.NoShallowSubmodules, "no-shallow-submodules", "", false, "See 'git clone --help'") cmd.Flags().Int64VarP(&cloneFlags.Jobs, "jobs", "j", -1, "See 'git clone --help'") cmd.Flags().StringVarP(&includeArg, "include", "I", "", "Include a list of paths") cmd.Flags().StringVarP(&excludeArg, "exclude", "X", "", "Exclude a list of paths") cmd.Flags().BoolVar(&cloneSkipRepoInstall, "skip-repo", false, "Skip LFS repo setup") }) } git-lfs-2.3.4/commands/command_env.go000066400000000000000000000026701317167762300175250ustar00rootroot00000000000000package commands import ( "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/spf13/cobra" ) func envCommand(cmd *cobra.Command, args []string) { config.ShowConfigWarnings = true endpoint := getAPIClient().Endpoints.Endpoint("download", cfg.CurrentRemote) gitV, err := git.Config.Version() if err != nil { gitV = "Error getting git version: " + err.Error() } Print(config.VersionDesc) Print(gitV) Print("") if len(endpoint.Url) > 0 { access := getAPIClient().Endpoints.AccessFor(endpoint.Url) Print("Endpoint=%s (auth=%s)", endpoint.Url, access) if len(endpoint.SshUserAndHost) > 0 { Print(" SSH=%s:%s", endpoint.SshUserAndHost, endpoint.SshPath) } } for _, remote := range cfg.Remotes() { remoteEndpoint := getAPIClient().Endpoints.RemoteEndpoint("download", remote) remoteAccess := getAPIClient().Endpoints.AccessFor(remoteEndpoint.Url) Print("Endpoint (%s)=%s (auth=%s)", remote, remoteEndpoint.Url, remoteAccess) if len(remoteEndpoint.SshUserAndHost) > 0 { Print(" SSH=%s:%s", remoteEndpoint.SshUserAndHost, remoteEndpoint.SshPath) } } for _, env := range lfs.Environ(cfg, getTransferManifest()) { Print(env) } for _, key := range []string{"filter.lfs.process", "filter.lfs.smudge", "filter.lfs.clean"} { value, _ := cfg.Git.Get(key) Print("git config %s = %q", key, value) } } func init() { RegisterCommand("env", envCommand, nil) } 
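// For illustration only: with a single remote configured, the Print calls
// above emit lines shaped like the following (the values here are
// hypothetical):
//
//	Endpoint=https://example.com/user/repo.git/info/lfs (auth=basic)
//	git config filter.lfs.process = "git-lfs filter-process"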
git-lfs-2.3.4/commands/command_ext.go000066400000000000000000000015421317167762300175320ustar00rootroot00000000000000package commands import ( "fmt" "github.com/git-lfs/git-lfs/config" "github.com/spf13/cobra" ) func extCommand(cmd *cobra.Command, args []string) { printAllExts() } func extListCommand(cmd *cobra.Command, args []string) { n := len(args) if n == 0 { printAllExts() return } for _, key := range args { ext := cfg.Extensions()[key] printExt(ext) } } func printAllExts() { extensions, err := cfg.SortedExtensions() if err != nil { fmt.Println(err) return } for _, ext := range extensions { printExt(ext) } } func printExt(ext config.Extension) { Print("Extension: %s", ext.Name) Print(" clean = %s", ext.Clean) Print(" smudge = %s", ext.Smudge) Print(" priority = %d", ext.Priority) } func init() { RegisterCommand("ext", extCommand, func(cmd *cobra.Command) { cmd.AddCommand(NewCommand("list", extListCommand)) }) } git-lfs-2.3.4/commands/command_fetch.go000066400000000000000000000242711317167762300200270ustar00rootroot00000000000000package commands import ( "fmt" "time" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/progress" "github.com/git-lfs/git-lfs/tq" "github.com/rubyist/tracerx" "github.com/spf13/cobra" ) var ( fetchRecentArg bool fetchAllArg bool fetchPruneArg bool ) func getIncludeExcludeArgs(cmd *cobra.Command) (include, exclude *string) { includeFlag := cmd.Flag("include") excludeFlag := cmd.Flag("exclude") if includeFlag.Changed { include = &includeArg } if excludeFlag.Changed { exclude = &excludeArg } return } func fetchCommand(cmd *cobra.Command, args []string) { requireInRepo() var refs []*git.Ref if len(args) > 0 { // Remote is first arg if err := git.ValidateRemote(args[0]); err != nil { Exit("Invalid remote name %q", args[0]) } cfg.CurrentRemote = args[0] } else { cfg.CurrentRemote = "" } if len(args) > 1 { resolvedrefs, err := git.ResolveRefs(args[1:]) if err != nil { Panic(err, "Invalid ref argument: %v", args[1:]) } refs = resolvedrefs } else if !fetchAllArg { ref, err := git.CurrentRef() if err != nil { Panic(err, "Could not fetch") } refs = []*git.Ref{ref} } success := true gitscanner := lfs.NewGitScanner(nil) defer gitscanner.Close() include, exclude := getIncludeExcludeArgs(cmd) if fetchAllArg { if fetchRecentArg || len(args) > 1 { Exit("Cannot combine --all with ref arguments or --recent") } if include != nil || exclude != nil { Exit("Cannot combine --all with --include or --exclude") } if len(cfg.FetchIncludePaths()) > 0 || len(cfg.FetchExcludePaths()) > 0 { Print("Ignoring global include / exclude paths to fulfil --all") } success = fetchAll() } else { // !all filter := buildFilepathFilter(cfg, include, exclude) // Fetch refs sequentially per arg order; duplicates in later refs will be ignored for _, ref := range refs { Print("Fetching %v", ref.Name) s := fetchRef(ref.Sha, filter) success = success && s } if fetchRecentArg || cfg.FetchPruneConfig().FetchRecentAlways { s := fetchRecent(refs, filter) success = success && s } } if fetchPruneArg { fetchconf := cfg.FetchPruneConfig() verify := fetchconf.PruneVerifyRemoteAlways // no dry-run or verbose options in fetch, assume false prune(fetchconf, verify, false, false) } if !success { c := getAPIClient() e := c.Endpoints.Endpoint("download", cfg.CurrentRemote) Exit("error: failed to fetch some objects from '%s'", e.Url) } } func pointersToFetchForRef(ref string, filter *filepathfilter.Filter) ([]*lfs.WrappedPointer, error) { 
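	// Collect pointers via a temporary scanner. Per-entry scan errors are
	// accumulated into multiErr below rather than aborting the whole scan.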
var pointers []*lfs.WrappedPointer var multiErr error tempgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { if err != nil { if multiErr != nil { multiErr = fmt.Errorf("%v\n%v", multiErr, err) } else { multiErr = err } return } pointers = append(pointers, p) }) tempgitscanner.Filter = filter if err := tempgitscanner.ScanTree(ref); err != nil { return nil, err } tempgitscanner.Close() return pointers, multiErr } // Fetch all binaries for a given ref (that we don't have already) func fetchRef(ref string, filter *filepathfilter.Filter) bool { pointers, err := pointersToFetchForRef(ref, filter) if err != nil { Panic(err, "Could not scan for Git LFS files") } return fetchAndReportToChan(pointers, filter, nil) } // Fetch all previous versions of objects from since to ref (not including final state at ref) // So this will fetch all the '-' sides of the diff from since to ref func fetchPreviousVersions(ref string, since time.Time, filter *filepathfilter.Filter) bool { var pointers []*lfs.WrappedPointer tempgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { if err != nil { Panic(err, "Could not scan for Git LFS previous versions") return } pointers = append(pointers, p) }) tempgitscanner.Filter = filter if err := tempgitscanner.ScanPreviousVersions(ref, since, nil); err != nil { ExitWithError(err) } tempgitscanner.Close() return fetchAndReportToChan(pointers, filter, nil) } // Fetch recent objects based on config func fetchRecent(alreadyFetchedRefs []*git.Ref, filter *filepathfilter.Filter) bool { fetchconf := cfg.FetchPruneConfig() if fetchconf.FetchRecentRefsDays == 0 && fetchconf.FetchRecentCommitsDays == 0 { return true } ok := true // Make a list of what unique commits we've already fetched for to avoid duplicating work uniqueRefShas := make(map[string]string, len(alreadyFetchedRefs)) for _, ref := range alreadyFetchedRefs { uniqueRefShas[ref.Sha] = ref.Name } // First find any other recent refs if fetchconf.FetchRecentRefsDays > 0 { Print("Fetching recent branches within %v days", fetchconf.FetchRecentRefsDays) refsSince := time.Now().AddDate(0, 0, -fetchconf.FetchRecentRefsDays) refs, err := git.RecentBranches(refsSince, fetchconf.FetchRecentRefsIncludeRemotes, cfg.CurrentRemote) if err != nil { Panic(err, "Could not scan for recent refs") } for _, ref := range refs { // Don't fetch for the same SHA twice if prevRefName, ok := uniqueRefShas[ref.Sha]; ok { if ref.Name != prevRefName { tracerx.Printf("Skipping fetch for %v, already fetched via %v", ref.Name, prevRefName) } } else { uniqueRefShas[ref.Sha] = ref.Name Print("Fetching %v", ref.Name) k := fetchRef(ref.Sha, filter) ok = ok && k } } } // For every unique commit we've fetched, check recent commits too if fetchconf.FetchRecentCommitsDays > 0 { for commit, refName := range uniqueRefShas { // We measure from the last commit at the ref summ, err := git.GetCommitSummary(commit) if err != nil { Error("Couldn't scan commits at %v: %v", refName, err) continue } Print("Fetching changes within %v days of %v", fetchconf.FetchRecentCommitsDays, refName) commitsSince := summ.CommitDate.AddDate(0, 0, -fetchconf.FetchRecentCommitsDays) k := fetchPreviousVersions(commit, commitsSince, filter) ok = ok && k } } return ok } func fetchAll() bool { pointers := scanAll() Print("Fetching objects...") return fetchAndReportToChan(pointers, nil, nil) } func scanAll() []*lfs.WrappedPointer { // This could be a long process so use the chan version & report progress Print("Scanning for all objects ever referenced...") 
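	// numObjs counts every pointer reported by the scanner; it drives the
	// spinner updates emitted below.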
spinner := progress.NewSpinner() var numObjs int64 // use temp gitscanner to collect pointers var pointers []*lfs.WrappedPointer var multiErr error tempgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { if err != nil { if multiErr != nil { multiErr = fmt.Errorf("%v\n%v", multiErr, err) } else { multiErr = err } return } numObjs++ spinner.Print(OutputWriter, fmt.Sprintf("%d objects found", numObjs)) pointers = append(pointers, p) }) if err := tempgitscanner.ScanAll(nil); err != nil { Panic(err, "Could not scan for Git LFS files") } tempgitscanner.Close() if multiErr != nil { Panic(multiErr, "Could not scan for Git LFS files") } spinner.Finish(OutputWriter, fmt.Sprintf("%d objects found", numObjs)) return pointers } // Fetch and report completion of each OID to a channel (optional, pass nil to skip) // Returns true if all completed with no errors, false if errors were written to stderr/log func fetchAndReportToChan(allpointers []*lfs.WrappedPointer, filter *filepathfilter.Filter, out chan<- *lfs.WrappedPointer) bool { // Lazily initialize the current remote. if len(cfg.CurrentRemote) == 0 { // Actively find the default remote, don't just assume origin defaultRemote, err := git.DefaultRemote() if err != nil { Exit("No default remote") } cfg.CurrentRemote = defaultRemote } ready, pointers, meter := readyAndMissingPointers(allpointers, filter) q := newDownloadQueue( getTransferManifestOperationRemote("download", cfg.CurrentRemote), cfg.CurrentRemote, tq.WithProgress(meter), ) if out != nil { // If we already have it, or it won't be fetched // report it to chan immediately to support pull/checkout for _, p := range ready { out <- p } dlwatch := q.Watch() go func() { // fetch only reports single OID, but OID *might* be referenced by multiple // WrappedPointers if same content is at multiple paths, so map oid->slice oidToPointers := make(map[string][]*lfs.WrappedPointer, len(pointers)) for _, pointer := range pointers { plist := oidToPointers[pointer.Oid] oidToPointers[pointer.Oid] = append(plist, pointer) } for t := range dlwatch { plist, ok := oidToPointers[t.Oid] if !ok { continue } for _, p := range plist { out <- p } } close(out) }() } for _, p := range pointers { tracerx.Printf("fetch %v [%v]", p.Name, p.Oid) q.Add(downloadTransfer(p)) } processQueue := time.Now() q.Wait() tracerx.PerformanceSince("process queue", processQueue) ok := true for _, err := range q.Errors() { ok = false FullError(err) } return ok } func readyAndMissingPointers(allpointers []*lfs.WrappedPointer, filter *filepathfilter.Filter) ([]*lfs.WrappedPointer, []*lfs.WrappedPointer, *progress.ProgressMeter) { meter := buildProgressMeter(false) seen := make(map[string]bool, len(allpointers)) missing := make([]*lfs.WrappedPointer, 0, len(allpointers)) ready := make([]*lfs.WrappedPointer, 0, len(allpointers)) for _, p := range allpointers { // no need to download the same object multiple times if seen[p.Oid] { continue } seen[p.Oid] = true // no need to download objects that exist locally already lfs.LinkOrCopyFromReference(p.Oid, p.Size) if lfs.ObjectExistsOfSize(p.Oid, p.Size) { ready = append(ready, p) continue } missing = append(missing, p) meter.Add(p.Size) } return ready, missing, meter } func init() { RegisterCommand("fetch", fetchCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&includeArg, "include", "I", "", "Include a list of paths") cmd.Flags().StringVarP(&excludeArg, "exclude", "X", "", "Exclude a list of paths") cmd.Flags().BoolVarP(&fetchRecentArg, "recent", "r", false, "Fetch 
recent refs & commits") cmd.Flags().BoolVarP(&fetchAllArg, "all", "a", false, "Fetch all LFS files ever referenced") cmd.Flags().BoolVarP(&fetchPruneArg, "prune", "p", false, "After fetching, prune old data") }) } git-lfs-2.3.4/commands/command_filter_process.go000066400000000000000000000245001317167762300217540ustar00rootroot00000000000000package commands import ( "bytes" "fmt" "io" "os" "strings" "sync" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/tq" "github.com/spf13/cobra" ) const ( // cleanFilterBufferCapacity is the desired capacity of the // `*git.PacketWriter`'s internal buffer when the filter protocol // dictates the "clean" command. 512 bytes is (in most cases) enough to // hold an entire LFS pointer in memory. cleanFilterBufferCapacity = 512 // smudgeFilterBufferCapacity is the desired capacity of the // `*git.PacketWriter`'s internal buffer when the filter protocol // dictates the "smudge" command. smudgeFilterBufferCapacity = git.MaxPacketLength ) // filterSmudgeSkip is a command-line flag owned by the `filter-process` command // dictating whether or not to skip the smudging process, leaving pointers as-is // in the working tree. var filterSmudgeSkip bool func filterCommand(cmd *cobra.Command, args []string) { requireStdin("This command should be run by the Git filter process") lfs.InstallHooks(false) s := git.NewFilterProcessScanner(os.Stdin, os.Stdout) if err := s.Init(); err != nil { ExitWithError(err) } caps, err := s.NegotiateCapabilities() if err != nil { ExitWithError(err) } var supportsDelay bool for _, cap := range caps { if cap == "capability=delay" { supportsDelay = true break } } skip := filterSmudgeSkip || cfg.Os.Bool("GIT_LFS_SKIP_SMUDGE", false) filter := filepathfilter.New(cfg.FetchIncludePaths(), cfg.FetchExcludePaths()) ptrs := make(map[string]*lfs.Pointer) var q *tq.TransferQueue closeOnce := new(sync.Once) available := make(chan *tq.Transfer) if supportsDelay { q = tq.NewTransferQueue( tq.Download, getTransferManifestOperationRemote("download", cfg.CurrentRemote), cfg.CurrentRemote, ) go infiniteTransferBuffer(q, available) } var malformed []string var malformedOnWindows []string for s.Scan() { var n int64 var err error var delayed bool var w *git.PktlineWriter req := s.Request() switch req.Header["command"] { case "clean": s.WriteStatus(statusFromErr(nil)) w = git.NewPktlineWriter(os.Stdout, cleanFilterBufferCapacity) var ptr *lfs.Pointer ptr, err = clean(w, req.Payload, req.Header["pathname"], -1) if ptr != nil { n = ptr.Size } case "smudge": w = git.NewPktlineWriter(os.Stdout, smudgeFilterBufferCapacity) if req.Header["can-delay"] == "1" { var ptr *lfs.Pointer n, delayed, ptr, err = delayedSmudge(s, w, req.Payload, q, req.Header["pathname"], skip, filter) if delayed { ptrs[req.Header["pathname"]] = ptr } } else { s.WriteStatus(statusFromErr(nil)) from, ferr := incomingOrCached(req.Payload, ptrs[req.Header["pathname"]]) if ferr != nil { break } n, err = smudge(w, from, req.Header["pathname"], skip, filter) if err == nil { delete(ptrs, req.Header["pathname"]) } } case "list_available_blobs": closeOnce.Do(func() { // The first time that Git sends us the // 'list_available_blobs' command, it is given // that no more smudge commands will be issued // with _new_ checkout entries. 
//
// This means that, by the time that we're here,
// we have seen all entries in the checkout, and
// should therefore instruct the transfer queue
// to make a batch out of whatever remaining
// items it has, and then close itself.
//
// This function call is wrapped in a
// `sync.(*Once).Do()` call so we only call
// `q.Wait()` once, and is called via a
// goroutine since `q.Wait()` is blocking.
go q.Wait()
})

// On the first, and all subsequent, calls to
// list_available_blobs, we read items from `tq.Watch()`
// until a read from that channel becomes blocking (in
// other words, we read until there are no more items
// immediately ready to be sent back to Git).
paths := pathnames(readAvailable(available, q.BatchSize()))
if len(paths) == 0 {
	// If `len(paths) == 0`, `tq.Watch()` has
	// closed, indicating that all items have been
	// completely processed, and therefore, sent
	// back to Git for checkout.
	for path := range ptrs {
		// If we sent a path to Git but it
		// didn't ask for the smudge contents,
		// that path is available and Git should
		// accept it later.
		paths = append(paths, fmt.Sprintf("pathname=%s", path))
	}
}
err = s.WriteList(paths)
default:
	ExitWithError(fmt.Errorf("Unknown command %q", req.Header["command"]))
}

if errors.IsNotAPointerError(err) {
	malformed = append(malformed, req.Header["pathname"])
	err = nil
} else if possiblyMalformedObjectSize(n) {
	malformedOnWindows = append(malformedOnWindows, req.Header["pathname"])
}

var status git.FilterProcessStatus
if delayed {
	// If delayed, there is no need to call w.Flush() since
	// no data was written. Calculate the status from the
	// given error using 'delayedStatusFromErr'.
	status = delayedStatusFromErr(err)
} else if ferr := w.Flush(); ferr != nil {
	// Otherwise, we do need to call w.Flush(), since we
	// have to assume that data was written. If the flush
	// operation was unsuccessful, calculate the status
	// using 'statusFromErr'.
	status = statusFromErr(ferr)
} else {
	// If the above flush was successful, we calculate the
	// status from the above clean, smudge, or
	// list_available_blobs command using statusFromErr,
	// since we did not delay.
	status = statusFromErr(err)
}

s.WriteStatus(status)
}

if len(malformed) > 0 {
	fmt.Fprintf(os.Stderr, "Encountered %d file(s) that should have been pointers, but weren't:\n", len(malformed))
	for _, m := range malformed {
		fmt.Fprintf(os.Stderr, "\t%s\n", m)
	}
}

if len(malformedOnWindows) > 0 {
	fmt.Fprintf(os.Stderr, "Encountered %d file(s) that may not have been copied correctly on Windows:\n", len(malformedOnWindows))
	for _, m := range malformedOnWindows {
		fmt.Fprintf(os.Stderr, "\t%s\n", m)
	}
	fmt.Fprintf(os.Stderr, "\nSee: `git lfs help smudge` for more details.\n")
}

if err := s.Err(); err != nil && err != io.EOF {
	ExitWithError(err)
}
}

// infiniteTransferBuffer streams the results of q.Watch() into "available" as
// if available had an infinite channel buffer.
func infiniteTransferBuffer(q *tq.TransferQueue, available chan<- *tq.Transfer) {
	// Stream results from q.Watch() into chan "available" via an infinite
	// buffer.
	watch := q.Watch()

	// pending is used to keep track of an ordered list of available
	// `*tq.Transfer`'s that cannot be written to "available" without
	// blocking.
	var pending []*tq.Transfer

	for {
		if len(pending) > 0 {
			select {
			case t, ok := <-watch:
				if !ok {
					// If the list of pending elements is
					// non-empty, stream them out (even if
					// they block), and then close().
for _, t = range pending { available <- t } close(available) return } pending = append(pending, t) case available <- pending[0]: // Otherwise, dequeue and shift the first // element from pending onto available. pending = pending[1:] } } else { t, ok := <-watch if !ok { // If watch is closed, the "tq" is done, and // there are no items on the buffer. Return // immediately. close(available) return } select { case available <- t: // Copy an item directly from <-watch onto available<-. default: // Otherwise, if that would have blocked, make // the new read pending. pending = append(pending, t) } } } } // incomingOrCached returns an io.Reader that is either the contents of the // given io.Reader "r", or the encoded contents of "ptr". It returns an error if // there was an error reading from "r". // // This is done because when a `command=smudge` with `can-delay=0` is issued, // the entry's contents are not sent, and must be re-encoded from the stored // pointer corresponding to the request's filepath. func incomingOrCached(r io.Reader, ptr *lfs.Pointer) (io.Reader, error) { buf := make([]byte, 1024) n, err := r.Read(buf) buf = buf[:n] if n == 0 { if ptr == nil { // If we read no data from the given io.Reader "r" _and_ // there was no data to fall back on, return an empty // io.Reader yielding no data. return bytes.NewReader(buf), nil } // If we read no data from the given io.Reader "r", _and_ there // is a pointer that we can fall back on, return an io.Reader // that yields the encoded version of the given pointer. return strings.NewReader(ptr.Encoded()), nil } if err == io.EOF { return bytes.NewReader(buf), nil } return io.MultiReader(bytes.NewReader(buf), r), err } // readAvailable satisfies the accumulation semantics for the // 'list_available_blobs' command. It accumulates items until: // // 1. Reading from the channel of available items blocks, or ... // 2. There is one item available, or ... // 3. The 'tq.TransferQueue' is completed. func readAvailable(ch <-chan *tq.Transfer, cap int) []*tq.Transfer { ts := make([]*tq.Transfer, 0, cap) for { select { case t, ok := <-ch: if !ok { return ts } ts = append(ts, t) default: if len(ts) > 0 { return ts } t, ok := <-ch if !ok { return ts } return append(ts, t) } } return ts } // pathnames formats a list of *tq.Transfers as a valid response to the // 'list_available_blobs' command. func pathnames(ts []*tq.Transfer) []string { pathnames := make([]string, 0, len(ts)) for _, t := range ts { pathnames = append(pathnames, fmt.Sprintf("pathname=%s", t.Name)) } return pathnames } // statusFromErr returns the status code that should be sent over the filter // protocol based on a given error, "err". func statusFromErr(err error) git.FilterProcessStatus { if err != nil && err != io.EOF { return git.StatusError } return git.StatusSuccess } // delayedStatusFromErr returns the status code that should be sent over the // filter protocol based on a given error, "err" when the blob smudge operation // was delayed. 
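//
// For example, a delayed smudge that completes without error reports
// git.StatusDelay rather than git.StatusSuccess, which tells Git to ask for
// the blob's contents later via the 'list_available_blobs' command.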
func delayedStatusFromErr(err error) git.FilterProcessStatus { status := statusFromErr(err) switch status { case git.StatusSuccess: return git.StatusDelay default: return status } } func init() { RegisterCommand("filter-process", filterCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&filterSmudgeSkip, "skip", "s", false, "") }) } git-lfs-2.3.4/commands/command_fsck.go000066400000000000000000000050251317167762300176600ustar00rootroot00000000000000package commands import ( "crypto/sha256" "encoding/hex" "io" "os" "path/filepath" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/spf13/cobra" ) var ( fsckDryRun bool ) // TODO(zeroshirts): 'git fsck' reports status (percentage, current#/total) as // it checks... we should do the same, as we are rehashing potentially gigs and // gigs of content. // // NOTE(zeroshirts): Ideally git would have hooks for fsck such that we could // chain a lfs-fsck, but I don't think it does. func fsckCommand(cmd *cobra.Command, args []string) { lfs.InstallHooks(false) requireInRepo() ref, err := git.CurrentRef() if err != nil { ExitWithError(err) } var corruptOids []string gitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { if err == nil { var pointerOk bool pointerOk, err = fsckPointer(p.Name, p.Oid) if !pointerOk { corruptOids = append(corruptOids, p.Oid) } } if err != nil { Panic(err, "Error checking Git LFS files") } }) if err := gitscanner.ScanRef(ref.Sha, nil); err != nil { ExitWithError(err) } if err := gitscanner.ScanIndex("HEAD", nil); err != nil { ExitWithError(err) } gitscanner.Close() if len(corruptOids) == 0 { Print("Git LFS fsck OK") return } if fsckDryRun { return } storageConfig := config.Config.StorageConfig() badDir := filepath.Join(storageConfig.LfsStorageDir, "bad") Print("Moving corrupt objects to %s", badDir) if err := os.MkdirAll(badDir, 0755); err != nil { ExitWithError(err) } for _, oid := range corruptOids { badFile := filepath.Join(badDir, oid) if err := os.Rename(lfs.LocalMediaPathReadOnly(oid), badFile); err != nil { ExitWithError(err) } } } func fsckPointer(name, oid string) (bool, error) { path := lfs.LocalMediaPathReadOnly(oid) Debug("Examining %v (%v)", name, path) f, err := os.Open(path) if pErr, pOk := err.(*os.PathError); pOk { Print("Object %s (%s) could not be checked: %s", name, oid, pErr.Err) return false, nil } if err != nil { return false, err } oidHash := sha256.New() _, err = io.Copy(oidHash, f) f.Close() if err != nil { return false, err } recalculatedOid := hex.EncodeToString(oidHash.Sum(nil)) if recalculatedOid == oid { return true, nil } Print("Object %s (%s) is corrupt", name, oid) return false, nil } func init() { RegisterCommand("fsck", fsckCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&fsckDryRun, "dry-run", "d", false, "List corrupt objects without deleting them.") }) } git-lfs-2.3.4/commands/command_install.go000066400000000000000000000053211317167762300203770ustar00rootroot00000000000000package commands import ( "os" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/localstorage" "github.com/spf13/cobra" ) var ( forceInstall = false localInstall = false manualInstall = false systemInstall = false skipSmudgeInstall = false skipRepoInstall = false ) func installCommand(cmd *cobra.Command, args []string) { opt := cmdInstallOptions() if skipSmudgeInstall { // assume the user is changing their smudge mode, so enable force implicitly opt.Force = true } if err := lfs.InstallFilters(opt, 
skipSmudgeInstall); err != nil { Print("WARNING: %s", err.Error()) Print("Run `git lfs install --force` to reset git config.") return } if !skipRepoInstall && (localInstall || lfs.InRepo()) { localstorage.InitStorageOrFail() installHooksCommand(cmd, args) } Print("Git LFS initialized.") } func cmdInstallOptions() lfs.InstallOptions { requireGitVersion() if localInstall { requireInRepo() } if localInstall && systemInstall { Exit("Only one of --local and --system options can be specified.") } if systemInstall && os.Geteuid() != 0 { Print("WARNING: current user is not root/admin, system install is likely to fail.") } return lfs.InstallOptions{ Force: forceInstall, Local: localInstall, System: systemInstall, } } func installHooksCommand(cmd *cobra.Command, args []string) { updateForce = forceInstall // TODO(@ttaylorr): this is a hack since the `git-lfs-install(1)` calls // into the function that implements `git-lfs-update(1)`. Given that, // there is no way to pass flags into that function, other than // hijacking the flags that `git-lfs-update(1)` already owns. // // At a later date, extract `git-lfs-update(1)`-related logic into its // own function, and translate this flag as a boolean argument to it. updateManual = manualInstall updateCommand(cmd, args) } func init() { RegisterCommand("install", installCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&forceInstall, "force", "f", false, "Set the Git LFS global config, overwriting previous values.") cmd.Flags().BoolVarP(&localInstall, "local", "l", false, "Set the Git LFS config for the local Git repository only.") cmd.Flags().BoolVarP(&systemInstall, "system", "", false, "Set the Git LFS config in system-wide scope.") cmd.Flags().BoolVarP(&skipSmudgeInstall, "skip-smudge", "s", false, "Skip automatic downloading of objects on clone or pull.") cmd.Flags().BoolVarP(&skipRepoInstall, "skip-repo", "", false, "Skip repo setup, just install global filters.") cmd.Flags().BoolVarP(&manualInstall, "manual", "m", false, "Print instructions for manual install.") cmd.AddCommand(NewCommand("hooks", installHooksCommand)) cmd.PreRun = setupLocalStorage }) } git-lfs-2.3.4/commands/command_lock.go000066400000000000000000000044351317167762300176660ustar00rootroot00000000000000package commands import ( "encoding/json" "fmt" "os" "path/filepath" "strings" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/git" "github.com/spf13/cobra" ) var ( lockRemote string lockRemoteHelp = "specify which remote to use when interacting with locks" ) func lockCommand(cmd *cobra.Command, args []string) { if len(args) == 0 { Print("Usage: git lfs lock <path>") return } path, err := lockPath(args[0]) if err != nil { Exit(err.Error()) } lockClient := newLockClient(lockRemote) defer lockClient.Close() lock, err := lockClient.LockFile(path) if err != nil { Exit("Lock failed: %v", errors.Cause(err)) } if locksCmdFlags.JSON { if err := json.NewEncoder(os.Stdout).Encode(lock); err != nil { Error(err.Error()) } return } Print("Locked %s", path) } // lockPath relativizes the given filepath such that it is relative to the root // path of the repository it is contained within, taking into account the // working directory of the caller. // // lockPath also respects different filesystem directory separators, so that a // Windows path of "\foo\bar" will be normalized to "foo/bar". // // If the root directory, working directory, or file cannot be // determined/opened, an error will be returned. If the file in question is // actually a directory, an error will be returned.
Otherwise, the cleaned path // will be returned. // // For example: // - Working directory: /code/foo/bar/ // - Repository root: /code/foo/ // - File to lock: ./baz // - Resolved path: bar/baz func lockPath(file string) (string, error) { repo, err := git.RootDir() if err != nil { return "", err } wd, err := os.Getwd() if err != nil { return "", err } abs := filepath.Join(wd, file) path := strings.TrimPrefix(abs, repo) path = strings.TrimPrefix(path, string(os.PathSeparator)) if stat, err := os.Stat(abs); err != nil { return "", err } else { if stat.IsDir() { return path, fmt.Errorf("lfs: cannot lock directory: %s", file) } return filepath.ToSlash(path), nil } } func init() { RegisterCommand("lock", lockCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&lockRemote, "remote", "r", cfg.CurrentRemote, lockRemoteHelp) cmd.Flags().BoolVarP(&locksCmdFlags.JSON, "json", "", false, "print output in json") }) } git-lfs-2.3.4/commands/command_locks.go000066400000000000000000000064061317167762300200510ustar00rootroot00000000000000package commands import ( "encoding/json" "os" "sort" "strings" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/locking" "github.com/git-lfs/git-lfs/tools" "github.com/spf13/cobra" ) var ( locksCmdFlags = new(locksFlags) ) func locksCommand(cmd *cobra.Command, args []string) { filters, err := locksCmdFlags.Filters() if err != nil { Exit("Error building filters: %v", err) } lockClient := newLockClient(lockRemote) defer lockClient.Close() locks, err := lockClient.SearchLocks(filters, locksCmdFlags.Limit, locksCmdFlags.Local) // Print any we got before exiting if locksCmdFlags.JSON { if err := json.NewEncoder(os.Stdout).Encode(locks); err != nil { Error(err.Error()) } return } var maxPathLen int var maxNameLen int lockPaths := make([]string, 0, len(locks)) locksByPath := make(map[string]locking.Lock) for _, lock := range locks { lockPaths = append(lockPaths, lock.Path) locksByPath[lock.Path] = lock maxPathLen = tools.MaxInt(maxPathLen, len(lock.Path)) if lock.Owner != nil { maxNameLen = tools.MaxInt(maxNameLen, len(lock.Owner.Name)) } } sort.Strings(lockPaths) for _, lockPath := range lockPaths { var ownerName string lock := locksByPath[lockPath] if lock.Owner != nil { ownerName = lock.Owner.Name } pathPadding := tools.MaxInt(maxPathLen-len(lock.Path), 0) namePadding := tools.MaxInt(maxNameLen-len(ownerName), 0) Print("%s%s\t%s%s\tID:%s", lock.Path, strings.Repeat(" ", pathPadding), ownerName, strings.Repeat(" ", namePadding), lock.Id, ) } if err != nil { Exit("Error while retrieving locks: %v", errors.Cause(err)) } } // locksFlags wraps up and holds all of the flags that can be given to the // `git lfs locks` command. type locksFlags struct { // Path is an optional filter parameter to filter against the lock's // path. Path string // Id is an optional filter parameter used to filter against the lock's // ID. Id string // Limit is an optional request parameter sent to the server used to // limit the number of locks returned. Limit int // Local limits the scope of lock reporting to the locally cached record // of locks for the current user & doesn't query the server. Local bool // JSON is an optional parameter to output data in json format. JSON bool } // Filters produces a filter based on locksFlags instance.
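//
// For example, a hypothetical invocation (field values assumed for
// illustration, with lockPath resolving "foo/bar.dat" to itself):
//
//	flags := &locksFlags{Path: "foo/bar.dat", Id: "123"}
//	filters, _ := flags.Filters()
//	// filters == map[string]string{"path": "foo/bar.dat", "id": "123"}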
func (l *locksFlags) Filters() (map[string]string, error) { filters := make(map[string]string) if l.Path != "" { path, err := lockPath(l.Path) if err != nil { return nil, err } filters["path"] = path } if l.Id != "" { filters["id"] = l.Id } return filters, nil } func init() { RegisterCommand("locks", locksCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&lockRemote, "remote", "r", cfg.CurrentRemote, lockRemoteHelp) cmd.Flags().StringVarP(&locksCmdFlags.Path, "path", "p", "", "filter locks results matching a particular path") cmd.Flags().StringVarP(&locksCmdFlags.Id, "id", "i", "", "filter locks results matching a particular ID") cmd.Flags().IntVarP(&locksCmdFlags.Limit, "limit", "l", 0, "optional limit for number of results to return") cmd.Flags().BoolVarP(&locksCmdFlags.Local, "local", "", false, "only list cached local record of own locks") cmd.Flags().BoolVarP(&locksCmdFlags.JSON, "json", "", false, "print output in json") }) } git-lfs-2.3.4/commands/command_logs.go000066400000000000000000000033751317167762300177040ustar00rootroot00000000000000package commands import ( "io/ioutil" "os" "path/filepath" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" "github.com/spf13/cobra" ) func logsCommand(cmd *cobra.Command, args []string) { for _, path := range sortedLogs() { Print(path) } } func logsLastCommand(cmd *cobra.Command, args []string) { logs := sortedLogs() if len(logs) < 1 { Print("No logs to show") return } logsShowCommand(cmd, logs[len(logs)-1:]) } func logsShowCommand(cmd *cobra.Command, args []string) { if len(args) == 0 { Print("Supply a log name.") return } name := args[0] by, err := ioutil.ReadFile(filepath.Join(config.LocalLogDir, name)) if err != nil { Exit("Error reading log: %s", name) } Debug("Reading log: %s", name) os.Stdout.Write(by) } func logsClearCommand(cmd *cobra.Command, args []string) { err := os.RemoveAll(config.LocalLogDir) if err != nil { Panic(err, "Error clearing %s", config.LocalLogDir) } Print("Cleared %s", config.LocalLogDir) } func logsBoomtownCommand(cmd *cobra.Command, args []string) { Debug("Debug message") err := errors.Wrapf(errors.New("Inner error message!"), "Error") Panic(err, "Welcome to Boomtown") Debug("Never seen") } func sortedLogs() []string { fileinfos, err := ioutil.ReadDir(config.LocalLogDir) if err != nil { return []string{} } names := make([]string, 0, len(fileinfos)) for _, info := range fileinfos { if info.IsDir() { continue } names = append(names, info.Name()) } return names } func init() { RegisterCommand("logs", logsCommand, func(cmd *cobra.Command) { cmd.AddCommand( NewCommand("last", logsLastCommand), NewCommand("show", logsShowCommand), NewCommand("clear", logsClearCommand), NewCommand("boomtown", logsBoomtownCommand), ) }) } git-lfs-2.3.4/commands/command_ls_files.go000066400000000000000000000031761317167762300205370ustar00rootroot00000000000000package commands import ( "os" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/spf13/cobra" ) var ( longOIDs = false debug = false ) func lsFilesCommand(cmd *cobra.Command, args []string) { requireInRepo() var ref string if len(args) == 1 { ref = args[0] } else { fullref, err := git.CurrentRef() if err != nil { Exit(err.Error()) } ref = fullref.Sha } showOidLen := 10 if longOIDs { showOidLen = 64 } gitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { if err != nil { Exit("Could not scan for Git LFS tree: %s", err) return } if debug { Print( "filepath: %s\n"+ " size: %d\n"+ "checkout: %v\n"+ "download: 
%v\n"+ " oid: %s %s\n"+ " version: %s\n", p.Name, p.Size, fileExistsOfSize(p), lfs.ObjectExistsOfSize(p.Oid, p.Size), p.OidType, p.Oid, p.Version) } else { Print("%s %s %s", p.Oid[0:showOidLen], lsFilesMarker(p), p.Name) } }) defer gitscanner.Close() if err := gitscanner.ScanTree(ref); err != nil { Exit("Could not scan for Git LFS tree: %s", err) } } // fileExistsOfSize returns true if a pointer appears to be properly smudged on checkout func fileExistsOfSize(p *lfs.WrappedPointer) bool { info, err := os.Stat(p.Name) return err == nil && info.Size() == p.Size } func lsFilesMarker(p *lfs.WrappedPointer) string { if fileExistsOfSize(p) { return "*" } return "-" } func init() { RegisterCommand("ls-files", lsFilesCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&longOIDs, "long", "l", false, "") cmd.Flags().BoolVarP(&debug, "debug", "d", false, "") }) } git-lfs-2.3.4/commands/command_migrate.go000066400000000000000000000207041317167762300203630ustar00rootroot00000000000000package commands import ( "path/filepath" "strings" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/git/githistory" "github.com/git-lfs/git-lfs/git/githistory/log" "github.com/git-lfs/git-lfs/git/odb" "github.com/git-lfs/git-lfs/localstorage" "github.com/spf13/cobra" ) var ( // migrateIncludeRefs is a set of Git references to explicitly include // in the migration. migrateIncludeRefs []string // migrateExcludeRefs is a set of Git references to explicitly exclude // in the migration. migrateExcludeRefs []string // migrateEverything indicates the presence of the --everything flag, // and instructs 'git lfs migrate' to migrate all local references. migrateEverything bool // migrateVerbose enables verbose logging migrateVerbose bool ) // migrate takes the given command-line arguments, a *githistory.Rewriter, a // logger, and a set of *githistory.RewriteOptions to apply, and performs a // migration. func migrate(args []string, r *githistory.Rewriter, l *log.Logger, opts *githistory.RewriteOptions) { requireInRepo() opts, err := rewriteOptions(args, opts, l) if err != nil { ExitWithError(err) } _, err = r.Rewrite(opts) if err != nil { ExitWithError(err) } } // getObjectDatabase creates a *odb.ObjectDatabase from the filesystem pointed // at the .git directory of the currently checked-out repository. func getObjectDatabase() (*odb.ObjectDatabase, error) { dir, err := git.GitDir() if err != nil { return nil, errors.Wrap(err, "cannot open root") } return odb.FromFilesystem(filepath.Join(dir, "objects")) } // rewriteOptions returns *githistory.RewriteOptions able to be passed to a // *githistory.Rewriter that reflect the current arguments and flags passed to // an invocation of git-lfs-migrate(1). // // It is merged with the given "opts". In other words, an identical "opts" is // returned, where the Include and Exclude fields have been filled based on the // following rules: // // The included and excluded references are determined based on the output of // includeExcludeRefs (see below for documentation and detail). // // If any of the above could not be determined without error, that error will be // returned immediately.
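//
// As a sketch (ref names assumed for illustration): given args of
// []string{"my-feature"} and no --include-ref/--exclude-ref flags, the
// returned copy of "opts" has Include set to the fully-qualified name of that
// branch (e.g. "refs/heads/my-feature"), Exclude set to the remote references
// resolved by includeExcludeRefs, and all other fields carried over unchanged.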
func rewriteOptions(args []string, opts *githistory.RewriteOptions, l *log.Logger) (*githistory.RewriteOptions, error) { include, exclude, err := includeExcludeRefs(l, args) if err != nil { return nil, err } return &githistory.RewriteOptions{ Include: include, Exclude: exclude, UpdateRefs: opts.UpdateRefs, Verbose: opts.Verbose, BlobFn: opts.BlobFn, TreeCallbackFn: opts.TreeCallbackFn, }, nil } // includeExcludeRefs returns fully-qualified sets of references to include, and // exclude, or an error if those could not be determined. // // They are determined based on the following rules: // // - Include all local refs/heads/ references for each branch // specified as an argument. // - Include the currently checked out branch if no branches are given as // arguments and the --include-ref= or --exclude-ref= flag(s) aren't given. // - Include all references given in --include-ref=. // - Exclude all references given in --exclude-ref=. func includeExcludeRefs(l *log.Logger, args []string) (include, exclude []string, err error) { hardcore := len(migrateIncludeRefs) > 0 || len(migrateExcludeRefs) > 0 if len(args) == 0 && !hardcore && !migrateEverything { // If no branches were given explicitly AND neither // --include-ref or --exclude-ref flags were given, then add the // currently checked out reference. current, err := currentRefToMigrate() if err != nil { return nil, nil, err } args = append(args, current.Name) } if migrateEverything && len(args) > 0 { return nil, nil, errors.New("fatal: cannot use --everything with explicit reference arguments") } for _, name := range args { // Then, loop through each branch given, resolve that reference, // and include it. ref, err := git.ResolveRef(name) if err != nil { return nil, nil, err } include = append(include, ref.Name) } if hardcore { if migrateEverything { return nil, nil, errors.New("fatal: cannot use --everything with --include-ref or --exclude-ref") } // If either --include-ref= or --exclude-ref= were // given, append those to the include and excluded reference // set, respectively. include = append(include, migrateIncludeRefs...) exclude = append(exclude, migrateExcludeRefs...) } else if migrateEverything { localRefs, err := git.LocalRefs() if err != nil { return nil, nil, err } for _, ref := range localRefs { include = append(include, ref.Name) } } else { // Otherwise, if neither --include-ref= or // --exclude-ref= were given, include no additional // references, and exclude all remote references that are remote // branches or remote tags. remoteRefs, err := getRemoteRefs(l) if err != nil { return nil, nil, err } exclude = append(exclude, remoteRefs...) } return include, exclude, nil } // getRemoteRefs returns a fully qualified set of references belonging to all // remotes known by the currently checked-out repository, or an error if those // references could not be determined. func getRemoteRefs(l *log.Logger) ([]string, error) { var refs []string remotes, err := git.RemoteList() if err != nil { return nil, err } w := l.Waiter("migrate: Fetching remote refs") if err := git.Fetch(remotes...); err != nil { return nil, err } w.Complete() for _, remote := range remotes { refsForRemote, err := git.RemoteRefs(remote) if err != nil { return nil, err } for _, ref := range refsForRemote { refs = append(refs, formatRefName(ref, remote)) } } return refs, nil } // formatRefName returns the fully-qualified name for the given Git reference // "ref". 
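//
// For example, a remote branch "feature" on the remote "origin" is formatted
// as "refs/remotes/origin/feature", and a remote tag "v1.0.0" as
// "refs/tags/v1.0.0"; any other reference type is returned unchanged.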
func formatRefName(ref *git.Ref, remote string) string { var name []string switch ref.Type { case git.RefTypeRemoteBranch: name = []string{"refs", "remotes", remote, ref.Name} case git.RefTypeRemoteTag: name = []string{"refs", "tags", ref.Name} default: return ref.Name } return strings.Join(name, "/") } // currentRefToMigrate returns the fully-qualified name of the currently // checked-out reference, or an error if the reference's type was not a local // branch. func currentRefToMigrate() (*git.Ref, error) { current, err := git.CurrentRef() if err != nil { return nil, err } if current.Type == git.RefTypeOther || current.Type == git.RefTypeRemoteBranch || current.Type == git.RefTypeRemoteTag { return nil, errors.Errorf("fatal: cannot migrate non-local ref: %s", current.Name) } return current, nil } // getHistoryRewriter returns a history rewriter that includes the filepath // filter given by the --include and --exclude arguments. func getHistoryRewriter(cmd *cobra.Command, db *odb.ObjectDatabase, l *log.Logger) *githistory.Rewriter { include, exclude := getIncludeExcludeArgs(cmd) filter := buildFilepathFilter(cfg, include, exclude) return githistory.NewRewriter(db, githistory.WithFilter(filter), githistory.WithLogger(l)) } func init() { info := NewCommand("info", migrateInfoCommand) info.Flags().IntVar(&migrateInfoTopN, "top", 5, "--top=") info.Flags().StringVar(&migrateInfoAboveFmt, "above", "", "--above=") info.Flags().StringVar(&migrateInfoUnitFmt, "unit", "", "--unit=") importCmd := NewCommand("import", migrateImportCommand) importCmd.Flags().BoolVar(&migrateVerbose, "verbose", false, "Verbose logging") RegisterCommand("migrate", nil, func(cmd *cobra.Command) { cmd.PersistentFlags().StringVarP(&includeArg, "include", "I", "", "Include a list of paths") cmd.PersistentFlags().StringVarP(&excludeArg, "exclude", "X", "", "Exclude a list of paths") cmd.PersistentFlags().StringSliceVar(&migrateIncludeRefs, "include-ref", nil, "An explicit list of refs to include") cmd.PersistentFlags().StringSliceVar(&migrateExcludeRefs, "exclude-ref", nil, "An explicit list of refs to exclude") cmd.PersistentFlags().BoolVar(&migrateEverything, "everything", false, "Migrate all local references") cmd.PersistentPreRun = func(_ *cobra.Command, args []string) { // Initialize local storage before running any child // subcommands, since migrations require lfs.TempDir to // be initialized within ".git/lfs/objects". // // When lfs.TempDir is initialized to "/tmp", // hard-linking can fail when another filesystem is // mounted at "/tmp" (such as tmpfs). 
localstorage.InitStorageOrFail() } cmd.AddCommand(importCmd, info) }) } git-lfs-2.3.4/commands/command_migrate_import.go000066400000000000000000000117041317167762300217550ustar00rootroot00000000000000package commands import ( "bufio" "bytes" "encoding/hex" "fmt" "os" "path/filepath" "strings" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/git/githistory" "github.com/git-lfs/git-lfs/git/githistory/log" "github.com/git-lfs/git-lfs/git/odb" "github.com/git-lfs/git-lfs/tools" "github.com/spf13/cobra" ) func migrateImportCommand(cmd *cobra.Command, args []string) { l := log.NewLogger(os.Stderr) defer l.Close() db, err := getObjectDatabase() if err != nil { ExitWithError(err) } defer db.Close() rewriter := getHistoryRewriter(cmd, db, l) tracked := trackedFromFilter(rewriter.Filter()) exts := tools.NewOrderedSet() migrate(args, rewriter, l, &githistory.RewriteOptions{ Verbose: migrateVerbose, BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) { if filepath.Base(path) == ".gitattributes" { return b, nil } var buf bytes.Buffer if _, err := clean(&buf, b.Contents, path, b.Size); err != nil { return nil, err } if ext := filepath.Ext(path); len(ext) > 0 { exts.Add(fmt.Sprintf("*%s filter=lfs diff=lfs merge=lfs -text", ext)) } return &odb.Blob{ Contents: &buf, Size: int64(buf.Len()), }, nil }, TreeCallbackFn: func(path string, t *odb.Tree) (*odb.Tree, error) { if path != string(os.PathSeparator) { // Ignore non-root trees. return t, nil } ours := tracked if ours.Cardinality() == 0 { // If there were no explicitly tracked // --include, --exclude filters, assume that the // include set is the wildcard filepath // extensions of files tracked. ours = exts } theirs, err := trackedFromAttrs(db, t) if err != nil { return nil, err } // Create a blob of the attributes that are optionally // present in the "t" tree's .gitattributes blob, and // union in the patterns that we've tracked. // // Perform this Union() operation each time we visit a // root tree such that if the underlying .gitattributes // is present and has a diff between commits in the // range of commits to migrate, those changes are // preserved. blob, err := trackedToBlob(db, theirs.Clone().Union(ours)) if err != nil { return nil, err } // Finally, return a copy of the tree "t" that has the // new .gitattributes file included/replaced. return t.Merge(&odb.TreeEntry{ Name: ".gitattributes", Filemode: 0100644, Oid: blob, }), nil }, UpdateRefs: true, }) // Only perform `git-checkout(1) -f` if the repository is // non-bare. if bare, _ := git.IsBare(); !bare { t := l.Waiter("migrate: checkout") err := git.Checkout("", nil, true) t.Complete() if err != nil { ExitWithError(err) } } } // trackedFromFilter returns an ordered set of strings where each entry is a // line in the .gitattributes file. It adds/removes the filter/diff/merge=lfs // attributes based on patterns included/excluded in the given filter. func trackedFromFilter(filter *filepathfilter.Filter) *tools.OrderedSet { tracked := tools.NewOrderedSet() for _, include := range filter.Include() { tracked.Add(fmt.Sprintf("%s filter=lfs diff=lfs merge=lfs -text", include)) } for _, exclude := range filter.Exclude() { tracked.Add(fmt.Sprintf("%s text -filter -merge -diff", exclude)) } return tracked } var ( // attrsCache maintains a cache from the hex-encoded SHA1 of a // .gitattributes blob to the set of patterns parsed from that blob.
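//
// For example (hypothetical SHA1 and contents), after scanning a
// .gitattributes blob containing the single line
// "*.bin filter=lfs diff=lfs merge=lfs -text", the cache would hold:
//
//	attrsCache["aabbcc..."] // => OrderedSet{"*.bin filter=lfs diff=lfs merge=lfs -text"}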
attrsCache = make(map[string]*tools.OrderedSet) ) // trackedFromAttrs returns an ordered line-delimited set of the contents of a // .gitattributes blob in a given tree "t". // // It returns an empty set if no attributes file could be found, or an error if // it could not otherwise be opened. func trackedFromAttrs(db *odb.ObjectDatabase, t *odb.Tree) (*tools.OrderedSet, error) { var oid []byte for _, e := range t.Entries { if strings.ToLower(e.Name) == ".gitattributes" && e.Type() == odb.BlobObjectType { oid = e.Oid break } } if oid == nil { // TODO(@ttaylorr): make (*tools.OrderedSet)(nil) a valid // receiver for non-mutative methods. return tools.NewOrderedSet(), nil } sha1 := hex.EncodeToString(oid) if s, ok := attrsCache[sha1]; ok { return s, nil } blob, err := db.Blob(oid) if err != nil { return nil, err } attrs := tools.NewOrderedSet() scanner := bufio.NewScanner(blob.Contents) for scanner.Scan() { attrs.Add(scanner.Text()) } if err := scanner.Err(); err != nil { return nil, err } attrsCache[sha1] = attrs return attrsCache[sha1], nil } // trackedToBlob writes and returns the OID of a .gitattributes blob based on // the patterns given in the ordered set of patterns, "patterns". func trackedToBlob(db *odb.ObjectDatabase, patterns *tools.OrderedSet) ([]byte, error) { var attrs bytes.Buffer for pattern := range patterns.Iter() { fmt.Fprintf(&attrs, "%s\n", pattern) } return db.WriteBlob(&odb.Blob{ Contents: &attrs, Size: int64(attrs.Len()), }) } git-lfs-2.3.4/commands/command_migrate_info.go000066400000000000000000000134231317167762300213760ustar00rootroot00000000000000package commands import ( "fmt" "io" "os" "path/filepath" "sort" "strings" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/git/githistory" "github.com/git-lfs/git-lfs/git/githistory/log" "github.com/git-lfs/git-lfs/git/odb" "github.com/git-lfs/git-lfs/tools" "github.com/git-lfs/git-lfs/tools/humanize" "github.com/spf13/cobra" ) var ( // migrateInfoTopN is a flag given to the git-lfs-migrate(1) subcommand // 'info' which specifies how many info entries to show by default. migrateInfoTopN int // migrateInfoAboveFmt is a flag given to the git-lfs-migrate(1) // subcommand 'info' specifying a human-readable string threshold of // filesize before entries are counted. migrateInfoAboveFmt string // migrateInfoAbove is the number of bytes parsed from the above // migrateInfoAboveFmt flag. migrateInfoAbove uint64 // migrateInfoUnitFmt is a flag given to the git-lfs-migrate(1) // subcommand 'info' specifying a human-readable string of units with // which to display the number of bytes. migrateInfoUnitFmt string // migrateInfoUnit is the number of bytes in the unit given as // migrateInfoUnitFmt. 
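//
// For example, assuming humanize.ParseByteUnit accepts IEC units, passing
// --unit=MiB would set migrateInfoUnit to 1048576 (1024 * 1024).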
migrateInfoUnit uint64 ) func migrateInfoCommand(cmd *cobra.Command, args []string) { l := log.NewLogger(os.Stderr) db, err := getObjectDatabase() if err != nil { ExitWithError(err) } defer db.Close() rewriter := getHistoryRewriter(cmd, db, l) exts := make(map[string]*MigrateInfoEntry) above, err := humanize.ParseBytes(migrateInfoAboveFmt) if err != nil { ExitWithError(errors.Wrap(err, "cannot parse --above=")) } if u := cmd.Flag("unit"); u.Changed { unit, err := humanize.ParseByteUnit(u.Value.String()) if err != nil { ExitWithError(errors.Wrap(err, "cannot parse --unit=")) } migrateInfoUnit = unit } migrateInfoAbove = above migrate(args, rewriter, l, &githistory.RewriteOptions{ BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) { ext := fmt.Sprintf("*%s", filepath.Ext(path)) if len(ext) > 1 { entry := exts[ext] if entry == nil { entry = &MigrateInfoEntry{Qualifier: ext} } entry.Total++ entry.BytesTotal += b.Size if b.Size > int64(migrateInfoAbove) { entry.TotalAbove++ entry.BytesAbove += b.Size } exts[ext] = entry } return b, nil }, }) l.Close() entries := EntriesBySize(MapToEntries(exts)) entries = removeEmptyEntries(entries) sort.Sort(sort.Reverse(entries)) migrateInfoTopN = tools.ClampInt(migrateInfoTopN, len(entries), 0) entries = entries[:tools.MaxInt(0, migrateInfoTopN)] entries.Print(os.Stdout) } // MigrateInfoEntry represents a tuple of filetype to bytes and entry count // above and below a threshold. type MigrateInfoEntry struct { // Qualifier is the filepath's extension. Qualifier string // BytesAbove is the total size of all files above a given threshold. BytesAbove int64 // TotalAbove is the count of all files above a given size threshold. TotalAbove int64 // BytesTotal is the number of bytes of all files. BytesTotal int64 // Total is the count of all files. Total int64 } // MapToEntries creates a set of `*MigrateInfoEntry`'s for a given map of // filepath extensions to file size in bytes. func MapToEntries(exts map[string]*MigrateInfoEntry) []*MigrateInfoEntry { entries := make([]*MigrateInfoEntry, 0, len(exts)) for _, entry := range exts { entries = append(entries, entry) } return entries } // removeEmptyEntries removes `*MigrateInfoEntry`'s for which no matching file // is above the given threshold "--above". func removeEmptyEntries(entries []*MigrateInfoEntry) []*MigrateInfoEntry { nz := make([]*MigrateInfoEntry, 0, len(entries)) for _, e := range entries { if e.TotalAbove > 0 { nz = append(nz, e) } } return nz } // EntriesBySize is an implementation of sort.Interface that sorts a set of // `*MigrateInfoEntry`'s. type EntriesBySize []*MigrateInfoEntry // Len returns the total length of the set of `*MigrateInfoEntry`'s. func (e EntriesBySize) Len() int { return len(e) } // Less returns whether or not the MigrateInfoEntry given at `i` takes up // less total size than the MigrateInfoEntry given at `j`. func (e EntriesBySize) Less(i, j int) bool { return e[i].BytesAbove < e[j].BytesAbove } // Swap swaps the entries given at i, j. func (e EntriesBySize) Swap(i, j int) { e[i], e[j] = e[j], e[i] } // Print formats the `*MigrateInfoEntry`'s in the set and prints them to the // given io.Writer, "to", returning "n" the number of bytes written, and any // error, if one occurred.
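//
// Each printed line is tab-separated, with extensions and sizes
// left-justified and counts and percentages right-justified; a sketch of one
// line of output (values hypothetical):
//
//	*.psd	1.2 MB	27/27 file(s)	100%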
func (e EntriesBySize) Print(to io.Writer) (int, error) { if len(e) == 0 { return 0, nil } extensions := make([]string, 0, len(e)) sizes := make([]string, 0, len(e)) stats := make([]string, 0, len(e)) percentages := make([]string, 0, len(e)) for _, entry := range e { bytesAbove := uint64(entry.BytesAbove) above := entry.TotalAbove total := entry.Total percentAbove := 100 * (float64(above) / float64(total)) var size string if migrateInfoUnit > 0 { size = humanize.FormatBytesUnit(bytesAbove, migrateInfoUnit) } else { size = humanize.FormatBytes(bytesAbove) } stat := fmt.Sprintf("%d/%d file(s)", above, total) percentage := fmt.Sprintf("%.0f%%", percentAbove) extensions = append(extensions, entry.Qualifier) sizes = append(sizes, size) stats = append(stats, stat) percentages = append(percentages, percentage) } extensions = tools.Ljust(extensions) sizes = tools.Ljust(sizes) stats = tools.Rjust(stats) percentages = tools.Rjust(percentages) output := make([]string, 0, len(e)) for i := 0; i < len(e); i++ { extension := extensions[i] size := sizes[i] stat := stats[i] percentage := percentages[i] line := strings.Join([]string{extension, size, stat, percentage}, "\t") output = append(output, line) } return fmt.Fprintln(to, strings.Join(output, "\n")) } git-lfs-2.3.4/commands/command_pointer.go000066400000000000000000000055521317167762300204170ustar00rootroot00000000000000package commands import ( "bytes" "crypto/sha256" "encoding/hex" "errors" "fmt" "io" "os" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/spf13/cobra" ) var ( pointerFile string pointerCompare string pointerStdin bool ) func pointerCommand(cmd *cobra.Command, args []string) { comparing := false something := false buildOid := "" compareOid := "" if len(pointerCompare) > 0 || pointerStdin { comparing = true } if len(pointerFile) > 0 { something = true buildFile, err := os.Open(pointerFile) if err != nil { Error(err.Error()) os.Exit(1) } oidHash := sha256.New() size, err := io.Copy(oidHash, buildFile) buildFile.Close() if err != nil { Error(err.Error()) os.Exit(1) } ptr := lfs.NewPointer(hex.EncodeToString(oidHash.Sum(nil)), size, nil) fmt.Fprintf(os.Stderr, "Git LFS pointer for %s\n\n", pointerFile) buf := &bytes.Buffer{} lfs.EncodePointer(io.MultiWriter(os.Stdout, buf), ptr) if comparing { buildOid, err = git.HashObject(bytes.NewReader(buf.Bytes())) if err != nil { Error(err.Error()) os.Exit(1) } fmt.Fprintf(os.Stderr, "\nGit blob OID: %s\n\n", buildOid) } } else { comparing = false } if len(pointerCompare) > 0 || pointerStdin { something = true compFile, err := pointerReader() if err != nil { Error(err.Error()) os.Exit(1) } buf := &bytes.Buffer{} tee := io.TeeReader(compFile, buf) _, err = lfs.DecodePointer(tee) compFile.Close() pointerName := "STDIN" if !pointerStdin { pointerName = pointerCompare } fmt.Fprintf(os.Stderr, "Pointer from %s\n\n", pointerName) if err != nil { Error(err.Error()) os.Exit(1) } fmt.Fprint(os.Stderr, buf.String()) if comparing { compareOid, err = git.HashObject(bytes.NewReader(buf.Bytes())) if err != nil { Error(err.Error()) os.Exit(1) } fmt.Fprintf(os.Stderr, "\nGit blob OID: %s\n", compareOid) } } if comparing && buildOid != compareOid { fmt.Fprintf(os.Stderr, "\nPointers do not match\n") os.Exit(1) } if !something { Error("Nothing to do!") os.Exit(1) } } func pointerReader() (io.ReadCloser, error) { if len(pointerCompare) > 0 { if pointerStdin { return nil, errors.New("Cannot read from STDIN and --pointer.") } return os.Open(pointerCompare) } requireStdin("The --stdin flag
expects a pointer file from STDIN.") return os.Stdin, nil } func init() { RegisterCommand("pointer", pointerCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&pointerFile, "file", "f", "", "Path to a local file to generate the pointer from.") cmd.Flags().StringVarP(&pointerCompare, "pointer", "p", "", "Path to a local file containing a pointer built by another Git LFS implementation.") cmd.Flags().BoolVarP(&pointerStdin, "stdin", "", false, "Read a pointer built by another Git LFS implementation through STDIN.") }) } git-lfs-2.3.4/commands/command_post_checkout.go000066400000000000000000000050511317167762300216030ustar00rootroot00000000000000package commands import ( "os" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/locking" "github.com/rubyist/tracerx" "github.com/spf13/cobra" ) // postCheckoutCommand is run through Git's post-checkout hook. The hook passes // up to 3 arguments on the command line: // // 1. SHA of previous commit before the checkout // 2. SHA of commit just checked out // 3. Flag ("0" or "1") - 1 if a branch/tag/SHA was checked out, 0 if a file was // In the case of a file being checked out, the pre/post SHA are the same // // This hook checks that files which are lockable and not locked are made read-only, // optimising that as best it can based on the available information. func postCheckoutCommand(cmd *cobra.Command, args []string) { if len(args) != 3 { Print("This should be run through Git's post-checkout hook. Run `git lfs update` to install it.") os.Exit(1) } // Skip entire hook if lockable read only feature is disabled if !cfg.SetLockableFilesReadOnly() { os.Exit(0) } requireGitVersion() lockClient := newLockClient(cfg.CurrentRemote) // Skip this hook if no lockable patterns have been configured if len(lockClient.GetLockablePatterns()) == 0 { os.Exit(0) } if args[2] == "1" && args[0] != "0000000000000000000000000000000000000000" { postCheckoutRevChange(lockClient, args[0], args[1]) } else { postCheckoutFileChange(lockClient) } } func postCheckoutRevChange(client *locking.Client, pre, post string) { tracerx.Printf("post-checkout: changes between %v and %v", pre, post) // We can speed things up by looking at the difference between previous HEAD // and current HEAD, and only checking lockable files that are different files, err := git.GetFilesChanged(pre, post) if err != nil { LoggedError(err, "Warning: post-checkout rev diff %v:%v failed: %v\nFalling back on full scan.", pre, post, err) postCheckoutFileChange(client) return } tracerx.Printf("post-checkout: checking write flags on %v", files) err = client.FixLockableFileWriteFlags(files) if err != nil { LoggedError(err, "Warning: post-checkout locked file check failed: %v", err) } } func postCheckoutFileChange(client *locking.Client) { tracerx.Printf("post-checkout: checking write flags for all lockable files") // Sadly we don't get any information about what files were checked out, // so we have to check the entire repo err := client.FixAllLockableFileWriteFlags() if err != nil { LoggedError(err, "Warning: post-checkout locked file check failed: %v", err) } } func init() { RegisterCommand("post-checkout", postCheckoutCommand, nil) } git-lfs-2.3.4/commands/command_post_commit.go000066400000000000000000000030421317167762300212640ustar00rootroot00000000000000package commands import ( "os" "github.com/git-lfs/git-lfs/git" "github.com/rubyist/tracerx" "github.com/spf13/cobra" ) // postCommitCommand is run through Git's post-commit hook. The hook passes // no arguments.
// This hook checks that files which are lockable and not locked are made read-only, // optimising that based on what was added / modified in the commit. // This is mainly to catch added files, since modified files should already be // locked. If we didn't do this, any added files would remain read/write on disk // even without a lock unless something else checked. func postCommitCommand(cmd *cobra.Command, args []string) { // Skip entire hook if lockable read only feature is disabled if !cfg.SetLockableFilesReadOnly() { os.Exit(0) } requireGitVersion() lockClient := newLockClient(cfg.CurrentRemote) // Skip this hook if no lockable patterns have been configured if len(lockClient.GetLockablePatterns()) == 0 { os.Exit(0) } tracerx.Printf("post-commit: checking file write flags at HEAD") // We can speed things up by looking at what changed in // HEAD, and only checking those lockable files files, err := git.GetFilesChanged("HEAD", "") if err != nil { LoggedError(err, "Warning: post-commit failed: %v", err) os.Exit(1) } tracerx.Printf("post-commit: checking write flags on %v", files) err = lockClient.FixLockableFileWriteFlags(files) if err != nil { LoggedError(err, "Warning: post-commit locked file check failed: %v", err) } } func init() { RegisterCommand("post-commit", postCommitCommand, nil) } git-lfs-2.3.4/commands/command_post_merge.go000066400000000000000000000027301317167762300210760ustar00rootroot00000000000000package commands import ( "os" "github.com/rubyist/tracerx" "github.com/spf13/cobra" ) // postMergeCommand is run through Git's post-merge hook. // // This hook checks that files which are lockable and not locked are made read-only, // optimising that as best it can based on the available information. func postMergeCommand(cmd *cobra.Command, args []string) { if len(args) != 1 { Print("This should be run through Git's post-merge hook. Run `git lfs update` to install it.") os.Exit(1) } // Skip entire hook if lockable read only feature is disabled if !cfg.SetLockableFilesReadOnly() { os.Exit(0) } requireGitVersion() lockClient := newLockClient(cfg.CurrentRemote) // Skip this hook if no lockable patterns have been configured if len(lockClient.GetLockablePatterns()) == 0 { os.Exit(0) } // The only argument this hook receives is a flag indicating whether the // merge was a squash merge; we don't know what files changed. // Whether it's squash or not is irrelevant, either way it could have // reset the read-only flag on files that got merged. tracerx.Printf("post-merge: checking write flags for all lockable files") // Sadly we don't get any information about what files were checked out, // so we have to check the entire repo err := lockClient.FixAllLockableFileWriteFlags() if err != nil { LoggedError(err, "Warning: post-merge locked file check failed: %v", err) } } func init() { RegisterCommand("post-merge", postMergeCommand, nil) } git-lfs-2.3.4/commands/command_pre_push.go000066400000000000000000000050351317167762300205600ustar00rootroot00000000000000package commands import ( "bufio" "os" "strings" "github.com/git-lfs/git-lfs/git" "github.com/rubyist/tracerx" "github.com/spf13/cobra" ) var ( prePushDryRun = false prePushDeleteBranch = strings.Repeat("0", 40) ) // prePushCommand is run through Git's pre-push hook. The pre-push hook passes // two arguments on the command line: // // 1. Name of the remote to which the push is being done // 2. 
URL to which the push is being done // // The hook receives commit information on stdin in the form: // // <local ref> <local sha1> <remote ref> <remote sha1> // // In the typical case, prePushCommand will get a list of git objects being // pushed by using the following: // // git rev-list --objects <local sha1> ^<remote sha1> // // If any of those git objects are associated with Git LFS objects, those // objects will be pushed to the Git LFS API. // // In the case of pushing a new branch, the list of git objects will be all of // the git objects in this branch. // // In the case of deleting a branch, no attempts to push Git LFS objects will be // made. func prePushCommand(cmd *cobra.Command, args []string) { if len(args) == 0 { Print("This should be run through Git's pre-push hook. Run `git lfs update` to install it.") os.Exit(1) } requireGitVersion() // Remote is first arg if err := git.ValidateRemote(args[0]); err != nil { Exit("Invalid remote name %q", args[0]) } ctx := newUploadContext(args[0], prePushDryRun) gitscanner, err := ctx.buildGitScanner() if err != nil { ExitWithError(err) } defer gitscanner.Close() // We can be passed multiple lines of refs scanner := bufio.NewScanner(os.Stdin) for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) if len(line) == 0 { continue } tracerx.Printf("pre-push: %s", line) left, _ := decodeRefs(line) if left == prePushDeleteBranch { continue } if err := uploadLeftOrAll(gitscanner, ctx, left); err != nil { Print("Error scanning for Git LFS files in %q", left) ExitWithError(err) } } ctx.Await() } // decodeRefs pulls the sha1s out of the line read from the pre-push // hook's stdin. func decodeRefs(input string) (string, string) { refs := strings.Split(strings.TrimSpace(input), " ") var left, right string if len(refs) > 1 { left = refs[1] } if len(refs) > 3 { right = "^" + refs[3] } return left, right } func init() { RegisterCommand("pre-push", prePushCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&prePushDryRun, "dry-run", "d", false, "Do everything except actually send the updates") }) } git-lfs-2.3.4/commands/command_prune.go000066400000000000000000000353111317167762300200640ustar00rootroot00000000000000package commands import ( "bytes" "fmt" "os" "sync" "time" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/localstorage" "github.com/git-lfs/git-lfs/progress" "github.com/git-lfs/git-lfs/tools" "github.com/git-lfs/git-lfs/tools/humanize" "github.com/git-lfs/git-lfs/tq" "github.com/rubyist/tracerx" "github.com/spf13/cobra" ) var ( pruneDryRunArg bool pruneVerboseArg bool pruneVerifyArg bool pruneDoNotVerifyArg bool ) func pruneCommand(cmd *cobra.Command, args []string) { // Guts of this must be re-usable from fetch --prune so just parse & dispatch if pruneVerifyArg && pruneDoNotVerifyArg { Exit("Cannot specify both --verify-remote and --no-verify-remote") } fetchPruneConfig := cfg.FetchPruneConfig() verify := !pruneDoNotVerifyArg && (fetchPruneConfig.PruneVerifyRemoteAlways || pruneVerifyArg) prune(fetchPruneConfig, verify, pruneDryRunArg, pruneVerboseArg) } type PruneProgressType int const ( PruneProgressTypeLocal = PruneProgressType(iota) PruneProgressTypeRetain = PruneProgressType(iota) PruneProgressTypeVerify = PruneProgressType(iota) ) // Progress from a sub-task of prune type PruneProgress struct { ProgressType PruneProgressType Count int // Number of items done } type PruneProgressChan chan PruneProgress func prune(fetchPruneConfig config.FetchPruneConfig, verifyRemote, dryRun, verbose bool) { localObjects :=
make([]localstorage.Object, 0, 100) retainedObjects := tools.NewStringSetWithCapacity(100) var reachableObjects tools.StringSet var taskwait sync.WaitGroup // Add all the base funcs to the waitgroup before starting them, in case // one completes really fast & hits 0 unexpectedly // each main process can Add() to the wg itself if it subdivides the task taskwait.Add(4) // 1..4: localObjects, current & recent refs, unpushed, worktree if verifyRemote { taskwait.Add(1) // 5 } progressChan := make(PruneProgressChan, 100) // Collect errors errorChan := make(chan error, 10) var errorwait sync.WaitGroup errorwait.Add(1) var taskErrors []error go pruneTaskCollectErrors(&taskErrors, errorChan, &errorwait) // Populate the single list of local objects go pruneTaskGetLocalObjects(&localObjects, progressChan, &taskwait) // Now find files to be retained from many sources retainChan := make(chan string, 100) gitscanner := lfs.NewGitScanner(nil) go pruneTaskGetRetainedCurrentAndRecentRefs(gitscanner, fetchPruneConfig, retainChan, errorChan, &taskwait) go pruneTaskGetRetainedUnpushed(gitscanner, fetchPruneConfig, retainChan, errorChan, &taskwait) go pruneTaskGetRetainedWorktree(gitscanner, retainChan, errorChan, &taskwait) if verifyRemote { reachableObjects = tools.NewStringSetWithCapacity(100) go pruneTaskGetReachableObjects(gitscanner, &reachableObjects, errorChan, &taskwait) } // Now collect all the retained objects, on separate wait var retainwait sync.WaitGroup retainwait.Add(1) go pruneTaskCollectRetained(&retainedObjects, retainChan, progressChan, &retainwait) // Report progress var progresswait sync.WaitGroup progresswait.Add(1) go pruneTaskDisplayProgress(progressChan, &progresswait) taskwait.Wait() // wait for subtasks gitscanner.Close() close(retainChan) // triggers retain collector to end now all tasks have retainwait.Wait() // make sure all retained objects added close(errorChan) // triggers error collector to end now all tasks have errorwait.Wait() // make sure all errors have been processed pruneCheckErrors(taskErrors) prunableObjects := make([]string, 0, len(localObjects)/2) // Build list of prunables (also queue for verify at same time if applicable) var verifyQueue *tq.TransferQueue var verifiedObjects tools.StringSet var totalSize int64 var verboseOutput bytes.Buffer var verifyc chan *tq.Transfer var verifywait sync.WaitGroup if verifyRemote { verifyQueue = newDownloadCheckQueue( getTransferManifestOperationRemote("download", fetchPruneConfig.PruneRemoteName), fetchPruneConfig.PruneRemoteName, ) verifiedObjects = tools.NewStringSetWithCapacity(len(localObjects) / 2) // this channel is filled with oids for which Check() succeeded & Transfer() was called verifyc = verifyQueue.Watch() verifywait.Add(1) go func() { for t := range verifyc { verifiedObjects.Add(t.Oid) tracerx.Printf("VERIFIED: %v", t.Oid) progressChan <- PruneProgress{PruneProgressTypeVerify, 1} } verifywait.Done() }() } for _, file := range localObjects { if !retainedObjects.Contains(file.Oid) { prunableObjects = append(prunableObjects, file.Oid) totalSize += file.Size if verbose { // Save up verbose output for the end, spinner still going verboseOutput.WriteString(fmt.Sprintf(" * %v (%v)\n", file.Oid, humanize.FormatBytes(uint64(file.Size)))) } if verifyRemote { tracerx.Printf("VERIFYING: %v", file.Oid) verifyQueue.Add(downloadTransfer(&lfs.WrappedPointer{ Pointer: lfs.NewPointer(file.Oid, file.Size, nil), })) } } } if verifyRemote { verifyQueue.Wait() verifywait.Wait() close(progressChan) // after verify (uses spinner) but 
before check progresswait.Wait() pruneCheckVerified(prunableObjects, reachableObjects, verifiedObjects) } else { close(progressChan) progresswait.Wait() } if len(prunableObjects) == 0 { Print("Nothing to prune") return } if dryRun { Print("%d files would be pruned (%v)", len(prunableObjects), humanize.FormatBytes(uint64(totalSize))) if verbose { Print(verboseOutput.String()) } } else { Print("Pruning %d files, (%v)", len(prunableObjects), humanize.FormatBytes(uint64(totalSize))) if verbose { Print(verboseOutput.String()) } pruneDeleteFiles(prunableObjects) } } func pruneCheckVerified(prunableObjects []string, reachableObjects, verifiedObjects tools.StringSet) { // There's no issue if an object is not reachable and missing, only if reachable & missing var problems bytes.Buffer for _, oid := range prunableObjects { // Test verified first as most likely reachable if !verifiedObjects.Contains(oid) { if reachableObjects.Contains(oid) { problems.WriteString(fmt.Sprintf(" * %v\n", oid)) } else { // Just to indicate why it doesn't matter that we didn't verify tracerx.Printf("UNREACHABLE: %v", oid) } } } // technically we could still prune the other oids, but this indicates a // more serious issue because the local state implies that these can be // deleted but that's incorrect; bad state has occurred somehow, might need // push --all to resolve if problems.Len() > 0 { Exit("Abort: these objects to be pruned are missing on remote:\n%v", problems.String()) } } func pruneCheckErrors(taskErrors []error) { if len(taskErrors) > 0 { for _, err := range taskErrors { LoggedError(err, "Prune error: %v", err) } Exit("Prune sub-tasks failed, cannot continue") } } func pruneTaskDisplayProgress(progressChan PruneProgressChan, waitg *sync.WaitGroup) { defer waitg.Done() spinner := progress.NewSpinner() localCount := 0 retainCount := 0 verifyCount := 0 var msg string for p := range progressChan { switch p.ProgressType { case PruneProgressTypeLocal: localCount++ case PruneProgressTypeRetain: retainCount++ case PruneProgressTypeVerify: verifyCount++ } msg = fmt.Sprintf("%d local objects, %d retained", localCount, retainCount) if verifyCount > 0 { msg += fmt.Sprintf(", %d verified with remote", verifyCount) } spinner.Print(OutputWriter, msg) } spinner.Finish(OutputWriter, msg) } func pruneTaskCollectRetained(outRetainedObjects *tools.StringSet, retainChan chan string, progressChan PruneProgressChan, retainwait *sync.WaitGroup) { defer retainwait.Done() for oid := range retainChan { if outRetainedObjects.Add(oid) { progressChan <- PruneProgress{PruneProgressTypeRetain, 1} } } } func pruneTaskCollectErrors(outtaskErrors *[]error, errorChan chan error, errorwait *sync.WaitGroup) { defer errorwait.Done() for err := range errorChan { *outtaskErrors = append(*outtaskErrors, err) } } func pruneDeleteFiles(prunableObjects []string) { spinner := progress.NewSpinner() var problems bytes.Buffer // In case we fail to delete some var deletedFiles int for i, oid := range prunableObjects { spinner.Print(OutputWriter, fmt.Sprintf("Deleting object %d/%d", i, len(prunableObjects))) mediaFile, err := lfs.LocalMediaPath(oid) if err != nil { problems.WriteString(fmt.Sprintf("Unable to find media path for %v: %v\n", oid, err)) continue } err = os.Remove(mediaFile) if err != nil { problems.WriteString(fmt.Sprintf("Failed to remove file %v: %v\n", mediaFile, err)) continue } deletedFiles++ } spinner.Finish(OutputWriter, fmt.Sprintf("Deleted %d files", deletedFiles)) if problems.Len() > 0 { LoggedError(fmt.Errorf("Failed to delete some 
files"), problems.String()) Exit("Prune failed, see errors above") } } // Background task, must call waitg.Done() once at end func pruneTaskGetLocalObjects(outLocalObjects *[]localstorage.Object, progChan PruneProgressChan, waitg *sync.WaitGroup) { defer waitg.Done() localObjectsChan := lfs.ScanObjectsChan() for f := range localObjectsChan { *outLocalObjects = append(*outLocalObjects, f) progChan <- PruneProgress{PruneProgressTypeLocal, 1} } } // Background task, must call waitg.Done() once at end func pruneTaskGetRetainedAtRef(gitscanner *lfs.GitScanner, ref string, retainChan chan string, errorChan chan error, waitg *sync.WaitGroup) { defer waitg.Done() err := gitscanner.ScanRef(ref, func(p *lfs.WrappedPointer, err error) { if err != nil { errorChan <- err return } retainChan <- p.Oid tracerx.Printf("RETAIN: %v via ref %v", p.Oid, ref) }) if err != nil { errorChan <- err } } // Background task, must call waitg.Done() once at end func pruneTaskGetPreviousVersionsOfRef(gitscanner *lfs.GitScanner, ref string, since time.Time, retainChan chan string, errorChan chan error, waitg *sync.WaitGroup) { defer waitg.Done() err := gitscanner.ScanPreviousVersions(ref, since, func(p *lfs.WrappedPointer, err error) { if err != nil { errorChan <- err return } else { retainChan <- p.Oid tracerx.Printf("RETAIN: %v via ref %v >= %v", p.Oid, ref, since) } }) if err != nil { errorChan <- err return } } // Background task, must call waitg.Done() once at end func pruneTaskGetRetainedCurrentAndRecentRefs(gitscanner *lfs.GitScanner, fetchconf config.FetchPruneConfig, retainChan chan string, errorChan chan error, waitg *sync.WaitGroup) { defer waitg.Done() // We actually increment the waitg in this func since we kick off sub-goroutines // Make a list of what unique commits to keep, & search backward from commits := tools.NewStringSet() // Do current first ref, err := git.CurrentRef() if err != nil { errorChan <- err return } commits.Add(ref.Sha) waitg.Add(1) go pruneTaskGetRetainedAtRef(gitscanner, ref.Sha, retainChan, errorChan, waitg) // Now recent if fetchconf.FetchRecentRefsDays > 0 { pruneRefDays := fetchconf.FetchRecentRefsDays + fetchconf.PruneOffsetDays tracerx.Printf("PRUNE: Retaining non-HEAD refs within %d (%d+%d) days", pruneRefDays, fetchconf.FetchRecentRefsDays, fetchconf.PruneOffsetDays) refsSince := time.Now().AddDate(0, 0, -pruneRefDays) // Keep all recent refs including any recent remote branches refs, err := git.RecentBranches(refsSince, fetchconf.FetchRecentRefsIncludeRemotes, "") if err != nil { Panic(err, "Could not scan for recent refs") } for _, ref := range refs { if commits.Add(ref.Sha) { // A new commit waitg.Add(1) go pruneTaskGetRetainedAtRef(gitscanner, ref.Sha, retainChan, errorChan, waitg) } } } // For every unique commit we've fetched, check recent commits too // Only if we're fetching recent commits, otherwise only keep at refs if fetchconf.FetchRecentCommitsDays > 0 { pruneCommitDays := fetchconf.FetchRecentCommitsDays + fetchconf.PruneOffsetDays for commit := range commits.Iter() { // We measure from the last commit at the ref summ, err := git.GetCommitSummary(commit) if err != nil { errorChan <- fmt.Errorf("Couldn't scan commits at %v: %v", commit, err) continue } commitsSince := summ.CommitDate.AddDate(0, 0, -pruneCommitDays) waitg.Add(1) go pruneTaskGetPreviousVersionsOfRef(gitscanner, commit, commitsSince, retainChan, errorChan, waitg) } } } // Background task, must call waitg.Done() once at end func pruneTaskGetRetainedUnpushed(gitscanner *lfs.GitScanner, fetchconf 
config.FetchPruneConfig, retainChan chan string, errorChan chan error, waitg *sync.WaitGroup) { defer waitg.Done() err := gitscanner.ScanUnpushed(fetchconf.PruneRemoteName, func(p *lfs.WrappedPointer, err error) { if err != nil { errorChan <- err } else { retainChan <- p.Pointer.Oid tracerx.Printf("RETAIN: %v unpushed", p.Pointer.Oid) } }) if err != nil { errorChan <- err return } } // Background task, must call waitg.Done() once at end func pruneTaskGetRetainedWorktree(gitscanner *lfs.GitScanner, retainChan chan string, errorChan chan error, waitg *sync.WaitGroup) { defer waitg.Done() // Retain other worktree HEADs too // Working copy, branch & maybe commit is different but repo is shared allWorktreeRefs, err := git.GetAllWorkTreeHEADs(config.LocalGitStorageDir) if err != nil { errorChan <- err return } // Don't repeat any commits, worktrees are always on their own branches but // may point to the same commit commits := tools.NewStringSet() // current HEAD is done elsewhere headref, err := git.CurrentRef() if err != nil { errorChan <- err return } commits.Add(headref.Sha) for _, ref := range allWorktreeRefs { if commits.Add(ref.Sha) { // Worktree is on a different commit waitg.Add(1) // Don't need to 'cd' to worktree since we share same repo go pruneTaskGetRetainedAtRef(gitscanner, ref.Sha, retainChan, errorChan, waitg) } } } // Background task, must call waitg.Done() once at end func pruneTaskGetReachableObjects(gitscanner *lfs.GitScanner, outObjectSet *tools.StringSet, errorChan chan error, waitg *sync.WaitGroup) { defer waitg.Done() err := gitscanner.ScanAll(func(p *lfs.WrappedPointer, err error) { if err != nil { errorChan <- err return } outObjectSet.Add(p.Oid) }) if err != nil { errorChan <- err } } func init() { RegisterCommand("prune", pruneCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&pruneDryRunArg, "dry-run", "d", false, "Don't delete anything, just report") cmd.Flags().BoolVarP(&pruneVerboseArg, "verbose", "v", false, "Print full details of what is/would be deleted") cmd.Flags().BoolVarP(&pruneVerifyArg, "verify-remote", "c", false, "Verify that remote has LFS files before deleting") cmd.Flags().BoolVar(&pruneDoNotVerifyArg, "no-verify-remote", false, "Override lfs.pruneverifyremotealways and don't verify") }) } git-lfs-2.3.4/commands/command_pull.go000066400000000000000000000070101317167762300177020ustar00rootroot00000000000000package commands import ( "fmt" "sync" "time" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/progress" "github.com/git-lfs/git-lfs/tq" "github.com/rubyist/tracerx" "github.com/spf13/cobra" ) func pullCommand(cmd *cobra.Command, args []string) { requireGitVersion() requireInRepo() var remote string if len(args) > 0 { // Remote is first arg if err := git.ValidateRemote(args[0]); err != nil { Panic(err, fmt.Sprintf("Invalid remote name '%v'", args[0])) } remote = args[0] } else { // Actively find the default remote, don't just assume origin defaultRemote, err := git.DefaultRemote() if err != nil { Panic(err, "No default remote") } remote = defaultRemote } includeArg, excludeArg := getIncludeExcludeArgs(cmd) filter := buildFilepathFilter(cfg, includeArg, excludeArg) pull(remote, filter) } func pull(remote string, filter *filepathfilter.Filter) { cfg.CurrentRemote = remote ref, err := git.CurrentRef() if err != nil { Panic(err, "Could not pull") } pointers := newPointerMap() meter := progress.NewMeter(progress.WithOSEnv(cfg.Os)) singleCheckout := 
newSingleCheckout() q := newDownloadQueue(singleCheckout.manifest, remote, tq.WithProgress(meter)) gitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { if err != nil { LoggedError(err, "Scanner error: %s", err) return } if pointers.Seen(p) { return } // no need to download objects that exist locally already lfs.LinkOrCopyFromReference(p.Oid, p.Size) if lfs.ObjectExistsOfSize(p.Oid, p.Size) { singleCheckout.Run(p) return } meter.Add(p.Size) tracerx.Printf("fetch %v [%v]", p.Name, p.Oid) pointers.Add(p) q.Add(downloadTransfer(p)) }) gitscanner.Filter = filter dlwatch := q.Watch() var wg sync.WaitGroup wg.Add(1) go func() { for t := range dlwatch { for _, p := range pointers.All(t.Oid) { singleCheckout.Run(p) } } wg.Done() }() processQueue := time.Now() if err := gitscanner.ScanTree(ref.Sha); err != nil { singleCheckout.Close() ExitWithError(err) } meter.Start() gitscanner.Close() q.Wait() wg.Wait() tracerx.PerformanceSince("process queue", processQueue) singleCheckout.Close() success := true for _, err := range q.Errors() { success = false FullError(err) } if !success { c := getAPIClient() e := c.Endpoints.Endpoint("download", remote) Exit("error: failed to fetch some objects from '%s'", e.Url) } } // tracks LFS objects being downloaded, according to their unique OIDs. type pointerMap struct { pointers map[string][]*lfs.WrappedPointer mu sync.Mutex } func newPointerMap() *pointerMap { return &pointerMap{pointers: make(map[string][]*lfs.WrappedPointer)} } func (m *pointerMap) Seen(p *lfs.WrappedPointer) bool { m.mu.Lock() defer m.mu.Unlock() if existing, ok := m.pointers[p.Oid]; ok { m.pointers[p.Oid] = append(existing, p) return true } return false } func (m *pointerMap) Add(p *lfs.WrappedPointer) { m.mu.Lock() defer m.mu.Unlock() m.pointers[p.Oid] = append(m.pointers[p.Oid], p) } func (m *pointerMap) All(oid string) []*lfs.WrappedPointer { m.mu.Lock() defer m.mu.Unlock() pointers := m.pointers[oid] delete(m.pointers, oid) return pointers } func init() { RegisterCommand("pull", pullCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&includeArg, "include", "I", "", "Include a list of paths") cmd.Flags().StringVarP(&excludeArg, "exclude", "X", "", "Exclude a list of paths") }) } git-lfs-2.3.4/commands/command_push.go000066400000000000000000000064661317167762300177230ustar00rootroot00000000000000package commands import ( "os" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/rubyist/tracerx" "github.com/spf13/cobra" ) var ( pushDryRun = false pushObjectIDs = false pushAll = false useStdin = false // shares some global vars and functions with command_pre_push.go ) func uploadsBetweenRefAndRemote(ctx *uploadContext, refnames []string) { tracerx.Printf("Upload refs %v to remote %v", refnames, ctx.Remote) gitscanner, err := ctx.buildGitScanner() if err != nil { ExitWithError(err) } defer gitscanner.Close() refs, err := refsByNames(refnames) if err != nil { Error(err.Error()) Exit("Error getting local refs.") } for _, ref := range refs { if err = uploadLeftOrAll(gitscanner, ctx, ref.Name); err != nil { Print("Error scanning for Git LFS files in the %q ref", ref.Name) ExitWithError(err) } } ctx.Await() } func uploadsWithObjectIDs(ctx *uploadContext, oids []string) { for _, oid := range oids { mp, err := lfs.LocalMediaPath(oid) if err != nil { ExitWithError(errors.Wrap(err, "Unable to find local media path:")) } stat, err := os.Stat(mp) if err != nil { ExitWithError(errors.Wrap(err, "Unable to stat local media 
path")) } uploadPointers(ctx, &lfs.WrappedPointer{ Name: mp, Pointer: &lfs.Pointer{ Oid: oid, Size: stat.Size(), }, }) } ctx.Await() } func refsByNames(refnames []string) ([]*git.Ref, error) { localrefs, err := git.LocalRefs() if err != nil { return nil, err } if pushAll && len(refnames) == 0 { return localrefs, nil } reflookup := make(map[string]*git.Ref, len(localrefs)) for _, ref := range localrefs { reflookup[ref.Name] = ref } refs := make([]*git.Ref, len(refnames)) for i, name := range refnames { if ref, ok := reflookup[name]; ok { refs[i] = ref } else { refs[i] = &git.Ref{Name: name, Type: git.RefTypeOther, Sha: name} } } return refs, nil } // pushCommand pushes local objects to a Git LFS server. It takes two // arguments: // // ` ` // // Remote must be a remote name, not a URL // // pushCommand calculates the git objects to send by comparing the range // of commits between the local and remote git servers. func pushCommand(cmd *cobra.Command, args []string) { if len(args) == 0 { Print("Specify a remote and a remote branch name (`git lfs push origin master`)") os.Exit(1) } requireGitVersion() // Remote is first arg if err := git.ValidateRemote(args[0]); err != nil { Exit("Invalid remote name %q", args[0]) } ctx := newUploadContext(args[0], pushDryRun) if pushObjectIDs { if len(args) < 2 { Print("Usage: git lfs push --object-id [lfs-object-id] ...") return } uploadsWithObjectIDs(ctx, args[1:]) } else { if len(args) < 1 { Print("Usage: git lfs push --dry-run [ref]") return } uploadsBetweenRefAndRemote(ctx, args[1:]) } } func init() { RegisterCommand("push", pushCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&pushDryRun, "dry-run", "d", false, "Do everything except actually send the updates") cmd.Flags().BoolVarP(&pushObjectIDs, "object-id", "o", false, "Push LFS object ID(s)") cmd.Flags().BoolVarP(&pushAll, "all", "a", false, "Push all objects for the current ref to the remote.") }) } git-lfs-2.3.4/commands/command_smudge.go000066400000000000000000000130011317167762300202070ustar00rootroot00000000000000package commands import ( "fmt" "io" "os" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/localstorage" "github.com/git-lfs/git-lfs/tools" "github.com/git-lfs/git-lfs/tools/humanize" "github.com/git-lfs/git-lfs/tq" "github.com/spf13/cobra" ) var ( // smudgeSkip is a command-line flag belonging to the "git-lfs smudge" // command specifying whether to skip the smudge process. smudgeSkip = false ) // delayedSmudge performs a 'delayed' smudge, adding the LFS pointer to the // `*tq.TransferQueue` "q" if the file is not present locally, passes the given // filepathfilter, and is not skipped. If the pointer is malformed, or already // exists, it streams the contents to be written into the working copy to "to". // // delayedSmudge returns the number of bytes written, whether the checkout was // delayed, the *lfs.Pointer that was smudged, and an error, if one occurred. func delayedSmudge(s *git.FilterProcessScanner, to io.Writer, from io.Reader, q *tq.TransferQueue, filename string, skip bool, filter *filepathfilter.Filter) (int64, bool, *lfs.Pointer, error) { ptr, pbuf, perr := lfs.DecodeFrom(from) if perr != nil { // Write 'statusFromErr(nil)', even though 'perr != nil', since // we are about to write non-delayed smudged contents to "to". 
if err := s.WriteStatus(statusFromErr(nil)); err != nil { return 0, false, nil, err } n, err := tools.Spool(to, pbuf, localstorage.Objects().TempDir) if err != nil { return n, false, nil, errors.Wrap(err, perr.Error()) } if n != 0 { return 0, false, nil, errors.NewNotAPointerError(errors.Errorf( "Unable to parse pointer at: %q", filename, )) } return 0, false, nil, nil } lfs.LinkOrCopyFromReference(ptr.Oid, ptr.Size) path, err := lfs.LocalMediaPath(ptr.Oid) if err != nil { return 0, false, nil, err } if !skip && filter.Allows(filename) { if _, statErr := os.Stat(path); statErr != nil { q.Add(filename, path, ptr.Oid, ptr.Size) return 0, true, ptr, nil } // Write 'statusFromErr(nil)', since the object is already // present in the local cache, we will write the object's // contents without delaying. if err := s.WriteStatus(statusFromErr(nil)); err != nil { return 0, false, nil, err } n, err := ptr.Smudge(to, filename, false, nil, nil) return n, false, ptr, err } if err := s.WriteStatus(statusFromErr(nil)); err != nil { return 0, false, nil, err } n, err := ptr.Encode(to) return int64(n), false, ptr, err } // smudge smudges the given `*lfs.Pointer`, "ptr", and writes its objects // contents to the `io.Writer`, "to". // // If the encoded LFS pointer is not parse-able as a pointer, the contents of // that file will instead be spooled to a temporary location on disk and then // copied out back to Git. If the pointer file is empty, an empty file will be // written with no error. // // If the smudged object did not "pass" the include and exclude filterset, it // will not be downloaded, and the object will remain a pointer on disk, as if // the smudge filter had not been applied at all. // // Any errors encountered along the way will be returned immediately if they // were non-fatal, otherwise execution will halt and the process will be // terminated by using the `commands.Panic()` func. 
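//
// A minimal usage sketch (hypothetical caller; the pattern and the
// file name below are illustrative assumptions, not part of this
// package):
//
//	filter := filepathfilter.New([]string{"*.psd"}, nil)
//	n, err := smudge(os.Stdout, os.Stdin, "art/hero.psd", false, filter)
//	if err != nil {
//		// a NotAPointerError here means the input was ordinary
//		// file content rather than an LFS pointer
//	}
//	_ = n // number of bytes written to the working tree copy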
func smudge(to io.Writer, from io.Reader, filename string, skip bool, filter *filepathfilter.Filter) (int64, error) { ptr, pbuf, perr := lfs.DecodeFrom(from) if perr != nil { n, err := tools.Spool(to, pbuf, localstorage.Objects().TempDir) if err != nil { return 0, errors.Wrap(err, perr.Error()) } if n != 0 { return 0, errors.NewNotAPointerError(errors.Errorf( "Unable to parse pointer at: %q", filename, )) } return 0, nil } lfs.LinkOrCopyFromReference(ptr.Oid, ptr.Size) cb, file, err := lfs.CopyCallbackFile("download", filename, 1, 1) if err != nil { return 0, err } download := !skip if download { download = filter.Allows(filename) } n, err := ptr.Smudge(to, filename, download, getTransferManifest(), cb) if file != nil { file.Close() } if err != nil { ptr.Encode(to) // Download declined error is ok to skip if we weren't requesting download if !(errors.IsDownloadDeclinedError(err) && !download) { var oid string = ptr.Oid if len(oid) >= 7 { oid = oid[:7] } LoggedError(err, "Error downloading object: %s (%s): %s", filename, oid, err) if !cfg.SkipDownloadErrors() { os.Exit(2) } } } return n, nil } func smudgeCommand(cmd *cobra.Command, args []string) { requireStdin("This command should be run by the Git 'smudge' filter") lfs.InstallHooks(false) if !smudgeSkip && cfg.Os.Bool("GIT_LFS_SKIP_SMUDGE", false) { smudgeSkip = true } filter := filepathfilter.New(cfg.FetchIncludePaths(), cfg.FetchExcludePaths()) if n, err := smudge(os.Stdout, os.Stdin, smudgeFilename(args), smudgeSkip, filter); err != nil { if errors.IsNotAPointerError(err) { fmt.Fprintln(os.Stderr, err.Error()) } else { Error(err.Error()) } } else if possiblyMalformedObjectSize(n) { fmt.Fprintln(os.Stderr, "Possibly malformed smudge on Windows: see `git lfs help smudge` for more info.") } } func smudgeFilename(args []string) string { if len(args) > 0 { return args[0] } return "" } func possiblyMalformedObjectSize(n int64) bool { return n > 4*humanize.Gigabyte } func init() { RegisterCommand("smudge", smudgeCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&smudgeSkip, "skip", "s", false, "") }) } git-lfs-2.3.4/commands/command_status.go000066400000000000000000000152101317167762300202520ustar00rootroot00000000000000package commands import ( "crypto/sha256" "encoding/json" "fmt" "io" "os" "regexp" "strings" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/spf13/cobra" ) var ( porcelain = false statusJson = false ) func statusCommand(cmd *cobra.Command, args []string) { requireInRepo() // tolerate errors getting ref so this works before first commit ref, _ := git.CurrentRef() scanIndexAt := "HEAD" if ref == nil { scanIndexAt = git.RefBeforeFirstCommit } scanner, err := lfs.NewPointerScanner() if err != nil { scanner.Close() ExitWithError(err) } if porcelain { porcelainStagedPointers(scanIndexAt) return } else if statusJson { jsonStagedPointers(scanner, scanIndexAt) return } statusScanRefRange(ref) staged, unstaged, err := scanIndex(scanIndexAt) if err != nil { ExitWithError(err) } Print("\nGit LFS objects to be committed:\n") for _, entry := range staged { switch entry.Status { case lfs.StatusRename, lfs.StatusCopy: Print("\t%s -> %s (%s)", entry.SrcName, entry.DstName, formatBlobInfo(scanner, entry)) default: Print("\t%s (%s)", entry.SrcName, formatBlobInfo(scanner, entry)) } } Print("\nGit LFS objects not staged for commit:\n") for _, entry := range unstaged { Print("\t%s (%s)", entry.SrcName, formatBlobInfo(scanner, entry)) } Print("") if err = scanner.Close(); err != nil { ExitWithError(err) } } var 
z40 = regexp.MustCompile(`\^?0{40}`) func formatBlobInfo(s *lfs.PointerScanner, entry *lfs.DiffIndexEntry) string { fromSha, fromSrc, err := blobInfoFrom(s, entry) if err != nil { ExitWithError(err) } from := fmt.Sprintf("%s: %s", fromSrc, fromSha) if entry.Status == lfs.StatusAddition { return from } toSha, toSrc, err := blobInfoTo(s, entry) if err != nil { ExitWithError(err) } to := fmt.Sprintf("%s: %s", toSrc, toSha) return fmt.Sprintf("%s -> %s", from, to) } func blobInfoFrom(s *lfs.PointerScanner, entry *lfs.DiffIndexEntry) (sha, from string, err error) { var blobSha string = entry.SrcSha if z40.MatchString(blobSha) { blobSha = entry.DstSha } return blobInfo(s, blobSha, entry.SrcName) } func blobInfoTo(s *lfs.PointerScanner, entry *lfs.DiffIndexEntry) (sha, from string, err error) { var name string = entry.DstName if len(name) == 0 { name = entry.SrcName } return blobInfo(s, entry.DstSha, name) } func blobInfo(s *lfs.PointerScanner, blobSha, name string) (sha, from string, err error) { if !z40.MatchString(blobSha) { s.Scan(blobSha) if err := s.Err(); err != nil { if git.IsMissingObject(err) { return "", "?", nil } return "", "", err } var from string if s.Pointer() != nil { from = "LFS" } else { from = "Git" } return s.ContentsSha()[:7], from, nil } f, err := os.Open(name) if err != nil { return "", "", err } defer f.Close() shasum := sha256.New() if _, err = io.Copy(shasum, f); err != nil { return "", "", err } return fmt.Sprintf("%x", shasum.Sum(nil))[:7], "File", nil } func scanIndex(ref string) (staged, unstaged []*lfs.DiffIndexEntry, err error) { uncached, err := lfs.NewDiffIndexScanner(ref, false) if err != nil { return nil, nil, err } cached, err := lfs.NewDiffIndexScanner(ref, true) if err != nil { return nil, nil, err } seenNames := make(map[string]struct{}, 0) staged, err = drainScanner(seenNames, cached) if err != nil { return nil, nil, err } unstaged, err = drainScanner(seenNames, uncached) if err != nil { return nil, nil, err } return } func drainScanner(cache map[string]struct{}, scanner *lfs.DiffIndexScanner) ([]*lfs.DiffIndexEntry, error) { var to []*lfs.DiffIndexEntry for scanner.Scan() { entry := scanner.Entry() key := keyFromEntry(entry) if _, seen := cache[key]; !seen { to = append(to, entry) cache[key] = struct{}{} } } if err := scanner.Err(); err != nil { return nil, err } return to, nil } func keyFromEntry(e *lfs.DiffIndexEntry) string { var name string = e.DstName if len(name) == 0 { name = e.SrcName } return strings.Join([]string{e.SrcSha, e.DstSha, name}, ":") } func statusScanRefRange(ref *git.Ref) { if ref == nil { return } Print("On branch %s", ref.Name) remoteRef, err := git.CurrentRemoteRef() if err != nil { return } gitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { if err != nil { Panic(err, "Could not scan for Git LFS objects") return } Print("\t%s (%s)", p.Name, p.Oid) }) defer gitscanner.Close() Print("Git LFS objects to be pushed to %s:\n", remoteRef.Name) if err := gitscanner.ScanRefRange(ref.Sha, "^"+remoteRef.Sha, nil); err != nil { Panic(err, "Could not scan for Git LFS objects") } } type JSONStatusEntry struct { Status string `json:"status"` From string `json:"from,omitempty"` } type JSONStatus struct { Files map[string]JSONStatusEntry `json:"files"` } func jsonStagedPointers(scanner *lfs.PointerScanner, ref string) { staged, unstaged, err := scanIndex(ref) if err != nil { ExitWithError(err) } status := JSONStatus{Files: make(map[string]JSONStatusEntry)} for _, entry := range append(unstaged, staged...)
{ _, fromSrc, err := blobInfoFrom(scanner, entry) if err != nil { ExitWithError(err) } if fromSrc != "LFS" { continue } switch entry.Status { case lfs.StatusRename, lfs.StatusCopy: status.Files[entry.DstName] = JSONStatusEntry{ Status: string(entry.Status), From: entry.SrcName, } default: status.Files[entry.SrcName] = JSONStatusEntry{ Status: string(entry.Status), } } } ret, err := json.Marshal(status) if err != nil { ExitWithError(err) } Print(string(ret)) } func porcelainStagedPointers(ref string) { staged, unstaged, err := scanIndex(ref) if err != nil { ExitWithError(err) } seenNames := make(map[string]struct{}) for _, entry := range append(unstaged, staged...) { name := entry.DstName if len(name) == 0 { name = entry.SrcName } if _, seen := seenNames[name]; !seen { Print(porcelainStatusLine(entry)) seenNames[name] = struct{}{} } } } func porcelainStatusLine(entry *lfs.DiffIndexEntry) string { switch entry.Status { case lfs.StatusRename, lfs.StatusCopy: return fmt.Sprintf("%s %s -> %s", entry.Status, entry.SrcName, entry.DstName) case lfs.StatusModification: return fmt.Sprintf(" %s %s", entry.Status, entry.SrcName) } return fmt.Sprintf("%s %s", entry.Status, entry.SrcName) } func init() { RegisterCommand("status", statusCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&porcelain, "porcelain", "p", false, "Give the output in an easy-to-parse format for scripts.") cmd.Flags().BoolVarP(&statusJson, "json", "j", false, "Give the output in a stable json format for scripts.") }) } git-lfs-2.3.4/commands/command_track.go000066400000000000000000000202231317167762300200330ustar00rootroot00000000000000package commands import ( "bufio" "bytes" "fmt" "io/ioutil" "os" "path/filepath" "strings" "time" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/tools" "github.com/spf13/cobra" ) var ( prefixBlocklist = []string{ ".git", ".lfs", } trackLockableFlag bool trackNotLockableFlag bool trackVerboseLoggingFlag bool trackDryRunFlag bool trackNoModifyAttrsFlag bool ) func trackCommand(cmd *cobra.Command, args []string) { requireGitVersion() if config.LocalGitDir == "" { Print("Not a git repository.") os.Exit(128) } if config.LocalWorkingDir == "" { Print("This operation must be run in a work tree.") os.Exit(128) } if !cfg.Os.Bool("GIT_LFS_TRACK_NO_INSTALL_HOOKS", false) { lfs.InstallHooks(false) } if len(args) == 0 { listPatterns() return } knownPatterns := git.GetAttributePaths(config.LocalWorkingDir, config.LocalGitDir) lineEnd := getAttributeLineEnding(knownPatterns) if len(lineEnd) == 0 { lineEnd = gitLineEnding(cfg.Git) } wd, _ := tools.Getwd() wd = tools.ResolveSymlinks(wd) relpath, err := filepath.Rel(config.LocalWorkingDir, wd) if err != nil { Exit("Current directory %q outside of git working directory %q.", wd, config.LocalWorkingDir) } changedAttribLines := make(map[string]string) var readOnlyPatterns []string var writeablePatterns []string ArgsLoop: for _, unsanitizedPattern := range args { pattern := cleanRootPath(unsanitizedPattern) if !trackNoModifyAttrsFlag { for _, known := range knownPatterns { if known.Path == filepath.Join(relpath, pattern) && ((trackLockableFlag && known.Lockable) || // enabling lockable & already lockable (no change) (trackNotLockableFlag && !known.Lockable) || // disabling lockable & not lockable (no change) (!trackLockableFlag && !trackNotLockableFlag)) { // leave lockable as-is in all cases Print("%q already supported", pattern) continue ArgsLoop } } } // Generate the new / 
changed attrib line for merging encodedArg := escapeTrackPattern(pattern) lockableArg := "" if trackLockableFlag { // no need to test trackNotLockableFlag, if we got here we're disabling lockableArg = " " + git.LockableAttrib } changedAttribLines[pattern] = fmt.Sprintf("%s filter=lfs diff=lfs merge=lfs -text%v%s", encodedArg, lockableArg, lineEnd) if trackLockableFlag { readOnlyPatterns = append(readOnlyPatterns, pattern) } else { writeablePatterns = append(writeablePatterns, pattern) } Print("Tracking %q", unescapeTrackPattern(encodedArg)) } // Now read the whole local attributes file and iterate over the contents, // replacing any lines where the values have changed, and appending new lines // change this: var ( attribContents []byte attributesFile *os.File ) if !trackNoModifyAttrsFlag { attribContents, err = ioutil.ReadFile(".gitattributes") // it's fine for file to not exist if err != nil && !os.IsNotExist(err) { Print("Error reading .gitattributes file") return } // Re-generate the file with merge of old contents and new (to deal with changes) attributesFile, err = os.OpenFile(".gitattributes", os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0660) if err != nil { Print("Error opening .gitattributes file") return } defer attributesFile.Close() if len(attribContents) > 0 { scanner := bufio.NewScanner(bytes.NewReader(attribContents)) for scanner.Scan() { line := scanner.Text() fields := strings.Fields(line) if len(fields) < 1 { continue } pattern := fields[0] if newline, ok := changedAttribLines[pattern]; ok { // Replace this line (newline already embedded) attributesFile.WriteString(newline) // Remove from map so we know we don't have to add it to the end delete(changedAttribLines, pattern) } else { // Write line unchanged (replace newline) attributesFile.WriteString(line + lineEnd) } } // Our method of writing also made sure there's always a newline at end } } // Any items left in the map, write new lines at the end of the file // Note this is only new patterns, not ones which changed locking flags for pattern, newline := range changedAttribLines { if !trackNoModifyAttrsFlag { // Newline already embedded attributesFile.WriteString(newline) } // Also, for any new patterns we've added, make sure any existing git // tracked files have their timestamp updated so they will now show as // modifed note this is relative to current dir which is how we write // .gitattributes deliberately not done in parallel as a chan because // we'll be marking modified // // NOTE: `git ls-files` does not do well with leading slashes. // Since all `git-lfs track` calls are relative to the root of // the repository, the leading slash is simply removed for its // implicit counterpart. if trackVerboseLoggingFlag { Print("Searching for files matching pattern: %s", pattern) } gittracked, err := git.GetTrackedFiles(pattern) if err != nil { Exit("Error getting tracked files for %q: %s", pattern, err) } if trackVerboseLoggingFlag { Print("Found %d files previously added to Git matching pattern: %s", len(gittracked), pattern) } var matchedBlocklist bool for _, f := range gittracked { if forbidden := blocklistItem(f); forbidden != "" { Print("Pattern %s matches forbidden file %s. 
If you would like to track %s, modify .gitattributes manually.", pattern, f, f) matchedBlocklist = true } } if matchedBlocklist { continue } for _, f := range gittracked { if trackVerboseLoggingFlag || trackDryRunFlag { Print("Git LFS: touching %q", f) } if !trackDryRunFlag { now := time.Now() err := os.Chtimes(f, now, now) if err != nil { LoggedError(err, "Error marking %q modified: %s", f, err) continue } } } } // now flip read-only mode based on lockable / not lockable changes lockClient := newLockClient(cfg.CurrentRemote) err = lockClient.FixFileWriteFlagsInDir(relpath, readOnlyPatterns, writeablePatterns) if err != nil { LoggedError(err, "Error changing lockable file permissions: %s", err) } } func listPatterns() { knownPatterns := git.GetAttributePaths(config.LocalWorkingDir, config.LocalGitDir) if len(knownPatterns) < 1 { return } Print("Listing tracked patterns") for _, t := range knownPatterns { if t.Lockable { Print(" %s [lockable] (%s)", t.Path, t.Source) } else { Print(" %s (%s)", t.Path, t.Source) } } } func getAttributeLineEnding(attribs []git.AttributePath) string { for _, a := range attribs { if a.Source.Path == ".gitattributes" { return a.Source.LineEnding } } return "" } // blocklistItem returns the name of the blocklist item preventing the given // file-name from being tracked, or an empty string, if there is none. func blocklistItem(name string) string { base := filepath.Base(name) for _, p := range prefixBlocklist { if strings.HasPrefix(base, p) { return p } } return "" } var ( trackEscapePatterns = map[string]string{ " ": "[[:space:]]", "#": "\\#", } ) func escapeTrackPattern(unescaped string) string { var escaped string = strings.Replace(unescaped, `\`, "/", -1) for from, to := range trackEscapePatterns { escaped = strings.Replace(escaped, from, to, -1) } return escaped } func unescapeTrackPattern(escaped string) string { var unescaped string = escaped for to, from := range trackEscapePatterns { unescaped = strings.Replace(unescaped, from, to, -1) } return unescaped } func init() { RegisterCommand("track", trackCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&trackLockableFlag, "lockable", "l", false, "make pattern lockable, i.e. read-only unless locked") cmd.Flags().BoolVarP(&trackNotLockableFlag, "not-lockable", "", false, "remove lockable attribute from pattern") cmd.Flags().BoolVarP(&trackVerboseLoggingFlag, "verbose", "v", false, "log which files are being tracked and modified") cmd.Flags().BoolVarP(&trackDryRunFlag, "dry-run", "d", false, "preview results of running `git lfs track`") cmd.Flags().BoolVarP(&trackNoModifyAttrsFlag, "no-modify-attrs", "", false, "skip modifying .gitattributes file") }) } git-lfs-2.3.4/commands/command_uninstall.go000066400000000000000000000022621317167762300207430ustar00rootroot00000000000000package commands import ( "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/localstorage" "github.com/spf13/cobra" ) // uninstallCmd removes any configuration and hooks set by Git LFS. func uninstallCommand(cmd *cobra.Command, args []string) { opt := cmdInstallOptions() if err := lfs.UninstallFilters(opt); err != nil { Error(err.Error()) } if localInstall || lfs.InRepo() { localstorage.InitStorageOrFail() uninstallHooksCommand(cmd, args) } Print("Global Git LFS configuration has been removed.") } // uninstallHooksCmd removes any hooks created by Git LFS. 
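//
// It is reached through the subcommand registered in init() below:
//
//	$ git lfs uninstall hooks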
func uninstallHooksCommand(cmd *cobra.Command, args []string) { if err := lfs.UninstallHooks(); err != nil { Error(err.Error()) } Print("Hooks for this repository have been removed.") } func init() { RegisterCommand("uninstall", uninstallCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&localInstall, "local", "l", false, "Set the Git LFS config for the local Git repository only.") cmd.Flags().BoolVarP(&systemInstall, "system", "", false, "Set the Git LFS config in system-wide scope.") cmd.AddCommand(NewCommand("hooks", uninstallHooksCommand)) cmd.PreRun = setupLocalStorage }) } git-lfs-2.3.4/commands/command_unlock.go000066400000000000000000000070111317167762300202220ustar00rootroot00000000000000package commands import ( "encoding/json" "os" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/locking" "github.com/spf13/cobra" ) var ( unlockCmdFlags unlockFlags ) // unlockFlags holds the flags given to the `git lfs unlock` command type unlockFlags struct { // Id is the Id of the lock that is being unlocked. Id string // Force specifies whether or not the `lfs unlock` command was invoked // with "--force", signifying the user's intent to break another // individual's lock(s). Force bool } var unlockUsage = "Usage: git lfs unlock (--id my-lock-id | <path>)" func unlockCommand(cmd *cobra.Command, args []string) { hasPath := len(args) > 0 hasId := len(unlockCmdFlags.Id) > 0 if hasPath == hasId { // If there is both an `--id` AND a `<path>`, or there is // neither, print the usage and quit. Exit(unlockUsage) } lockClient := newLockClient(lockRemote) defer lockClient.Close() if hasPath { path, err := lockPath(args[0]) if err != nil { if !unlockCmdFlags.Force { Exit("Unable to determine path: %v", err.Error()) } path = args[0] } // This call can early-out unlockAbortIfFileModified(path, !os.IsNotExist(err)) err = lockClient.UnlockFile(path, unlockCmdFlags.Force) if err != nil { Exit("%s", errors.Cause(err)) } if !locksCmdFlags.JSON { Print("Unlocked %s", path) return } } else if unlockCmdFlags.Id != "" { // This call can early-out unlockAbortIfFileModifiedById(unlockCmdFlags.Id, lockClient) err := lockClient.UnlockFileById(unlockCmdFlags.Id, unlockCmdFlags.Force) if err != nil { Exit("Unable to unlock %v: %v", unlockCmdFlags.Id, errors.Cause(err)) } if !locksCmdFlags.JSON { Print("Unlocked Lock %s", unlockCmdFlags.Id) return } } else { Error(unlockUsage) } if err := json.NewEncoder(os.Stdout).Encode(struct { Unlocked bool `json:"unlocked"` }{true}); err != nil { Error(err.Error()) } return } func unlockAbortIfFileModified(path string, exists bool) { modified, err := git.IsFileModified(path) if err != nil { if !exists && unlockCmdFlags.Force { // Since git/git@b9a7d55, `git-status(1)` causes an // error when asked about files that don't exist, // causing `err != nil`, as above. // // Unlocking a file that does not exist with // --force is OK.
return } Exit(err.Error()) } if modified { if unlockCmdFlags.Force { // Only a warning Error("Warning: unlocking with uncommitted changes because --force") } else { Exit("Cannot unlock file with uncommitted changes") } } } func unlockAbortIfFileModifiedById(id string, lockClient *locking.Client) { // Get the path so we can check the status filter := map[string]string{"id": id} // try local cache first locks, _ := lockClient.SearchLocks(filter, 0, true) if len(locks) == 0 { // Fall back on calling server locks, _ = lockClient.SearchLocks(filter, 0, false) } if len(locks) == 0 { // Don't block if we can't determine the path, may be cleaning up old data return } unlockAbortIfFileModified(locks[0].Path, true) } func init() { RegisterCommand("unlock", unlockCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&lockRemote, "remote", "r", cfg.CurrentRemote, lockRemoteHelp) cmd.Flags().StringVarP(&unlockCmdFlags.Id, "id", "i", "", "unlock a lock by its ID") cmd.Flags().BoolVarP(&unlockCmdFlags.Force, "force", "f", false, "forcibly break another user's lock(s)") cmd.Flags().BoolVarP(&locksCmdFlags.JSON, "json", "", false, "print output in json") }) } git-lfs-2.3.4/commands/command_untrack.go000066400000000000000000000032401317167762300203760ustar00rootroot00000000000000package commands import ( "bufio" "io/ioutil" "os" "strings" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/lfs" "github.com/spf13/cobra" ) // untrackCommand takes a list of paths as an argument, and removes each path from the // default attributes file (.gitattributes), if it exists. func untrackCommand(cmd *cobra.Command, args []string) { if config.LocalGitDir == "" { Print("Not a git repository.") os.Exit(128) } if config.LocalWorkingDir == "" { Print("This operation must be run in a work tree.") os.Exit(128) } lfs.InstallHooks(false) if len(args) < 1 { Print("git lfs untrack [path]*") return } data, err := ioutil.ReadFile(".gitattributes") if err != nil { return } attributes := strings.NewReader(string(data)) attributesFile, err := os.Create(".gitattributes") if err != nil { Print("Error opening .gitattributes for writing") return } defer attributesFile.Close() scanner := bufio.NewScanner(attributes) // Iterate through each line of the attributes file and rewrite it, // if the path was meant to be untracked, omit it, and print a message instead. for scanner.Scan() { line := scanner.Text() if !strings.Contains(line, "filter=lfs") { attributesFile.WriteString(line + "\n") continue } path := strings.Fields(line)[0] if removePath(path, args) { Print("Untracking %q", unescapeTrackPattern(path)) } else { attributesFile.WriteString(line + "\n") } } } func removePath(path string, args []string) bool { for _, t := range args { if path == escapeTrackPattern(t) { return true } } return false } func init() { RegisterCommand("untrack", untrackCommand, nil) } git-lfs-2.3.4/commands/command_update.go000066400000000000000000000031221317167762300202100ustar00rootroot00000000000000package commands import ( "regexp" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/spf13/cobra" ) var ( updateForce = false updateManual = false ) // updateCommand is used for updating parts of Git LFS that reside under // .git/lfs. 
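//
// For example (the URL is illustrative only), a legacy entry such as:
//
//	lfs.https://example.com/repo.git/info/lfs.access = private
//
// is rewritten to "basic", and any unrecognized access value is removed.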
func updateCommand(cmd *cobra.Command, args []string) { requireGitVersion() requireInRepo() lfsAccessRE := regexp.MustCompile(`\Alfs\.(.*)\.access\z`) for key, _ := range cfg.Git.All() { matches := lfsAccessRE.FindStringSubmatch(key) if len(matches) < 2 { continue } value, _ := cfg.Git.Get(key) switch value { case "basic": case "private": git.Config.SetLocal("", key, "basic") Print("Updated %s access from %s to %s.", matches[1], value, "basic") default: git.Config.UnsetLocalKey("", key) Print("Removed invalid %s access of %s.", matches[1], value) } } if updateForce && updateManual { Exit("You cannot use --force and --manual options together") } if updateManual { Print(lfs.GetHookInstallSteps()) } else { if err := lfs.InstallHooks(updateForce); err != nil { Error(err.Error()) Exit("To resolve this, either:\n 1: run `git lfs update --manual` for instructions on how to merge hooks.\n 2: run `git lfs update --force` to overwrite your hook.") } else { Print("Updated git hooks.") } } } func init() { RegisterCommand("update", updateCommand, func(cmd *cobra.Command) { cmd.Flags().BoolVarP(&updateForce, "force", "f", false, "Overwrite existing hooks.") cmd.Flags().BoolVarP(&updateManual, "manual", "m", false, "Print instructions for manual install.") }) } git-lfs-2.3.4/commands/command_version.go000066400000000000000000000007161317167762300204210ustar00rootroot00000000000000package commands import ( "github.com/git-lfs/git-lfs/lfsapi" "github.com/spf13/cobra" ) var ( lovesComics bool ) func versionCommand(cmd *cobra.Command, args []string) { Print(lfsapi.UserAgent) if lovesComics { Print("Nothing may see Gah Lak Tus and survive!") } } func init() { RegisterCommand("version", versionCommand, func(cmd *cobra.Command) { cmd.PreRun = nil cmd.Flags().BoolVarP(&lovesComics, "comics", "c", false, "easter egg") }) } git-lfs-2.3.4/commands/commands.go000066400000000000000000000247701317167762300170450ustar00rootroot00000000000000package commands import ( "bytes" "fmt" "io" "log" "net" "os" "os/exec" "path/filepath" "strings" "sync" "time" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/locking" "github.com/git-lfs/git-lfs/progress" "github.com/git-lfs/git-lfs/tools" "github.com/git-lfs/git-lfs/tq" ) // Populate man pages //go:generate go run ../docs/man/mangen.go var ( Debugging = false ErrorBuffer = &bytes.Buffer{} ErrorWriter = io.MultiWriter(os.Stderr, ErrorBuffer) OutputWriter = io.MultiWriter(os.Stdout, ErrorBuffer) ManPages = make(map[string]string, 20) cfg = config.Config tqManifest = make(map[string]*tq.Manifest) apiClient *lfsapi.Client global sync.Mutex includeArg string excludeArg string ) // getTransferManifest builds a tq.Manifest from the global os and git // environments. func getTransferManifest() *tq.Manifest { return getTransferManifestOperationRemote("", "") } // getTransferManifestOperationRemote builds a tq.Manifest from the global os // and git environments and operation-specific and remote-specific settings. // Operation must be "download", "upload", or the empty string. 
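//
// For example, the upload path builds its manifest as (sketch, with an
// assumed remote name):
//
//	manifest := getTransferManifestOperationRemote("upload", "origin")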
func getTransferManifestOperationRemote(operation, remote string) *tq.Manifest { c := getAPIClient() global.Lock() defer global.Unlock() k := fmt.Sprintf("%s.%s", operation, remote) if tqManifest[k] == nil { tqManifest[k] = tq.NewManifestClientOperationRemote(c, operation, remote) } return tqManifest[k] } func getAPIClient() *lfsapi.Client { global.Lock() defer global.Unlock() if apiClient == nil { c, err := lfsapi.NewClient(cfg.Os, cfg.Git) if err != nil { ExitWithError(err) } apiClient = c } return apiClient } func closeAPIClient() error { global.Lock() defer global.Unlock() if apiClient == nil { return nil } return apiClient.Close() } func newLockClient(remote string) *locking.Client { storageConfig := config.Config.StorageConfig() lockClient, err := locking.NewClient(remote, getAPIClient()) if err == nil { err = lockClient.SetupFileCache(storageConfig.LfsStorageDir) } if err != nil { Exit("Unable to create lock system: %v", err.Error()) } // Configure dirs lockClient.LocalWorkingDir = config.LocalWorkingDir lockClient.LocalGitDir = config.LocalGitDir lockClient.SetLockableFilesReadOnly = cfg.SetLockableFilesReadOnly() return lockClient } // newDownloadCheckQueue builds a checking queue, checks that objects are there but doesn't download func newDownloadCheckQueue(manifest *tq.Manifest, remote string, options ...tq.Option) *tq.TransferQueue { allOptions := make([]tq.Option, 0, len(options)+1) allOptions = append(allOptions, options...) allOptions = append(allOptions, tq.DryRun(true)) return newDownloadQueue(manifest, remote, allOptions...) } // newDownloadQueue builds a DownloadQueue, allowing concurrent downloads. func newDownloadQueue(manifest *tq.Manifest, remote string, options ...tq.Option) *tq.TransferQueue { return tq.NewTransferQueue(tq.Download, manifest, remote, options...) } // newUploadQueue builds an UploadQueue, allowing `workers` concurrent uploads. func newUploadQueue(manifest *tq.Manifest, remote string, options ...tq.Option) *tq.TransferQueue { return tq.NewTransferQueue(tq.Upload, manifest, remote, options...) } func buildFilepathFilter(config *config.Configuration, includeArg, excludeArg *string) *filepathfilter.Filter { inc, exc := determineIncludeExcludePaths(config, includeArg, excludeArg) return filepathfilter.New(inc, exc) } func downloadTransfer(p *lfs.WrappedPointer) (name, path, oid string, size int64) { path, _ = lfs.LocalMediaPath(p.Oid) return p.Name, path, p.Oid, p.Size } // Error prints a formatted message to Stderr. It also gets printed to the // panic log if one is created for this command. func Error(format string, args ...interface{}) { if len(args) == 0 { fmt.Fprintln(ErrorWriter, format) return } fmt.Fprintf(ErrorWriter, format+"\n", args...) } // Print prints a formatted message to Stdout. It also gets printed to the // panic log if one is created for this command. func Print(format string, args ...interface{}) { if len(args) == 0 { fmt.Fprintln(OutputWriter, format) return } fmt.Fprintf(OutputWriter, format+"\n", args...) } // Exit prints a formatted message and exits. func Exit(format string, args ...interface{}) { Error(format, args...) os.Exit(2) } // ExitWithError either panics with a full stack trace for fatal errors, or // simply prints the error message and exits immediately. func ExitWithError(err error) { errorWith(err, Panic, Exit) } // FullError prints either a full stack trace for fatal errors, or just the // error message. 
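//
// A sketch of the intended call pattern (doWork is a hypothetical
// helper, not part of this package):
//
//	if err := doWork(); err != nil {
//		FullError(err)
//	}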
func FullError(err error) { errorWith(err, LoggedError, Error) } func errorWith(err error, fatalErrFn func(error, string, ...interface{}), errFn func(string, ...interface{})) { if Debugging || errors.IsFatalError(err) { fatalErrFn(err, "%s", err) return } errFn("%s", err) } // Debug prints a formatted message if debugging is enabled. The formatted // message also shows up in the panic log, if created. func Debug(format string, args ...interface{}) { if !Debugging { return } log.Printf(format, args...) } // LoggedError prints the given message formatted with its arguments (if any) to // Stderr. If an empty string is passed as the "format" argument, only the // standard error logging message will be printed, and the error's body will be // omitted. // // It also writes a stack trace for the error to a log file without exiting. func LoggedError(err error, format string, args ...interface{}) { if len(format) > 0 { Error(format, args...) } file := handlePanic(err) if len(file) > 0 { fmt.Fprintf(os.Stderr, "\nErrors logged to %s\nUse `git lfs logs last` to view the log.\n", file) } } // Panic prints a formatted message, and writes a stack trace for the error to // a log file before exiting. func Panic(err error, format string, args ...interface{}) { LoggedError(err, format, args...) os.Exit(2) } func Cleanup() { if err := lfs.ClearTempObjects(); err != nil { fmt.Fprintf(os.Stderr, "Error clearing old temp files: %s\n", err) } } func PipeMediaCommand(name string, args ...string) error { return PipeCommand("bin/"+name, args...) } func PipeCommand(name string, args ...string) error { cmd := exec.Command(name, args...) cmd.Stdin = os.Stdin cmd.Stderr = os.Stderr cmd.Stdout = os.Stdout return cmd.Run() } func requireStdin(msg string) { var out string stat, err := os.Stdin.Stat() if err != nil { out = fmt.Sprintf("Cannot read from STDIN. %s (%s)", msg, err) } else if (stat.Mode() & os.ModeCharDevice) != 0 { out = fmt.Sprintf("Cannot read from STDIN. 
%s", msg) } if len(out) > 0 { Error(out) os.Exit(1) } } func requireInRepo() { if !lfs.InRepo() { Print("Not in a git repository.") os.Exit(128) } } func handlePanic(err error) string { if err == nil { return "" } return logPanic(err) } func logPanic(loggedError error) string { var ( fmtWriter io.Writer = os.Stderr lineEnding string = "\n" ) now := time.Now() name := now.Format("20060102T150405.999999999") full := filepath.Join(config.LocalLogDir, name+".log") if err := os.MkdirAll(config.LocalLogDir, 0755); err != nil { full = "" fmt.Fprintf(fmtWriter, "Unable to log panic to %s: %s\n\n", config.LocalLogDir, err.Error()) } else if file, err := os.Create(full); err != nil { filename := full full = "" defer func() { fmt.Fprintf(fmtWriter, "Unable to log panic to %s\n\n", filename) logPanicToWriter(fmtWriter, err, lineEnding) }() } else { fmtWriter = file lineEnding = gitLineEnding(cfg.Git) defer file.Close() } logPanicToWriter(fmtWriter, loggedError, lineEnding) return full } func ipAddresses() []string { ips := make([]string, 0, 1) ifaces, err := net.Interfaces() if err != nil { ips = append(ips, "Error getting network interface: "+err.Error()) return ips } for _, i := range ifaces { if i.Flags&net.FlagUp == 0 { continue // interface down } if i.Flags&net.FlagLoopback != 0 { continue // loopback interface } addrs, _ := i.Addrs() l := make([]string, 0, 1) if err != nil { ips = append(ips, "Error getting IP address: "+err.Error()) continue } for _, addr := range addrs { var ip net.IP switch v := addr.(type) { case *net.IPNet: ip = v.IP case *net.IPAddr: ip = v.IP } if ip == nil || ip.IsLoopback() { continue } l = append(l, ip.String()) } if len(l) > 0 { ips = append(ips, strings.Join(l, " ")) } } return ips } func logPanicToWriter(w io.Writer, loggedError error, le string) { // log the version gitV, err := git.Config.Version() if err != nil { gitV = "Error getting git version: " + err.Error() } fmt.Fprint(w, config.VersionDesc+le) fmt.Fprint(w, gitV+le) // log the command that was run fmt.Fprint(w, le) fmt.Fprintf(w, "$ %s", filepath.Base(os.Args[0])) if len(os.Args) > 0 { fmt.Fprintf(w, " %s", strings.Join(os.Args[1:], " ")) } fmt.Fprint(w, le) // log the error message and stack trace w.Write(ErrorBuffer.Bytes()) fmt.Fprint(w, le) fmt.Fprintf(w, "%+v"+le, loggedError) for key, val := range errors.Context(err) { fmt.Fprintf(w, "%s=%v"+le, key, val) } fmt.Fprint(w, le+"Current time in UTC: "+le) fmt.Fprint(w, time.Now().UTC().Format("2006-01-02 15:04:05")+le) fmt.Fprint(w, le+"ENV:"+le) // log the environment for _, env := range lfs.Environ(cfg, getTransferManifest()) { fmt.Fprint(w, env+le) } fmt.Fprint(w, le+"Client IP addresses:"+le) for _, ip := range ipAddresses() { fmt.Fprint(w, ip+le) } } func determineIncludeExcludePaths(config *config.Configuration, includeArg, excludeArg *string) (include, exclude []string) { if includeArg == nil { include = config.FetchIncludePaths() } else { include = tools.CleanPaths(*includeArg, ",") } if excludeArg == nil { exclude = config.FetchExcludePaths() } else { exclude = tools.CleanPaths(*excludeArg, ",") } return } func buildProgressMeter(dryRun bool) *progress.ProgressMeter { return progress.NewMeter( progress.WithOSEnv(cfg.Os), progress.DryRun(dryRun), ) } func requireGitVersion() { minimumGit := "1.8.2" if !git.Config.IsGitVersionAtLeast(minimumGit) { gitver, err := git.Config.Version() if err != nil { Exit("Error getting git version: %s", err) } Exit("git version >= %s is required for Git LFS, your version: %s", minimumGit, gitver) } } func init() 
{ log.SetOutput(ErrorWriter) } git-lfs-2.3.4/commands/commands_test.go000066400000000000000000000017771317167762300201060ustar00rootroot00000000000000package commands import ( "testing" "github.com/git-lfs/git-lfs/config" "github.com/stretchr/testify/assert" ) var ( testcfg = config.NewFrom(config.Values{ Git: map[string][]string{ "lfs.fetchinclude": []string{"/default/include"}, "lfs.fetchexclude": []string{"/default/exclude"}, }, }) ) func TestDetermineIncludeExcludePathsReturnsCleanedPaths(t *testing.T) { inc := "/some/include" exc := "/some/exclude" i, e := determineIncludeExcludePaths(testcfg, &inc, &exc) assert.Equal(t, []string{"/some/include"}, i) assert.Equal(t, []string{"/some/exclude"}, e) } func TestDetermineIncludeExcludePathsReturnsEmptyPaths(t *testing.T) { inc := "" exc := "" i, e := determineIncludeExcludePaths(testcfg, &inc, &exc) assert.Empty(t, i) assert.Empty(t, e) } func TestDetermineIncludeExcludePathsReturnsDefaultsWhenAbsent(t *testing.T) { i, e := determineIncludeExcludePaths(testcfg, nil, nil) assert.Equal(t, []string{"/default/include"}, i) assert.Equal(t, []string{"/default/exclude"}, e) } git-lfs-2.3.4/commands/path.go000066400000000000000000000004341317167762300161670ustar00rootroot00000000000000package commands import "strings" func gitLineEnding(git env) string { value, _ := git.Get("core.autocrlf") switch strings.ToLower(value) { case "input", "true", "t", "1": return "\r\n" default: return osLineEnding() } } type env interface { Get(string) (string, bool) } git-lfs-2.3.4/commands/path_nix.go000066400000000000000000000003161317167762300170440ustar00rootroot00000000000000// +build !windows package commands // cleanRootPath is a no-op on every platform except Windows func cleanRootPath(pattern string) string { return pattern } func osLineEnding() string { return "\n" } git-lfs-2.3.4/commands/path_windows.go000066400000000000000000000025651317167762300177500ustar00rootroot00000000000000// +build windows package commands import ( "path/filepath" "regexp" "strings" "sync" "github.com/git-lfs/git-lfs/subprocess" ) var ( winBashPrefix string winBashMu sync.Mutex winBashRe *regexp.Regexp ) func osLineEnding() string { return "\r\n" } // cleanRootPath replaces the windows root path prefix with a unix path prefix: // "/". Git Bash (provided with Git For Windows) expands a path like "/foo" to // the actual Windows directory, but with forward slashes. You can see this // for yourself: // // $ git /foo // git: 'C:/Program Files/Git/foo' is not a git command. See 'git --help'. 
// // You can check the path with `pwd -W`: // // $ cd / // $ pwd // / // $ pwd -W // c:/Program Files/Git func cleanRootPath(pattern string) string { winBashMu.Lock() defer winBashMu.Unlock() // check if path starts with windows drive letter if !winPathHasDrive(pattern) { return pattern } if len(winBashPrefix) < 1 { // cmd.Path is something like C:\Program Files\Git\usr\bin\pwd.exe cmd := subprocess.ExecCommand("pwd") winBashPrefix = strings.Replace(filepath.Dir(filepath.Dir(filepath.Dir(cmd.Path))), `\`, "/", -1) + "/" } return strings.Replace(pattern, winBashPrefix, "/", 1) } func winPathHasDrive(pattern string) bool { if winBashRe == nil { winBashRe = regexp.MustCompile(`\A\w{1}:[/\\]`) } return winBashRe.MatchString(pattern) } git-lfs-2.3.4/commands/pointers.go000066400000000000000000000004501317167762300170740ustar00rootroot00000000000000package commands import "github.com/git-lfs/git-lfs/lfs" func collectPointers(pointerCh *lfs.PointerChannelWrapper) ([]*lfs.WrappedPointer, error) { var pointers []*lfs.WrappedPointer for p := range pointerCh.Results { pointers = append(pointers, p) } return pointers, pointerCh.Wait() } git-lfs-2.3.4/commands/pull.go000066400000000000000000000064621317167762300162160ustar00rootroot00000000000000package commands import ( "bytes" "fmt" "io" "os" "sync" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/subprocess" "github.com/git-lfs/git-lfs/tq" ) // Handles the process of checking out a single file, and updating the git // index. func newSingleCheckout() *singleCheckout { // Get a converter from repo-relative to cwd-relative // Since writing data & calling git update-index must be relative to cwd pathConverter, err := lfs.NewRepoToCurrentPathConverter() if err != nil { Panic(err, "Could not convert file paths") } return &singleCheckout{ gitIndexer: &gitIndexer{}, pathConverter: pathConverter, manifest: getTransferManifest(), } } type singleCheckout struct { gitIndexer *gitIndexer pathConverter lfs.PathConverter manifest *tq.Manifest } func (c *singleCheckout) Run(p *lfs.WrappedPointer) { cwdfilepath := c.pathConverter.Convert(p.Name) // Check the content - either missing or still this pointer (not exist is ok) filepointer, err := lfs.DecodePointerFromFile(cwdfilepath) if err != nil && !os.IsNotExist(err) { if errors.IsNotAPointerError(err) { // File has non-pointer content, leave it alone return } LoggedError(err, "Checkout error: %s", err) return } if filepointer != nil && filepointer.Oid != p.Oid { // User has probably manually reset a file to another commit // while leaving it a pointer; don't mess with this return } err = lfs.PointerSmudgeToFile(cwdfilepath, p.Pointer, false, c.manifest, nil) if err != nil { if errors.IsDownloadDeclinedError(err) { // acceptable error, data not local (fetch not run or include/exclude) LoggedError(err, "Skipped checkout for %q, content not local. Use fetch to download.", p.Name) } else { FullError(fmt.Errorf("Could not check out %q", p.Name)) } return } // errors are only returned when the gitIndexer is starting a new cmd if err := c.gitIndexer.Add(cwdfilepath); err != nil { Panic(err, "Could not update the index") } } func (c *singleCheckout) Close() { if err := c.gitIndexer.Close(); err != nil { LoggedError(err, "Error updating the git index:\n%s", c.gitIndexer.Output()) } } // Don't fire up the update-index command until we have at least one file to // give it.
Otherwise git interprets the lack of arguments to mean param-less update-index // which can trigger entire working copy to be re-examined, which triggers clean filters // and which has unexpected side effects (e.g. downloading filtered-out files) type gitIndexer struct { cmd *subprocess.Cmd input io.WriteCloser output bytes.Buffer mu sync.Mutex } func (i *gitIndexer) Add(path string) error { i.mu.Lock() defer i.mu.Unlock() if i.cmd == nil { // Fire up the update-index command cmd := git.UpdateIndexFromStdin() cmd.Stdout = &i.output cmd.Stderr = &i.output stdin, err := cmd.StdinPipe() if err != nil { return err } err = cmd.Start() if err != nil { return err } i.cmd = cmd i.input = stdin } i.input.Write([]byte(path + "\n")) return nil } func (i *gitIndexer) Output() string { return i.output.String() } func (i *gitIndexer) Close() error { i.mu.Lock() defer i.mu.Unlock() if i.input != nil { i.input.Close() } if i.cmd != nil { return i.cmd.Wait() } return nil } git-lfs-2.3.4/commands/run.go000066400000000000000000000065601317167762300160450ustar00rootroot00000000000000package commands import ( "fmt" "os" "path/filepath" "strings" "sync" "time" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/localstorage" "github.com/spf13/cobra" ) var ( commandFuncs []func() *cobra.Command commandMu sync.Mutex ) // NewCommand creates a new 'git-lfs' sub command, given a command name and // command run function. // // Each command will initialize the local storage ('.git/lfs') directory when // run, unless the PreRun hook is set to nil. func NewCommand(name string, runFn func(*cobra.Command, []string)) *cobra.Command { return &cobra.Command{Use: name, Run: runFn, PreRun: resolveLocalStorage} } // RegisterCommand creates a direct 'git-lfs' subcommand, given a command name, // a command run function, and an optional callback during the command // initialization process. // // The 'git-lfs' command initialization is deferred until the `commands.Run()` // function is called. The fn callback is passed the output from NewCommand, // and gives the caller the flexibility to customize the command by adding // flags, tweaking command hooks, etc. func RegisterCommand(name string, runFn func(cmd *cobra.Command, args []string), fn func(cmd *cobra.Command)) { commandMu.Lock() commandFuncs = append(commandFuncs, func() *cobra.Command { cmd := NewCommand(name, runFn) if fn != nil { fn(cmd) } return cmd }) commandMu.Unlock() } // Run initializes the 'git-lfs' command and runs it with the given stdin and // command line args. func Run() { root := NewCommand("git-lfs", gitlfsCommand) root.PreRun = nil // Set up help/usage funcs based on manpage text root.SetHelpTemplate("{{.UsageString}}") root.SetHelpFunc(helpCommand) root.SetUsageFunc(usageCommand) for _, f := range commandFuncs { if cmd := f(); cmd != nil { root.AddCommand(cmd) } } root.Execute() closeAPIClient() } func gitlfsCommand(cmd *cobra.Command, args []string) { versionCommand(cmd, args) cmd.Usage() } // resolveLocalStorage implements the `func(*cobra.Command, []string)` signature // necessary to wire it up via `cobra.Command.PreRun`. When run, this function // will resolve the localstorage directories. 
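//
// NewCommand (above) installs it as the default PreRun hook, the
// equivalent of:
//
//	cmd := &cobra.Command{Use: name, Run: runFn, PreRun: resolveLocalStorage}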
func resolveLocalStorage(cmd *cobra.Command, args []string) { localstorage.ResolveDirs() setupHTTPLogger(getAPIClient()) } func setupLocalStorage(cmd *cobra.Command, args []string) { config.ResolveGitBasicDirs() setupHTTPLogger(getAPIClient()) } func helpCommand(cmd *cobra.Command, args []string) { if len(args) == 0 { printHelp("git-lfs") } else { printHelp(args[0]) } } func usageCommand(cmd *cobra.Command) error { printHelp(cmd.Name()) return nil } func printHelp(commandName string) { if txt, ok := ManPages[commandName]; ok { fmt.Fprintf(os.Stdout, "%s\n", strings.TrimSpace(txt)) } else { fmt.Fprintf(os.Stdout, "Sorry, no usage text found for %q\n", commandName) } } func setupHTTPLogger(c *lfsapi.Client) { if c == nil || len(os.Getenv("GIT_LOG_STATS")) < 1 { return } logBase := filepath.Join(config.LocalLogDir, "http") if err := os.MkdirAll(logBase, 0755); err != nil { fmt.Fprintf(os.Stderr, "Error logging http stats: %s\n", err) return } logFile := fmt.Sprintf("http-%d.log", time.Now().Unix()) file, err := os.Create(filepath.Join(logBase, logFile)) if err != nil { fmt.Fprintf(os.Stderr, "Error logging http stats: %s\n", err) } else { c.LogHTTPStats(file) } } git-lfs-2.3.4/commands/uploader.go000066400000000000000000000305511317167762300170510ustar00rootroot00000000000000package commands import ( "fmt" "net/url" "os" "path/filepath" "strconv" "strings" "sync" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/locking" "github.com/git-lfs/git-lfs/progress" "github.com/git-lfs/git-lfs/tools" "github.com/git-lfs/git-lfs/tq" "github.com/rubyist/tracerx" ) func uploadLeftOrAll(g *lfs.GitScanner, ctx *uploadContext, ref string) error { if pushAll { if err := g.ScanRefWithDeleted(ref, nil); err != nil { return err } } else { if err := g.ScanLeftToRemote(ref, nil); err != nil { return err } } return ctx.scannerError() } type uploadContext struct { Remote string DryRun bool Manifest *tq.Manifest uploadedOids tools.StringSet meter progress.Meter tq *tq.TransferQueue committerName string committerEmail string trackedLocksMu *sync.Mutex // ALL verifiable locks lockVerifyState verifyState ourLocks map[string]locking.Lock theirLocks map[string]locking.Lock // locks from ourLocks that were modified in this push ownedLocks []locking.Lock // locks from theirLocks that were modified in this push unownedLocks []locking.Lock // allowMissing specifies whether pushes containing missing/corrupt // pointers should allow pushing Git blobs allowMissing bool // tracks errors from gitscanner callbacks scannerErr error errMu sync.Mutex } // Determines if a filename is lockable. Serves as a wrapper around theirLocks // that implements GitScannerSet. 
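//
// For example (sketch; the paths and lock values are assumptions):
//
//	l := &gitScannerLockables{m: map[string]locking.Lock{"docs/spec.psd": {}}}
//	l.Contains("docs/spec.psd") // => true
//	l.Contains("README.md")     // => false
//
// A nil *gitScannerLockables is safe to query and always reports false.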
type gitScannerLockables struct { m map[string]locking.Lock } func (l *gitScannerLockables) Contains(name string) bool { if l == nil { return false } _, ok := l.m[name] return ok } type verifyState byte const ( verifyStateUnknown verifyState = iota verifyStateEnabled verifyStateDisabled ) func newUploadContext(remote string, dryRun bool) *uploadContext { cfg.CurrentRemote = remote ctx := &uploadContext{ Remote: remote, Manifest: getTransferManifestOperationRemote("upload", remote), DryRun: dryRun, uploadedOids: tools.NewStringSet(), ourLocks: make(map[string]locking.Lock), theirLocks: make(map[string]locking.Lock), trackedLocksMu: new(sync.Mutex), allowMissing: cfg.Git.Bool("lfs.allowincompletepush", true), } ctx.meter = buildProgressMeter(ctx.DryRun) ctx.tq = newUploadQueue(ctx.Manifest, ctx.Remote, tq.WithProgress(ctx.meter), tq.DryRun(ctx.DryRun)) ctx.committerName, ctx.committerEmail = cfg.CurrentCommitter() // Do not check locks for standalone transfer, because there is no LFS // server to ask. if ctx.Manifest.IsStandaloneTransfer() { ctx.lockVerifyState = verifyStateDisabled return ctx } ourLocks, theirLocks, verifyState := verifyLocks(remote) ctx.lockVerifyState = verifyState for _, l := range theirLocks { ctx.theirLocks[l.Path] = l } for _, l := range ourLocks { ctx.ourLocks[l.Path] = l } return ctx } func verifyLocks(remote string) (ours, theirs []locking.Lock, st verifyState) { endpoint := getAPIClient().Endpoints.Endpoint("upload", remote) state := getVerifyStateFor(endpoint) if state == verifyStateDisabled { return } lockClient := newLockClient(remote) ours, theirs, err := lockClient.VerifiableLocks(0) if err != nil { if errors.IsNotImplementedError(err) { disableFor(endpoint) } else if state == verifyStateUnknown || state == verifyStateEnabled { if errors.IsAuthError(err) { if state == verifyStateUnknown { Error("WARNING: Authentication error: %s", err) } else if state == verifyStateEnabled { Exit("ERROR: Authentication error: %s", err) } } else { Print("Remote %q does not support the LFS locking API. Consider disabling it with:", remote) Print(" $ git config lfs.%s.locksverify false", endpoint.Url) if state == verifyStateEnabled { ExitWithError(err) } } } } else if state == verifyStateUnknown { Print("Locking support detected on remote %q. Consider enabling it with:", remote) Print(" $ git config lfs.%s.locksverify true", endpoint.Url) } return ours, theirs, state } func (c *uploadContext) scannerError() error { c.errMu.Lock() defer c.errMu.Unlock() return c.scannerErr } func (c *uploadContext) addScannerError(err error) { c.errMu.Lock() defer c.errMu.Unlock() if c.scannerErr != nil { c.scannerErr = fmt.Errorf("%v\n%v", c.scannerErr, err) } else { c.scannerErr = err } } func (c *uploadContext) buildGitScanner() (*lfs.GitScanner, error) { gitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { if err != nil { c.addScannerError(err) } else { uploadPointers(c, p) } }) gitscanner.FoundLockable = func(name string) { if lock, ok := c.theirLocks[name]; ok { c.trackedLocksMu.Lock() c.unownedLocks = append(c.unownedLocks, lock) c.trackedLocksMu.Unlock() } } gitscanner.PotentialLockables = &gitScannerLockables{m: c.theirLocks} return gitscanner, gitscanner.RemoteForPush(c.Remote) } // AddUpload adds the given oid to the set of oids that have been uploaded in // the current process. func (c *uploadContext) SetUploaded(oid string) { c.uploadedOids.Add(oid) } // HasUploaded determines if the given oid has already been uploaded in the // current process. 
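//
// Together with SetUploaded it deduplicates transfers, e.g. (sketch):
//
//	if !ctx.HasUploaded(p.Oid) {
//		// ... enqueue the transfer ...
//		ctx.SetUploaded(p.Oid)
//	}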
func (c *uploadContext) HasUploaded(oid string) bool { return c.uploadedOids.Contains(oid) } func (c *uploadContext) prepareUpload(unfiltered ...*lfs.WrappedPointer) (*tq.TransferQueue, []*lfs.WrappedPointer) { numUnfiltered := len(unfiltered) uploadables := make([]*lfs.WrappedPointer, 0, numUnfiltered) // XXX(taylor): temporary measure to fix duplicate (broken) results from // scanner uniqOids := tools.NewStringSet() // separate out objects that _should_ be uploaded, but don't exist in // .git/lfs/objects. Those will be skipped if the server already has them. for _, p := range unfiltered { // object already uploaded in this process, or we've already // seen this OID (see above), skip! if uniqOids.Contains(p.Oid) || c.HasUploaded(p.Oid) { continue } uniqOids.Add(p.Oid) // canUpload determines whether the current pointer "p" can be // uploaded through the TransferQueue below. It is set to false // only when the file is locked by someone other than the // current committer. var canUpload bool = true if lock, ok := c.theirLocks[p.Name]; ok { c.trackedLocksMu.Lock() c.unownedLocks = append(c.unownedLocks, lock) c.trackedLocksMu.Unlock() // If the verification state is enabled, this failed // locks verification means that the push should fail. // // If the state is disabled, the verification error is // silent and the user can upload. // // If the state is undefined, the verification error is // sent as a warning and the user can upload. canUpload = c.lockVerifyState != verifyStateEnabled } if lock, ok := c.ourLocks[p.Name]; ok { c.trackedLocksMu.Lock() c.ownedLocks = append(c.ownedLocks, lock) c.trackedLocksMu.Unlock() } if canUpload { // estimate in meter early (even if it's not going into // uploadables), since we will call Skip() based on the // results of the download check queue. c.meter.Add(p.Size) uploadables = append(uploadables, p) } } return c.tq, uploadables } func uploadPointers(c *uploadContext, unfiltered ...*lfs.WrappedPointer) { if c.DryRun { for _, p := range unfiltered { if c.HasUploaded(p.Oid) { continue } Print("push %s => %s", p.Oid, p.Name) c.SetUploaded(p.Oid) } return } q, pointers := c.prepareUpload(unfiltered...)
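// For each prepared pointer below: build a tq.Transfer via uploadTransfer
// (cleaning the working-tree copy into .git/lfs/objects when the object is
// missing locally), enqueue it, and mark the OID as uploaded for this process.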
for _, p := range pointers { t, err := uploadTransfer(p, c.allowMissing) if err != nil && !errors.IsCleanPointerError(err) { ExitWithError(err) } q.Add(t.Name, t.Path, t.Oid, t.Size) c.SetUploaded(p.Oid) } } func (c *uploadContext) Await() { c.tq.Wait() var missing = make(map[string]string) var corrupt = make(map[string]string) var others = make([]error, 0, len(c.tq.Errors())) for _, err := range c.tq.Errors() { if malformed, ok := err.(*tq.MalformedObjectError); ok { if malformed.Missing() { missing[malformed.Name] = malformed.Oid } else if malformed.Corrupt() { corrupt[malformed.Name] = malformed.Oid } } else { others = append(others, err) } } for _, err := range others { FullError(err) } if len(missing) > 0 || len(corrupt) > 0 { var action string if c.allowMissing { action = "missing objects" } else { action = "failed" } Print("LFS upload %s:", action) for name, oid := range missing { Print(" (missing) %s (%s)", name, oid) } for name, oid := range corrupt { Print(" (corrupt) %s (%s)", name, oid) } if !c.allowMissing { os.Exit(2) } } if len(others) > 0 { os.Exit(2) } c.trackedLocksMu.Lock() if ul := len(c.unownedLocks); ul > 0 { Print("Unable to push %d locked file(s):", ul) for _, unowned := range c.unownedLocks { Print("* %s - %s", unowned.Path, unowned.Owner) } if c.lockVerifyState == verifyStateEnabled { Exit("ERROR: Cannot update locked files.") } else { Error("WARNING: The above files would have halted this push.") } } else if len(c.ownedLocks) > 0 { Print("Consider unlocking your own locked file(s): (`git lfs unlock `)") for _, owned := range c.ownedLocks { Print("* %s", owned.Path) } } c.trackedLocksMu.Unlock() } var ( githubHttps, _ = url.Parse("https://github.com") githubSsh, _ = url.Parse("ssh://github.com") // hostsWithKnownLockingSupport is a list of scheme-less hostnames // (without port numbers) that are known to implement the LFS locking // API. // // Additions are welcome. hostsWithKnownLockingSupport = []*url.URL{ githubHttps, githubSsh, } ) func uploadTransfer(p *lfs.WrappedPointer, allowMissing bool) (*tq.Transfer, error) { filename := p.Name oid := p.Oid localMediaPath, err := lfs.LocalMediaPath(oid) if err != nil { return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) } if len(filename) > 0 { if err = ensureFile(filename, localMediaPath, allowMissing); err != nil && !errors.IsCleanPointerError(err) { return nil, err } } return &tq.Transfer{ Name: filename, Path: localMediaPath, Oid: oid, Size: p.Size, }, nil } // ensureFile makes sure that the cleanPath exists before pushing it. If it // does not exist, it attempts to clean it by reading the file at smudgePath. func ensureFile(smudgePath, cleanPath string, allowMissing bool) error { if _, err := os.Stat(cleanPath); err == nil { return nil } localPath := filepath.Join(config.LocalWorkingDir, smudgePath) file, err := os.Open(localPath) if err != nil { if allowMissing { return nil } return err } defer file.Close() stat, err := file.Stat() if err != nil { return err } cleaned, err := lfs.PointerClean(file, file.Name(), stat.Size(), nil) if cleaned != nil { cleaned.Teardown() } if err != nil { return err } return nil } // getVerifyStateFor returns whether or not lock verification is enabled for the // given "endpoint". If no state has been explicitly set, an "unknown" state // will be returned instead. 
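// Illustrative sketch (not part of the original source; the URL is
// hypothetical):
//
//	$ git config lfs.https://example.com/repo.git/info/lfs.locksverify true
//
// With the key set, getVerifyStateFor returns verifyStateEnabled ("true") or
// verifyStateDisabled ("false"). With it unset, hosts matched by
// supportsLockingAPI (e.g. github.com) resolve to verifyStateEnabled, and all
// others to verifyStateUnknown.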
func getVerifyStateFor(endpoint lfsapi.Endpoint) verifyState { uc := config.NewURLConfig(cfg.Git) v, ok := uc.Get("lfs", endpoint.Url, "locksverify") if !ok { if supportsLockingAPI(endpoint) { return verifyStateEnabled } return verifyStateUnknown } if enabled, _ := strconv.ParseBool(v); enabled { return verifyStateEnabled } return verifyStateDisabled } // supportsLockingAPI returns whether or not a given lfsapi.Endpoint "e" // is known to support the LFS locking API by whether or not its hostname is // included in the list above. func supportsLockingAPI(e lfsapi.Endpoint) bool { u, err := url.Parse(e.Url) if err != nil { tracerx.Printf("commands: unable to parse %q to determine locking support: %v", e.Url, err) return false } for _, supported := range hostsWithKnownLockingSupport { if supported.Scheme == u.Scheme && supported.Hostname() == u.Hostname() && strings.HasPrefix(u.Path, supported.Path) { return true } } return false } // disableFor disables lock verification for the given lfsapi.Endpoint, // "endpoint". func disableFor(endpoint lfsapi.Endpoint) error { tracerx.Printf("commands: disabling lock verification for %q", endpoint.Url) key := strings.Join([]string{"lfs", endpoint.Url, "locksverify"}, ".") _, err := git.Config.SetLocal("", key, "false") return err } git-lfs-2.3.4/commands/uploader_test.go000066400000000000000000000023051317167762300201040ustar00rootroot00000000000000package commands import ( "testing" "github.com/git-lfs/git-lfs/lfsapi" "github.com/stretchr/testify/assert" ) type LockingSupportTestCase struct { Given string ExpectedToMatch bool } func (l *LockingSupportTestCase) Assert(t *testing.T) { ep := lfsapi.Endpoint{ Url: l.Given, } assert.Equal(t, l.ExpectedToMatch, supportsLockingAPI(ep)) } func TestSupportedLockingHosts(t *testing.T) { for desc, c := range map[string]*LockingSupportTestCase{ "https with path prefix": {"https://github.com/ttaylorr/dotfiles.git/info/lfs", true}, "https with root": {"https://github.com/ttaylorr/dotfiles", true}, "http with path prefix": {"http://github.com/ttaylorr/dotfiles.git/info/lfs", false}, "http with root": {"http://github.com/ttaylorr/dotfiles", false}, "ssh with path prefix": {"ssh://github.com/ttaylorr/dotfiles.git/info/lfs", true}, "ssh with root": {"ssh://github.com/ttaylorr/dotfiles", true}, "ssh with user and path prefix": {"ssh://git@github.com/ttaylorr/dotfiles.git/info/lfs", true}, "ssh with user and root": {"ssh://git@github.com/ttaylorr/dotfiles", true}, } { t.Run(desc, c.Assert) } } git-lfs-2.3.4/config/000077500000000000000000000000001317167762300143475ustar00rootroot00000000000000git-lfs-2.3.4/config/config.go000066400000000000000000000225671317167762300161570ustar00rootroot00000000000000// Package config collects together all configuration settings // NOTE: Subject to change, do not rely on this package from outside git-lfs source package config import ( "fmt" "reflect" "regexp" "strconv" "strings" "sync" "path/filepath" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/tools" ) var ( Config = New() ShowConfigWarnings = false defaultRemote = "origin" gitConfigWarningPrefix = "lfs." 
) // FetchPruneConfig collects together the config options that control fetching and pruning type FetchPruneConfig struct { // The number of days prior to current date for which (local) refs other than HEAD // will be fetched with --recent (default 7, 0 = only fetch HEAD) FetchRecentRefsDays int `git:"lfs.fetchrecentrefsdays"` // Makes the FetchRecentRefsDays option apply to remote refs from fetch source as well (default true) FetchRecentRefsIncludeRemotes bool `git:"lfs.fetchrecentremoterefs"` // number of days prior to latest commit on a ref that we'll fetch previous // LFS changes too (default 0 = only fetch at ref) FetchRecentCommitsDays int `git:"lfs.fetchrecentcommitsdays"` // Whether to always fetch recent even without --recent FetchRecentAlways bool `git:"lfs.fetchrecentalways"` // Number of days added to FetchRecent*; data outside combined window will be // deleted when prune is run. (default 3) PruneOffsetDays int `git:"lfs.pruneoffsetdays"` // Always verify with remote before pruning PruneVerifyRemoteAlways bool `git:"lfs.pruneverifyremotealways"` // Name of remote to check for unpushed and verify checks PruneRemoteName string `git:"lfs.pruneremotetocheck"` } // Storage configuration type StorageConfig struct { LfsStorageDir string `git:"lfs.storage"` } type Configuration struct { // Os provides a `*Environment` used to access the system's // environment through os.Getenv. It is the point of entry for all // system environment configuration. Os Environment // Git provides a `*Environment` used to access the various levels of // `.gitconfig`'s. It is the point of entry for all Git environment // configuration. Git Environment CurrentRemote string loading sync.Mutex // guards initialization of gitConfig and remotes remotes []string extensions map[string]Extension } func New() *Configuration { c := &Configuration{Os: EnvironmentOf(NewOsFetcher())} c.Git = &gitEnvironment{config: c} initConfig(c) return c } // Values is a convenience type used to call the NewFrom function. It // specifies `Git` and `Env` maps to use as mock values, instead of calling out // to real `.gitconfig`s and the `os.Getenv` function. type Values struct { // Git and Os are the stand-in maps used to provide values for their // respective environments. Git, Os map[string][]string } // NewFrom returns a new `*config.Configuration` that reads both its Git // and Environment-level values from the ones provided instead of the actual // `.gitconfig` file or `os.Getenv`, respectively. // // This method should only be used during testing. func NewFrom(v Values) *Configuration { c := &Configuration{ Os: EnvironmentOf(mapFetcher(v.Os)), Git: EnvironmentOf(mapFetcher(v.Git)), } initConfig(c) return c } func initConfig(c *Configuration) { c.CurrentRemote = defaultRemote } // Unmarshal unmarshals the *Configuration in context into all of `v`'s fields, // according to the following rules: // // Values are marshaled according to the given key and environment, as follows: // type T struct { // Field string `git:"key"` // Other string `os:"key"` // } // // If an unknown environment is given, an error will be returned. If there is no // method supporting conversion into a field's type, an error will be returned. // If no value is associated with the given key and environment, the field will // only be modified if there is a config value present matching the given // key. If the field is already set to a non-zero value of that field's type, // then it will be left alone.
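// For illustration (not part of the original source): a sketch of driving
// Unmarshal through the test-only NewFrom constructor. The struct type is
// hypothetical; the keys are real git-lfs settings.
//
//	type transferSettings struct {
//		Concurrent int  `git:"lfs.concurrenttransfers"`
//		SkipSmudge bool `os:"GIT_LFS_SKIP_SMUDGE"`
//	}
//
//	cfg := NewFrom(Values{
//		Git: map[string][]string{"lfs.concurrenttransfers": []string{"8"}},
//	})
//	s := &transferSettings{Concurrent: 3}
//	err := cfg.Unmarshal(s)
//	// err == nil; s.Concurrent == 8, while s.SkipSmudge keeps its zero
//	// value because neither environment carries its key.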
// // Otherwise, the field will be set to the value of calling the // appropriately-typed method on the specified environment. func (c *Configuration) Unmarshal(v interface{}) error { into := reflect.ValueOf(v) if into.Kind() != reflect.Ptr { return fmt.Errorf("lfs/config: unable to parse non-pointer type of %T", v) } into = into.Elem() for i := 0; i < into.Type().NumField(); i++ { field := into.Field(i) sfield := into.Type().Field(i) lookups, err := c.parseTag(sfield.Tag) if err != nil { return err } var val interface{} for _, lookup := range lookups { if _, ok := lookup.Get(); !ok { continue } switch sfield.Type.Kind() { case reflect.String: val, _ = lookup.Get() case reflect.Int: val = lookup.Int(int(field.Int())) case reflect.Bool: val = lookup.Bool(field.Bool()) default: return fmt.Errorf("lfs/config: unsupported target type for field %q: %v", sfield.Name, sfield.Type.String()) } if val != nil { break } } if val != nil { into.Field(i).Set(reflect.ValueOf(val)) } } return nil } var ( tagRe = regexp.MustCompile("((\\w+:\"[^\"]*\")\\b?)+") emptyEnv = EnvironmentOf(MapFetcher(nil)) ) type lookup struct { key string env Environment } func (l *lookup) Get() (interface{}, bool) { return l.env.Get(l.key) } func (l *lookup) Int(or int) int { return l.env.Int(l.key, or) } func (l *lookup) Bool(or bool) bool { return l.env.Bool(l.key, or) } // parseTag returns the key, environment, and optional error associated with a // given tag. It will return the XOR of either the `git` or `os` tag. That is to // say, a field tagged with EITHER `git` OR `os` is valid, but one tagged with // both is not. // // If neither field was found, then a nil environment will be returned. func (c *Configuration) parseTag(tag reflect.StructTag) ([]*lookup, error) { var lookups []*lookup parts := tagRe.FindAllString(string(tag), -1) for _, part := range parts { sep := strings.SplitN(part, ":", 2) if len(sep) != 2 { return nil, errors.Errorf("config: invalid struct tag %q", tag) } var env Environment switch strings.ToLower(sep[0]) { case "git": env = c.Git case "os": env = c.Os default: // ignore other struct tags, like `json:""`, etc. env = emptyEnv } uq, err := strconv.Unquote(sep[1]) if err != nil { return nil, err } lookups = append(lookups, &lookup{ key: uq, env: env, }) } return lookups, nil } // BasicTransfersOnly returns whether to only allow "basic" HTTP transfers. // Default is false, including if the lfs.basictransfersonly is invalid func (c *Configuration) BasicTransfersOnly() bool { return c.Git.Bool("lfs.basictransfersonly", false) } // TusTransfersAllowed returns whether to only use "tus.io" HTTP transfers.
// Default is false, including if the lfs.tustransfers is invalid func (c *Configuration) TusTransfersAllowed() bool { return c.Git.Bool("lfs.tustransfers", false) } func (c *Configuration) FetchIncludePaths() []string { patterns, _ := c.Git.Get("lfs.fetchinclude") return tools.CleanPaths(patterns, ",") } func (c *Configuration) FetchExcludePaths() []string { patterns, _ := c.Git.Get("lfs.fetchexclude") return tools.CleanPaths(patterns, ",") } func (c *Configuration) Remotes() []string { c.loadGitConfig() return c.remotes } func (c *Configuration) Extensions() map[string]Extension { c.loadGitConfig() return c.extensions } // SortedExtensions gets the list of extensions ordered by Priority func (c *Configuration) SortedExtensions() ([]Extension, error) { return SortExtensions(c.Extensions()) } func (c *Configuration) FetchPruneConfig() FetchPruneConfig { f := &FetchPruneConfig{ FetchRecentRefsDays: 7, FetchRecentRefsIncludeRemotes: true, PruneOffsetDays: 3, PruneRemoteName: "origin", } if err := c.Unmarshal(f); err != nil { panic(err.Error()) } return *f } func (c *Configuration) StorageConfig() StorageConfig { s := &StorageConfig{ LfsStorageDir: "lfs", } if err := c.Unmarshal(s); err != nil { panic(err.Error()) } if !filepath.IsAbs(s.LfsStorageDir) { s.LfsStorageDir = filepath.Join(LocalGitStorageDir, s.LfsStorageDir) } return *s } func (c *Configuration) SkipDownloadErrors() bool { return c.Os.Bool("GIT_LFS_SKIP_DOWNLOAD_ERRORS", false) || c.Git.Bool("lfs.skipdownloaderrors", false) } func (c *Configuration) SetLockableFilesReadOnly() bool { return c.Os.Bool("GIT_LFS_SET_LOCKABLE_READONLY", true) && c.Git.Bool("lfs.setlockablereadonly", true) } // loadGitConfig is a temporary measure to support legacy behavior dependent on // accessing properties set by ReadGitConfig, namely: // - `c.extensions` // - `c.uniqRemotes` // - `c.gitConfig` // // Since the *gitEnvironment is responsible for setting these values on the // (*config.Configuration) instance, we must call that method, if it exists. // // loadGitConfig returns a bool returning whether or not `loadGitConfig` was // called AND the method did not return early. func (c *Configuration) loadGitConfig() bool { if g, ok := c.Git.(*gitEnvironment); ok { return g.loadGitConfig() } return false } // CurrentCommitter returns the name/email that would be used to author a commit // with this configuration. 
In particular, the "user.name" and "user.email" // configuration values are used func (c *Configuration) CurrentCommitter() (name, email string) { name, _ = c.Git.Get("user.name") email, _ = c.Git.Get("user.email") return } git-lfs-2.3.4/config/config_netrc.go000066400000000000000000000010621317167762300173350ustar00rootroot00000000000000package config import ( "os" "path/filepath" "github.com/bgentry/go-netrc/netrc" ) type netrcfinder interface { FindMachine(string) *netrc.Machine } type noNetrc struct{} func (n *noNetrc) FindMachine(host string) *netrc.Machine { return nil } func (c *Configuration) parseNetrc() (netrcfinder, error) { home, _ := c.Os.Get("HOME") if len(home) == 0 { return &noNetrc{}, nil } nrcfilename := filepath.Join(home, netrcBasename) if _, err := os.Stat(nrcfilename); err != nil { return &noNetrc{}, nil } return netrc.ParseFile(nrcfilename) } git-lfs-2.3.4/config/config_nix.go000066400000000000000000000001011317167762300170110ustar00rootroot00000000000000// +build !windows package config var netrcBasename = ".netrc" git-lfs-2.3.4/config/config_test.go000066400000000000000000000152151317167762300172060ustar00rootroot00000000000000package config import ( "testing" "time" "github.com/stretchr/testify/assert" ) func TestBasicTransfersOnlySetValue(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "lfs.basictransfersonly": []string{"true"}, }, }) b := cfg.BasicTransfersOnly() assert.Equal(t, true, b) } func TestBasicTransfersOnlyDefault(t *testing.T) { cfg := NewFrom(Values{}) b := cfg.BasicTransfersOnly() assert.Equal(t, false, b) } func TestBasicTransfersOnlyInvalidValue(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "lfs.basictransfersonly": []string{"wat"}, }, }) b := cfg.BasicTransfersOnly() assert.Equal(t, false, b) } func TestTusTransfersAllowedSetValue(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "lfs.tustransfers": []string{"true"}, }, }) b := cfg.TusTransfersAllowed() assert.Equal(t, true, b) } func TestTusTransfersAllowedDefault(t *testing.T) { cfg := NewFrom(Values{}) b := cfg.TusTransfersAllowed() assert.Equal(t, false, b) } func TestTusTransfersAllowedInvalidValue(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "lfs.tustransfers": []string{"wat"}, }, }) b := cfg.TusTransfersAllowed() assert.Equal(t, false, b) } func TestLoadValidExtension(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{}, }) cfg.extensions = map[string]Extension{ "foo": Extension{ "foo", "foo-clean %f", "foo-smudge %f", 2, }, } ext := cfg.Extensions()["foo"] assert.Equal(t, "foo", ext.Name) assert.Equal(t, "foo-clean %f", ext.Clean) assert.Equal(t, "foo-smudge %f", ext.Smudge) assert.Equal(t, 2, ext.Priority) } func TestLoadInvalidExtension(t *testing.T) { cfg := NewFrom(Values{}) ext := cfg.Extensions()["foo"] assert.Equal(t, "", ext.Name) assert.Equal(t, "", ext.Clean) assert.Equal(t, "", ext.Smudge) assert.Equal(t, 0, ext.Priority) } func TestFetchPruneConfigDefault(t *testing.T) { cfg := NewFrom(Values{}) fp := cfg.FetchPruneConfig() assert.Equal(t, 7, fp.FetchRecentRefsDays) assert.Equal(t, 0, fp.FetchRecentCommitsDays) assert.Equal(t, 3, fp.PruneOffsetDays) assert.True(t, fp.FetchRecentRefsIncludeRemotes) assert.Equal(t, 3, fp.PruneOffsetDays) assert.Equal(t, "origin", fp.PruneRemoteName) assert.False(t, fp.PruneVerifyRemoteAlways) } func TestFetchPruneConfigCustom(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "lfs.fetchrecentrefsdays": []string{"12"}, "lfs.fetchrecentremoterefs": 
[]string{"false"}, "lfs.fetchrecentcommitsdays": []string{"9"}, "lfs.pruneoffsetdays": []string{"30"}, "lfs.pruneverifyremotealways": []string{"true"}, "lfs.pruneremotetocheck": []string{"upstream"}, }, }) fp := cfg.FetchPruneConfig() assert.Equal(t, 12, fp.FetchRecentRefsDays) assert.Equal(t, 9, fp.FetchRecentCommitsDays) assert.False(t, fp.FetchRecentRefsIncludeRemotes) assert.Equal(t, 30, fp.PruneOffsetDays) assert.Equal(t, "upstream", fp.PruneRemoteName) assert.True(t, fp.PruneVerifyRemoteAlways) } func TestFetchIncludeExcludesAreCleaned(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "lfs.fetchinclude": []string{"/path/to/clean/"}, "lfs.fetchexclude": []string{"/other/path/to/clean/"}, }, }) assert.Equal(t, []string{"/path/to/clean"}, cfg.FetchIncludePaths()) assert.Equal(t, []string{"/other/path/to/clean"}, cfg.FetchExcludePaths()) } func TestUnmarshalMultipleTypes(t *testing.T) { cfg := NewFrom(Values{ Git: map[string][]string{ "string": []string{"string"}, "int": []string{"1"}, "bool": []string{"true"}, }, Os: map[string][]string{ "string": []string{"string"}, "int": []string{"1"}, "bool": []string{"true"}, }, }) v := &struct { GitString string `git:"string"` GitInt int `git:"int"` GitBool bool `git:"bool"` OsString string `os:"string"` OsInt int `os:"int"` OsBool bool `os:"bool"` }{} assert.Nil(t, cfg.Unmarshal(v)) assert.Equal(t, "string", v.GitString) assert.Equal(t, 1, v.GitInt) assert.Equal(t, true, v.GitBool) assert.Equal(t, "string", v.OsString) assert.Equal(t, 1, v.OsInt) assert.Equal(t, true, v.OsBool) } func TestUnmarshalErrsOnNonPointerType(t *testing.T) { type T struct { Foo string `git:"foo"` } cfg := NewFrom(Values{}) err := cfg.Unmarshal(T{}) assert.Equal(t, "lfs/config: unable to parse non-pointer type of config.T", err.Error()) } func TestUnmarshalLeavesNonZeroValuesWhenKeysEmpty(t *testing.T) { v := &struct { String string `git:"string"` Int int `git:"int"` Bool bool `git:"bool"` }{"foo", 1, true} cfg := NewFrom(Values{}) err := cfg.Unmarshal(v) assert.Nil(t, err) assert.Equal(t, "foo", v.String) assert.Equal(t, 1, v.Int) assert.Equal(t, true, v.Bool) } func TestUnmarshalOverridesNonZeroValuesWhenValuesPresent(t *testing.T) { v := &struct { String string `git:"string"` Int int `git:"int"` Bool bool `git:"bool"` }{"foo", 1, true} cfg := NewFrom(Values{ Git: map[string][]string{ "string": []string{"bar"}, "int": []string{"2"}, "bool": []string{"false"}, }, }) err := cfg.Unmarshal(v) assert.Nil(t, err) assert.Equal(t, "bar", v.String) assert.Equal(t, 2, v.Int) assert.Equal(t, false, v.Bool) } func TestUnmarshalAllowsBothOsAndGitTags(t *testing.T) { v := &struct { String string `git:"string" os:"STRING"` }{} cfg := NewFrom(Values{ Git: map[string][]string{"string": []string{"foo"}}, Os: map[string][]string{"STRING": []string{"bar"}}, }) err := cfg.Unmarshal(v) assert.Nil(t, err) assert.Equal(t, "foo", v.String) } func TestUnmarshalYieldsToDefaultIfBothEnvsMissing(t *testing.T) { v := &struct { String string `git:"string" os:"STRING"` }{"foo"} cfg := NewFrom(Values{}) err := cfg.Unmarshal(v) assert.Nil(t, err) assert.Equal(t, "foo", v.String) } func TestUnmarshalOverridesDefaultIfAnyEnvPresent(t *testing.T) { v := &struct { String string `git:"string" os:"STRING"` }{"foo"} cfg := NewFrom(Values{ Git: map[string][]string{"string": []string{"bar"}}, Os: map[string][]string{"STRING": []string{"baz"}}, }) err := cfg.Unmarshal(v) assert.Nil(t, err) assert.Equal(t, "bar", v.String) } func TestUnmarshalIgnoresUnknownEnvironments(t *testing.T) { v := &struct { 
String string `unknown:"string"` }{} cfg := NewFrom(Values{}) assert.Nil(t, cfg.Unmarshal(v)) } func TestUnmarshalErrsOnUnsupportedTypes(t *testing.T) { v := &struct { Unsupported time.Duration `git:"duration"` }{} cfg := NewFrom(Values{ Git: map[string][]string{"duration": []string{"foo"}}, }) err := cfg.Unmarshal(v) assert.Equal(t, "lfs/config: unsupported target type for field \"Unsupported\": time.Duration", err.Error()) } git-lfs-2.3.4/config/config_windows.go000066400000000000000000000001001317167762300177060ustar00rootroot00000000000000// +build windows package config var netrcBasename = "_netrc" git-lfs-2.3.4/config/environment.go000066400000000000000000000047571317167762300172550ustar00rootroot00000000000000package config import ( "strconv" "strings" ) // An Environment adds additional behavior to a Fetcher, such as type conversion, // and default values. // // `Environment`s are the primary way to communicate with various configuration // sources, such as the OS environment variables, the `.gitconfig`, and even // `map[string]string`s. type Environment interface { // Get is shorthand for calling `e.Fetcher.Get(key)`. Get(key string) (val string, ok bool) // GetAll is shorthand for calling `e.Fetcher.GetAll(key)`. GetAll(key string) (vals []string) // Bool returns the boolean state associated with a given key, or the // value "def", if no value was associated. // // The "boolean state associated with a given key" is defined as the // case-insensitive string comparison with the following: // // 1) true if... // "true", "1", "on", "yes", or "t" // 2) false if... // "false", "0", "off", "no", "f", or otherwise. Bool(key string, def bool) (val bool) // Int returns the int value associated with a given key, or the value // "def", if no value was associated. // // To convert from the string value attached to a given key, // `strconv.Atoi(val)` is called. If `Atoi` returned a non-nil error, // then the value "def" will be returned instead. // // Otherwise, if the value was converted `string -> int` successfully, // then it will be returned wholesale. Int(key string, def int) (val int) // All returns a copy of all the key/value pairs for the current // environment. All() map[string][]string } type environment struct { // Fetcher is the `environment`'s source of data. Fetcher Fetcher } // EnvironmentOf creates a new `Environment` initialized with the given // `Fetcher`, "f". func EnvironmentOf(f Fetcher) Environment { return &environment{f} } func (e *environment) Get(key string) (val string, ok bool) { return e.Fetcher.Get(key) } func (e *environment) GetAll(key string) []string { return e.Fetcher.GetAll(key) } func (e *environment) Bool(key string, def bool) (val bool) { s, _ := e.Fetcher.Get(key) if len(s) == 0 { return def } switch strings.ToLower(s) { case "true", "1", "on", "yes", "t": return true case "false", "0", "off", "no", "f": return false default: return false } } func (e *environment) Int(key string, def int) (val int) { s, _ := e.Fetcher.Get(key) if len(s) == 0 { return def } i, err := strconv.Atoi(s) if err != nil { return def } return i } func (e *environment) All() map[string][]string { return e.Fetcher.All() } git-lfs-2.3.4/config/environment_test.go000066400000000000000000000045121317167762300203030ustar00rootroot00000000000000package config_test import ( "testing" .
"github.com/git-lfs/git-lfs/config" "github.com/stretchr/testify/assert" ) func TestEnvironmentGetDelegatesToFetcher(t *testing.T) { fetcher := MapFetcher(map[string][]string{ "foo": []string{"bar", "baz"}, }) env := EnvironmentOf(fetcher) val, ok := env.Get("foo") assert.True(t, ok) assert.Equal(t, "baz", val) } func TestEnvironmentGetAllDelegatesToFetcher(t *testing.T) { fetcher := MapFetcher(map[string][]string{ "foo": []string{"bar", "baz"}, }) env := EnvironmentOf(fetcher) vals := env.GetAll("foo") assert.Equal(t, []string{"bar", "baz"}, vals) } func TestEnvironmentUnsetBoolDefault(t *testing.T) { env := EnvironmentOf(MapFetcher(nil)) assert.True(t, env.Bool("unset", true)) } func TestEnvironmentBoolTruthyConversion(t *testing.T) { for _, c := range []EnvironmentConversionTestCase{ {"", false, GetBoolDefault(false)}, {"true", true, GetBoolDefault(false)}, {"1", true, GetBoolDefault(false)}, {"on", true, GetBoolDefault(false)}, {"yes", true, GetBoolDefault(false)}, {"t", true, GetBoolDefault(false)}, {"false", false, GetBoolDefault(true)}, {"0", false, GetBoolDefault(true)}, {"off", false, GetBoolDefault(true)}, {"no", false, GetBoolDefault(true)}, {"f", false, GetBoolDefault(true)}, } { c.Assert(t) } } func TestEnvironmentIntTestCases(t *testing.T) { for _, c := range []EnvironmentConversionTestCase{ {"", 1, GetIntDefault(1)}, {"1", 1, GetIntDefault(0)}, {"3", 3, GetIntDefault(0)}, {"malformed", 7, GetIntDefault(7)}, } { c.Assert(t) } } type EnvironmentConversionTestCase struct { Val string Expected interface{} GotFn func(env Environment, key string) interface{} } var ( GetBoolDefault = func(def bool) func(e Environment, key string) interface{} { return func(e Environment, key string) interface{} { return e.Bool(key, def) } } GetIntDefault = func(def int) func(e Environment, key string) interface{} { return func(e Environment, key string) interface{} { return e.Int(key, def) } } ) func (c *EnvironmentConversionTestCase) Assert(t *testing.T) { fetcher := MapFetcher(map[string][]string{ c.Val: []string{c.Val}, }) env := EnvironmentOf(fetcher) got := c.GotFn(env, c.Val) if c.Expected != got { t.Errorf("lfs/config: expected val=%q to be %q (got: %q)", c.Val, c.Expected, got) } } git-lfs-2.3.4/config/extension.go000066400000000000000000000015041317167762300167120ustar00rootroot00000000000000package config import ( "fmt" "sort" ) // An Extension describes how to manipulate files during smudge and clean. // Extensions are parsed from the Git config. 
type Extension struct { Name string Clean string Smudge string Priority int } // SortExtensions sorts a map of extensions in ascending order by Priority func SortExtensions(m map[string]Extension) ([]Extension, error) { pMap := make(map[int]Extension) priorities := make([]int, 0, len(m)) for n, ext := range m { p := ext.Priority if _, exist := pMap[p]; exist { err := fmt.Errorf("duplicate priority %d on %s", p, n) return nil, err } pMap[p] = ext priorities = append(priorities, p) } sort.Ints(priorities) result := make([]Extension, len(priorities)) for i, p := range priorities { result[i] = pMap[p] } return result, nil } git-lfs-2.3.4/config/extension_test.go000066400000000000000000000020211317167762300177460ustar00rootroot00000000000000package config import ( "testing" "github.com/stretchr/testify/assert" ) func TestSortExtensions(t *testing.T) { m := map[string]Extension{ "baz": Extension{ "baz", "baz-clean %f", "baz-smudge %f", 2, }, "foo": Extension{ "foo", "foo-clean %f", "foo-smudge %f", 0, }, "bar": Extension{ "bar", "bar-clean %f", "bar-smudge %f", 1, }, } names := []string{"foo", "bar", "baz"} sorted, err := SortExtensions(m) assert.Nil(t, err) for i, ext := range sorted { name := names[i] assert.Equal(t, name, ext.Name) assert.Equal(t, name+"-clean %f", ext.Clean) assert.Equal(t, name+"-smudge %f", ext.Smudge) assert.Equal(t, i, ext.Priority) } } func TestSortExtensionsDuplicatePriority(t *testing.T) { m := map[string]Extension{ "foo": Extension{ "foo", "foo-clean %f", "foo-smudge %f", 0, }, "bar": Extension{ "bar", "bar-clean %f", "bar-smudge %f", 0, }, } sorted, err := SortExtensions(m) assert.NotNil(t, err) assert.Empty(t, sorted) } git-lfs-2.3.4/config/fetcher.go000066400000000000000000000013671317167762300163250ustar00rootroot00000000000000package config // Fetcher provides an interface to get typed information out of a configuration // "source". These sources could be the OS environment, a .gitconfig, or even // just a `map`. type Fetcher interface { // Get returns the string value associated with a given key and a bool // determining if the key exists. // // If multiple entries match the given key, the last one will be // returned. Get(key string) (val string, ok bool) // GetAll returns the set of string values associated with a given // key. If no entries matched the given key, an empty slice will be // returned instead. GetAll(key string) (vals []string) // All returns a copy of all the key/value pairs for the current // environment.
All() map[string][]string } git-lfs-2.3.4/config/filesystem.go000066400000000000000000000061051317167762300170640ustar00rootroot00000000000000package config import ( "fmt" "io/ioutil" "os" "path/filepath" "strings" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/tools" "github.com/rubyist/tracerx" ) var ( LocalWorkingDir string LocalGitDir string // parent of index / config / hooks etc LocalGitStorageDir string // parent of objects/lfs (may be same as LocalGitDir but may not) LocalReferenceDir string // alternative local media dir (relative to clone reference repo) LocalLogDir string ) // Determines the LocalWorkingDir, LocalGitDir etc func ResolveGitBasicDirs() { var err error LocalGitDir, LocalWorkingDir, err = git.GitAndRootDirs() if err == nil { // Make sure we've fully evaluated symlinks, failure to do so consistently // can cause discrepancies LocalGitDir = tools.ResolveSymlinks(LocalGitDir) LocalWorkingDir = tools.ResolveSymlinks(LocalWorkingDir) LocalGitStorageDir = resolveGitStorageDir(LocalGitDir) LocalReferenceDir = resolveReferenceDir(LocalGitStorageDir) } else { errMsg := err.Error() tracerx.Printf("Error running 'git rev-parse': %s", errMsg) if !strings.Contains(errMsg, "Not a git repository") { fmt.Fprintf(os.Stderr, "Error: %s\n", errMsg) } } } func resolveReferenceDir(gitStorageDir string) string { cloneReferencePath := filepath.Join(gitStorageDir, "objects", "info", "alternates") if tools.FileExists(cloneReferencePath) { buffer, err := ioutil.ReadFile(cloneReferencePath) if err == nil { path := strings.TrimSpace(string(buffer[:])) referenceLfsStoragePath := filepath.Join(filepath.Dir(path), "lfs", "objects") if tools.DirExists(referenceLfsStoragePath) { return referenceLfsStoragePath } } } return "" } // From a git dir, get the location that objects are to be stored (we will store lfs alongside) // Sometimes there is an additional level of redirect on the .git folder by way of a commondir file // before you find object storage, e.g. 'git worktree' uses this. It redirects to gitdir either by GIT_DIR // (during setup) or .git/git-dir: (during use), but this only contains the index etc, the objects // are found in another git dir via 'commondir'. func resolveGitStorageDir(gitDir string) string { commondirpath := filepath.Join(gitDir, "commondir") if tools.FileExists(commondirpath) && !tools.DirExists(filepath.Join(gitDir, "objects")) { // no git-dir: prefix in commondir storage, err := processGitRedirectFile(commondirpath, "") if err == nil { return storage } } return gitDir } func processGitRedirectFile(file, prefix string) (string, error) { data, err := ioutil.ReadFile(file) if err != nil { return "", err } contents := string(data) var dir string if len(prefix) > 0 { if !strings.HasPrefix(contents, prefix) { // Prefix required & not found return "", nil } dir = strings.TrimSpace(contents[len(prefix):]) } else { dir = strings.TrimSpace(contents) } if !filepath.IsAbs(dir) { // The .git file contains a relative path. // Create an absolute path based on the directory the .git file is located in. dir = filepath.Join(filepath.Dir(file), dir) } return dir, nil } git-lfs-2.3.4/config/git_environment.go000066400000000000000000000044231317167762300201100ustar00rootroot00000000000000package config // gitEnvironment is an implementation of the Environment which wraps the legacy // behavior of `*config.Configuration.loadGitConfig()`. // // It is functionally equivalent to calling `cfg.loadGitConfig()` before calling // methods on the Environment type.
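// Note (not part of the original source): the first call to any method on
// gitEnvironment triggers a single read of the git configuration via
// loadGitConfig; subsequent calls are served from the cached fetcher, which
// keeps New() cheap until configuration is actually needed.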
type gitEnvironment struct { // git is the Environment which gitEnvironment wraps. git Environment // config is the *Configuration instance which is mutated by // `loadGitConfig`. config *Configuration } // Get is shorthand for calling the loadGitConfig, and then returning // `g.git.Get(key)`. func (g *gitEnvironment) Get(key string) (val string, ok bool) { g.loadGitConfig() return g.git.Get(key) } // GetAll is shorthand for calling the loadGitConfig, and then returning // `g.git.GetAll(key)`. func (g *gitEnvironment) GetAll(key string) []string { g.loadGitConfig() return g.git.GetAll(key) } // Bool is shorthand for calling the loadGitConfig, and then returning // `g.git.Bool(key, def)`. func (g *gitEnvironment) Bool(key string, def bool) (val bool) { g.loadGitConfig() return g.git.Bool(key, def) } // Int is shorthand for calling the loadGitConfig, and then returning // `g.git.Int(key, def)`. func (g *gitEnvironment) Int(key string, def int) (val int) { g.loadGitConfig() return g.git.Int(key, def) } // All returns a copy of all the key/value pairs for the current git config. func (g *gitEnvironment) All() map[string][]string { g.loadGitConfig() return g.git.All() } // loadGitConfig reads and parses the .gitconfig by calling ReadGitConfig. It // also sets values on the configuration instance `g.config`. // // If loadGitConfig has already been called, this method will bail out early, // and return false. Otherwise it will perform the entire parse and return true. // // loadGitConfig is safe to call across multiple goroutines. func (g *gitEnvironment) loadGitConfig() bool { g.config.loading.Lock() defer g.config.loading.Unlock() if g.git != nil { return false } gf, extensions, uniqRemotes := ReadGitConfig(getGitConfigs()...) g.git = EnvironmentOf(gf) g.config.extensions = extensions g.config.remotes = make([]string, 0, len(uniqRemotes)) for remote, isOrigin := range uniqRemotes { if isOrigin { continue } g.config.remotes = append(g.config.remotes, remote) } return true } git-lfs-2.3.4/config/git_fetcher.go000066400000000000000000000113231317167762300171610ustar00rootroot00000000000000package config import ( "fmt" "os" "path/filepath" "strconv" "strings" "sync" "github.com/git-lfs/git-lfs/git" ) type GitFetcher struct { vmu sync.RWMutex vals map[string][]string } type GitConfig struct { Lines []string OnlySafeKeys bool } func NewGitConfig(gitconfiglines string, onlysafe bool) *GitConfig { return &GitConfig{ Lines: strings.Split(gitconfiglines, "\n"), OnlySafeKeys: onlysafe, } } func ReadGitConfig(configs ...*GitConfig) (gf *GitFetcher, extensions map[string]Extension, uniqRemotes map[string]bool) { vals := make(map[string][]string) ignored := make([]string, 0) extensions = make(map[string]Extension) uniqRemotes = make(map[string]bool) for _, gc := range configs { uniqKeys := make(map[string]string) for _, line := range gc.Lines { pieces := strings.SplitN(line, "=", 2) if len(pieces) < 2 { continue } allowed := !gc.OnlySafeKeys key, val := strings.ToLower(pieces[0]), pieces[1] if origKey, ok := uniqKeys[key]; ok { if ShowConfigWarnings && len(vals[key]) > 0 && vals[key][len(vals[key])-1] != val && strings.HasPrefix(key, gitConfigWarningPrefix) { fmt.Fprintf(os.Stderr, "WARNING: These git config values clash:\n") fmt.Fprintf(os.Stderr, " git config %q = %q\n", origKey, vals[key]) fmt.Fprintf(os.Stderr, " git config %q = %q\n", pieces[0], val) } } else { uniqKeys[key] = pieces[0] } parts := strings.Split(key, ".") if len(parts) == 4 && parts[0] == "lfs" && parts[1] == "extension" { // prop:
lfs.extension.. name := parts[2] prop := parts[3] ext := extensions[name] ext.Name = name switch prop { case "clean": if gc.OnlySafeKeys { ignored = append(ignored, key) continue } ext.Clean = val case "smudge": if gc.OnlySafeKeys { ignored = append(ignored, key) continue } ext.Smudge = val case "priority": allowed = true p, err := strconv.Atoi(val) if err == nil && p >= 0 { ext.Priority = p } } extensions[name] = ext } else if len(parts) > 1 && parts[0] == "remote" { if gc.OnlySafeKeys && (len(parts) == 3 && parts[2] != "lfsurl") { ignored = append(ignored, key) continue } allowed = true remote := parts[1] uniqRemotes[remote] = remote == "origin" } else if len(parts) > 2 && parts[len(parts)-1] == "access" { allowed = true } if !allowed && keyIsUnsafe(key) { ignored = append(ignored, key) continue } vals[key] = append(vals[key], val) } } if len(ignored) > 0 { fmt.Fprintf(os.Stderr, "WARNING: These unsafe lfsconfig keys were ignored:\n\n") for _, key := range ignored { fmt.Fprintf(os.Stderr, " %s\n", key) } } gf = &GitFetcher{vals: vals} return } // Get implements the Fetcher interface, and returns the value associated with // a given key and true, signaling that the value was present. Otherwise, an // empty string and false will be returned, signaling that the value was // absent. // // Map lookup by key is case-insensitive, as per the .gitconfig specification. // // Get is safe to call across multiple goroutines. func (g *GitFetcher) Get(key string) (val string, ok bool) { all := g.GetAll(key) if len(all) == 0 { return "", false } return all[len(all)-1], true } func (g *GitFetcher) GetAll(key string) []string { g.vmu.RLock() defer g.vmu.RUnlock() return g.vals[strings.ToLower(key)] } func (g *GitFetcher) All() map[string][]string { newmap := make(map[string][]string) g.vmu.RLock() defer g.vmu.RUnlock() for key, values := range g.vals { for _, value := range values { newmap[key] = append(newmap[key], value) } } return newmap } func getGitConfigs() (sources []*GitConfig) { if lfsconfig := getFileGitConfig(".lfsconfig"); lfsconfig != nil { sources = append(sources, lfsconfig) } globalList, err := git.Config.List() if err == nil { sources = append(sources, NewGitConfig(globalList, false)) } else { fmt.Fprintf(os.Stderr, "Error reading git config: %s\n", err) } return } func getFileGitConfig(basename string) *GitConfig { fullname := filepath.Join(LocalWorkingDir, basename) if _, err := os.Stat(fullname); err != nil { if !os.IsNotExist(err) { fmt.Fprintf(os.Stderr, "Error reading %s: %s\n", basename, err) } return nil } lines, err := git.Config.ListFromFile(fullname) if err == nil { return NewGitConfig(lines, true) } fmt.Fprintf(os.Stderr, "Error reading %s: %s\n", basename, err) return nil } func keyIsUnsafe(key string) bool { for _, safe := range safeKeys { if safe == key { return false } } return true } var safeKeys = []string{ "lfs.fetchexclude", "lfs.fetchinclude", "lfs.gitprotocol", "lfs.pushurl", "lfs.url", } git-lfs-2.3.4/config/map_fetcher.go000066400000000000000000000016571317167762300171640ustar00rootroot00000000000000package config // mapFetcher provides an implementation of the Fetcher interface by wrapping // the `map[string]string` type. type mapFetcher map[string][]string func UniqMapFetcher(m map[string]string) Fetcher { multi := make(map[string][]string, len(m)) for k, v := range m { multi[k] = []string{v} } return MapFetcher(multi) } func MapFetcher(m map[string][]string) Fetcher { return mapFetcher(m) } // Get implements the func `Fetcher.Get`. 
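// Illustrative note (not part of the original source): as with GitFetcher,
// multi-valued lookups are last-value-wins. Given the hypothetical config
//
//	[lfs]
//		url = https://one.example.com
//		url = https://two.example.com
//
// GetAll("lfs.url") returns both values in order, and Get("lfs.url") returns
// the second, mirroring git's own precedence for repeated keys.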
func (m mapFetcher) Get(key string) (val string, ok bool) { all := m.GetAll(key) if len(all) == 0 { return "", false } return all[len(all)-1], true } // GetAll implements the func `Fetcher.GetAll`. func (m mapFetcher) GetAll(key string) []string { return m[key] } func (m mapFetcher) All() map[string][]string { newmap := make(map[string][]string) for key, values := range m { for _, value := range values { newmap[key] = append(newmap[key], value) } } return newmap } git-lfs-2.3.4/config/os_fetcher.go000066400000000000000000000032351317167762300170220ustar00rootroot00000000000000package config import ( "os" "sync" ) // OsFetcher is an implementation of the Fetcher type for communicating with // the system's environment. // // It is safe to use across multiple goroutines. type OsFetcher struct { // vmu guards read/write access to vals vmu sync.Mutex // vals maintains a local cache of the system's environment variables // for fast repeat lookups of a given key. vals map[string]*string } // NewOsFetcher returns a new *OsFetcher. func NewOsFetcher() *OsFetcher { return &OsFetcher{ vals: make(map[string]*string), } } // Get returns the value associated with the given key as stored in the local // cache, or in the operating system's environment variables. // // If there was a cache-hit, the value will be returned from the cache, skipping // a check against os.Getenv. Otherwise, the value will be fetched from the // system, stored in the cache, and then returned. If no value was present in // the cache or in the system, an empty string will be returned. // // Get is safe to call across multiple goroutines. func (o *OsFetcher) Get(key string) (val string, ok bool) { o.vmu.Lock() defer o.vmu.Unlock() if i, ok := o.vals[key]; ok { if i == nil { return "", false } return *i, true } v, ok := os.LookupEnv(key) if ok { o.vals[key] = &v } else { o.vals[key] = nil } return v, ok } // GetAll implements the `config.Fetcher.GetAll` method by returning, at most, a // 1-ary set containing the result of `config.OsFetcher.Get()`. func (o *OsFetcher) GetAll(key string) []string { if v, ok := o.Get(key); ok { return []string{v} } return make([]string, 0) } func (o *OsFetcher) All() map[string][]string { return nil } git-lfs-2.3.4/config/url_config.go000066400000000000000000000057341317167762300170320ustar00rootroot00000000000000package config import ( "fmt" "net/url" "strings" ) type URLConfig struct { git Environment } func NewURLConfig(git Environment) *URLConfig { if git == nil { git = EnvironmentOf(make(mapFetcher)) } return &URLConfig{ git: git, } } // Get retrieves a `http.{url}.{key}` for the given key and urls, following the // rules in https://git-scm.com/docs/git-config#git-config-httplturlgt. // The value for `http.{key}` is returned as a fallback if no config keys are // set for the given urls.
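// Illustrative sketch (not part of the original source; hosts and values are
// hypothetical). Given
//
//	http.key = base
//	http.https://host.com/repo.git.key = repo
//
// Get("http", "https://host.com/repo.git/info/lfs", "key") returns "repo",
// because the longest matching URL prefix wins, while a URL on any other host
// falls back to the bare "http.key" and returns "base".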
func (c *URLConfig) Get(prefix, rawurl, key string) (string, bool) { if c == nil { return "", false } key = strings.ToLower(key) prefix = strings.ToLower(prefix) if v := c.getAll(prefix, rawurl, key); len(v) > 0 { return v[len(v)-1], true } return c.git.Get(strings.Join([]string{prefix, key}, ".")) } func (c *URLConfig) GetAll(prefix, rawurl, key string) []string { if c == nil { return nil } key = strings.ToLower(key) prefix = strings.ToLower(prefix) if v := c.getAll(prefix, rawurl, key); len(v) > 0 { return v } return c.git.GetAll(strings.Join([]string{prefix, key}, ".")) } func (c *URLConfig) getAll(prefix, rawurl, key string) []string { hosts, paths := c.hostsAndPaths(rawurl) for i := len(paths); i > 0; i-- { for _, host := range hosts { path := strings.Join(paths[:i], slash) if v := c.git.GetAll(fmt.Sprintf("%s.%s/%s.%s", prefix, host, path, key)); len(v) > 0 { return v } if v := c.git.GetAll(fmt.Sprintf("%s.%s/%s/.%s", prefix, host, path, key)); len(v) > 0 { return v } if isDefaultLFSUrl(path, paths, i) { path = path[0 : len(path)-4] if v := c.git.GetAll(fmt.Sprintf("%s.%s/%s.%s", prefix, host, path, key)); len(v) > 0 { return v } } } } for _, host := range hosts { if v := c.git.GetAll(fmt.Sprintf("%s.%s.%s", prefix, host, key)); len(v) > 0 { return v } if v := c.git.GetAll(fmt.Sprintf("%s.%s/.%s", prefix, host, key)); len(v) > 0 { return v } } return nil } func (c *URLConfig) hostsAndPaths(rawurl string) (hosts, paths []string) { u, err := url.Parse(rawurl) if err != nil { return nil, nil } return c.hosts(u), c.paths(u.Path) } func (c *URLConfig) hosts(u *url.URL) []string { hosts := make([]string, 0, 1) if u.User != nil { hosts = append(hosts, fmt.Sprintf("%s://%s@%s", u.Scheme, u.User.Username(), u.Host)) } hosts = append(hosts, fmt.Sprintf("%s://%s", u.Scheme, u.Host)) return hosts } func (c *URLConfig) paths(path string) []string { pLen := len(path) if pLen <= 2 { return nil } end := pLen if strings.HasSuffix(path, slash) { end-- } return strings.Split(path[1:end], slash) } const ( gitExt = ".git" infoPart = "info" lfsPart = "lfs" slash = "/" ) func isDefaultLFSUrl(path string, parts []string, index int) bool { if len(path) < 5 { return false // shorter than ".git" } if !strings.HasSuffix(path, gitExt) { return false } if index > len(parts)-2 { return false } return parts[index] == infoPart && parts[index+1] == lfsPart } git-lfs-2.3.4/config/url_config_test.go000066400000000000000000000046351317167762300200740ustar00rootroot00000000000000package config import ( "testing" "github.com/stretchr/testify/assert" ) func TestURLConfig(t *testing.T) { u := NewURLConfig(EnvironmentOf(MapFetcher(map[string][]string{ "http.key": []string{"root", "root-2"}, "http.https://host.com.key": []string{"host", "host-2"}, "http.https://user@host.com/a.key": []string{"user-a", "user-b"}, "http.https://user@host.com.key": []string{"user", "user-2"}, "http.https://host.com/a.key": []string{"host-a", "host-b"}, "http.https://host.com:8080.key": []string{"port", "port-2"}, "http.https://host.com/repo.git.key": []string{".git"}, "http.https://host.com/repo.key": []string{"no .git"}, "http.https://host.com/repo2.key": []string{"no .git"}, }))) getOne := map[string]string{ "https://root.com/a/b/c": "root-2", "https://host.com/": "host-2", "https://host.com/a/b/c": "host-b", "https://user:pass@host.com/a/b/c": "user-b", "https://user:pass@host.com/z/b/c": "user-2", "https://host.com:8080/a": "port-2", "https://host.com/repo.git/info/lfs": ".git", "https://host.com/repo.git/info": ".git", 
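// Note (not part of the original source): the repo2 cases below exercise
// isDefaultLFSUrl; a key set on ".../repo2" (without ".git") still matches
// the default LFS endpoint ".../repo2.git/info/lfs", but not
// ".../repo2.git/info" or ".../repo2.git" alone.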
"https://host.com/repo.git": ".git", "https://host.com/repo": "no .git", "https://host.com/repo2.git/info/lfs/foo/bar": "no .git", "https://host.com/repo2.git/info/lfs": "no .git", "https://host.com/repo2.git/info": "host-2", // doesn't match /.git/info/lfs\Z/ "https://host.com/repo2.git": "host-2", // ditto "https://host.com/repo2": "no .git", } for rawurl, expected := range getOne { value, _ := u.Get("http", rawurl, "key") assert.Equal(t, expected, value, "get one: "+rawurl) } getAll := map[string][]string{ "https://root.com/a/b/c": []string{"root", "root-2"}, "https://host.com/": []string{"host", "host-2"}, "https://host.com/a/b/c": []string{"host-a", "host-b"}, "https://user:pass@host.com/a/b/c": []string{"user-a", "user-b"}, "https://user:pass@host.com/z/b/c": []string{"user", "user-2"}, "https://host.com:8080/a": []string{"port", "port-2"}, } for rawurl, expected := range getAll { values := u.GetAll("http", rawurl, "key") assert.Equal(t, expected, values, "get all: "+rawurl) } } git-lfs-2.3.4/config/version.go000066400000000000000000000006321317167762300163640ustar00rootroot00000000000000package config import ( "fmt" "runtime" "strings" ) var ( GitCommit string VersionDesc string ) const ( Version = "2.3.4" ) func init() { gitCommit := "" if len(GitCommit) > 0 { gitCommit = "; git " + GitCommit } VersionDesc = fmt.Sprintf("git-lfs/%s (GitHub; %s %s; go %s%s)", Version, runtime.GOOS, runtime.GOARCH, strings.Replace(runtime.Version(), "go", "", 1), gitCommit, ) } git-lfs-2.3.4/debian/000077500000000000000000000000001317167762300143245ustar00rootroot00000000000000git-lfs-2.3.4/debian/changelog000066400000000000000000000122501317167762300161760ustar00rootroot00000000000000git-lfs (2.3.4) stable; urgency=low * New upstream version -- Rick Olson Wed, 18 Oct 2017 14:29:00 +0000 git-lfs (2.3.3) stable; urgency=low * New upstream version -- Rick Olson Mon, 9 Oct 2017 14:29:00 +0000 git-lfs (2.3.2) stable; urgency=low * New upstream version -- Rick Olson Tue, 3 Oct 2017 14:29:00 +0000 git-lfs (2.3.1) stable; urgency=low * New upstream version -- Rick Olson Wed, 26 Sep 2017 14:29:00 +0000 git-lfs (2.3.0) stable; urgency=low * New upstream version -- Taylor Blau Thu, 14 Sep 2017 14:29:00 +0000 git-lfs (2.2.1) stable; urgency=low * New upstream version -- Taylor Blau Mon, 10 Jul 2017 14:29:00 +0000 git-lfs (2.2.0) stable; urgency=low * New upstream version -- Rick Olson Tue, 27 Jun 2017 14:29:00 +0000 git-lfs (2.1.1) stable; urgency=low * New upstream version -- Taylor Blau Fri, 19 May 2017 14:29:00 +0000 git-lfs (2.1.0) stable; urgency=low * New upstream version -- Taylor Blau Fri, 28 Apr 2017 14:29:00 +0000 git-lfs (2.0.2) stable; urgency=low * New upstream version -- Taylor Blau Wed, 29 Mar 2017 14:29:00 +0000 git-lfs (2.0.1) stable; urgency=low * New upstream version -- Taylor Blau Mon, 6 Mar 2017 14:29:00 +0000 git-lfs (2.0.0) stable; urgency=low * New upstream version -- Rick Olson Tue, 1 Mar 2017 14:29:00 +0000 git-lfs (1.5.6) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 16 Feb 2017 14:29:00 +0000 git-lfs (1.5.5) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 12 Jan 2017 14:29:00 +0000 git-lfs (1.5.4) stable; urgency=low * New upstream version -- Stephen Gelman Tue, 27 Dec 2016 14:29:00 +0000 git-lfs (1.5.3) stable; urgency=low * New upstream version -- Stephen Gelman Mon, 5 Dec 2016 14:29:00 +0000 git-lfs (1.5.2) stable; urgency=low * New upstream version -- Stephen Gelman Tue, 22 Nov 2016 14:29:00 +0000 git-lfs (1.5.1) stable; urgency=low * New 
upstream version -- Stephen Gelman Fri, 18 Nov 2016 14:29:00 +0000 git-lfs (1.5.0) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 17 Nov 2016 14:29:00 +0000 git-lfs (1.4.4) stable; urgency=low * New upstream version -- Stephen Gelman Mon, 24 Oct 2016 14:29:00 +0000 git-lfs (1.4.3) stable; urgency=low * New upstream version -- Stephen Gelman Mon, 17 Oct 2016 14:29:00 +0000 git-lfs (1.4.2) stable; urgency=low * New upstream version -- Stephen Gelman Mon, 10 Oct 2016 14:29:00 +0000 git-lfs (1.4.1) stable; urgency=low * New upstream version -- Stephen Gelman Fri, 26 Aug 2016 14:29:00 +0000 git-lfs (1.4.0) stable; urgency=low * New upstream version -- Stephen Gelman Fri, 19 Aug 2016 14:29:00 +0000 git-lfs (1.3.1) stable; urgency=low * New upstream version -- Stephen Gelman Tue, 2 Aug 2016 14:29:00 +0000 git-lfs (1.3.0) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 21 Jul 2016 14:29:00 +0000 git-lfs (1.2.1) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 2 Jun 2016 14:29:00 +0000 git-lfs (1.2.0) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 14 Apr 2016 14:29:00 +0000 git-lfs (1.1.2) stable; urgency=low * New upstream version -- Stephen Gelman Tue, 1 Mar 2016 14:29:00 +0000 git-lfs (1.1.1) stable; urgency=low * New upstream version -- Stephen Gelman Wed, 4 Feb 2016 14:29:00 +0000 git-lfs (1.1.0) stable; urgency=low * New upstream version -- Stephen Gelman Wed, 12 Nov 2015 14:29:00 +0000 git-lfs (1.0.2) stable; urgency=low * New upstream version -- Stephen Gelman Wed, 28 Oct 2015 14:29:00 +0000 git-lfs (1.0.1) stable; urgency=low * New upstream version -- Stephen Gelman Fri, 23 Oct 2015 14:29:00 +0000 git-lfs (1.0.0) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 1 Oct 2015 14:29:00 +0000 git-lfs (0.6.0) stable; urgency=low * New upstream version -- Stephen Gelman Thu, 10 Sep 2015 14:29:00 +0000 git-lfs (0.5.3) stable; urgency=low * New upstream version -- Stephen Gelman Fri, 24 Jul 2015 20:43:00 +0000 git-lfs (0.5.2) stable; urgency=low * New upstream version -- Stephen Gelman Fri, 12 Jun 2015 02:54:01 +0000 git-lfs (0.5.1) stable; urgency=medium * Initial release. -- Stephen Gelman Fri, 08 May 2015 22:55:45 +0000 git-lfs-2.3.4/debian/compat000066400000000000000000000000021317167762300155220ustar00rootroot000000000000009 git-lfs-2.3.4/debian/control000066400000000000000000000007451317167762300157350ustar00rootroot00000000000000Source: git-lfs Section: vcs Priority: optional Maintainer: Stephen Gelman Build-Depends: debhelper (>= 9), dh-golang, golang-go:native (>= 1.3.0), git (>= 1.8.2), ruby-ronn Standards-Version: 3.9.6 Package: git-lfs Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, git (>= 1.8.2) Built-Using: ${misc:Built-Using} Description: Git Large File Support An open source Git extension for versioning large files Homepage: https://git-lfs.github.com/ git-lfs-2.3.4/debian/copyright000066400000000000000000000024211317167762300162560ustar00rootroot00000000000000Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: git-lfs Source: https://github.com/git-lfs/git-lfs Files: * Copyright: 2013-2015 Github, Inc. License: Expat Copyright (c) GitHub, Inc. and Git LFS contributors . 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: . The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. . THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. git-lfs-2.3.4/debian/git-lfs.lintian-overrides000066400000000000000000000004241317167762300212510ustar00rootroot00000000000000# Go only produces static binaries so read-only relocations aren't possible hardening-no-relro usr/bin/git-lfs # strip disabled as golang upstream doesn't support it and it makes go # crash. See https://launchpad.net/bugs/1200255. unstripped-binary-or-object usr/bin/git-lfs git-lfs-2.3.4/debian/git-lfs.manpages000066400000000000000000000000201317167762300173760ustar00rootroot00000000000000man/*.1 man/*.5 git-lfs-2.3.4/debian/postinst000066400000000000000000000000431317167762300161270ustar00rootroot00000000000000#!/bin/sh git lfs install --system git-lfs-2.3.4/debian/prerm000066400000000000000000000000341317167762300153710ustar00rootroot00000000000000#!/bin/sh git lfs uninstall git-lfs-2.3.4/debian/rules000077500000000000000000000045011317167762300154040ustar00rootroot00000000000000#!/usr/bin/make -f export DH_OPTIONS export GO15VENDOREXPERIMENT=1 #dh_golang doesn't do this for you ifeq ($(DEB_HOST_ARCH), i386) export GOARCH := 386 else ifeq ($(DEB_HOST_ARCH), amd64) export GOARCH := amd64 else ifeq ($(DEB_HOST_ARCH), armhf) export GOARCH := arm else ifeq ($(DEB_HOST_ARCH), arm64) export GOARCH := arm64 endif BUILD_DIR := obj-$(DEB_HOST_GNU_TYPE) export DH_GOPKG := github.com/git-lfs/git-lfs # DH_GOLANG_EXCLUDES typically incorporates vendor exclusions from script/test export DH_GOLANG_EXCLUDES := test github.com/olekukonko/ts/* github.com/xeipuuv/* github.com/spf13/cobra/* github.com/kr/* github.com/pkg/errors export PATH := $(CURDIR)/$(BUILD_DIR)/bin:$(PATH) # by-default, dh_golang only copies *.go and other source - this upsets a bunch of vendor test routines export DH_GOLANG_INSTALL_ALL := 1 %: dh $@ --buildsystem=golang --with=golang override_dh_clean: rm -f debian/debhelper.log rm -rf man dh_clean override_dh_auto_build: dh_auto_build #dh_golang doesn't do anything here in deb 8, and it's needed in both if [ "$(DEB_HOST_GNU_TYPE)" != "$(DEB_BUILD_GNU_TYPE)" ]; then\ cp -rf $(BUILD_DIR)/bin/*/* $(BUILD_DIR)/bin/; \ cp -rf $(BUILD_DIR)/pkg/*/* $(BUILD_DIR)/pkg/; \ fi rm $(BUILD_DIR)/bin/script rm $(BUILD_DIR)/bin/man ./script/man override_dh_strip: # strip disabled as golang upstream doesn't support it and it makes go # crash. See https://launchpad.net/bugs/1200255. override_dh_golang: # The dh_golang is used to add the Built-using field to the deb. This is only for reference. 
# As of https://anonscm.debian.org/cgit/collab-maint/dh-golang.git/commit/script/dh_golang?id=7c3fbec6ea92294477fa8910264fe9bd823f21c3 # dh_golang errors out because the go compiler used was not installed via a package. Therefore the step is skipped override_dh_auto_install: mkdir -p debian/git-lfs/usr/bin cp $(BUILD_DIR)/bin/git-lfs debian/git-lfs/usr/bin/ override_dh_auto_test: ln -s ../../../../../../commands/repos $(BUILD_DIR)/src/github.com/git-lfs/git-lfs/commands/repos ln -s ../../../../bin $(BUILD_DIR)/src/github.com/git-lfs/git-lfs/bin #dh_golang uses the wrong dir to test on. This tricks everything into being happy DEB_BUILD_GNU_TYPE=$(DEB_HOST_GNU_TYPE) dh_auto_test rm $(BUILD_DIR)/src/github.com/git-lfs/git-lfs/commands/repos $(BUILD_DIR)/src/github.com/git-lfs/git-lfs/bin git-lfs-2.3.4/debian/source/000077500000000000000000000000001317167762300156245ustar00rootroot00000000000000git-lfs-2.3.4/debian/source/format000066400000000000000000000000151317167762300170330ustar00rootroot000000000000003.0 (native) git-lfs-2.3.4/docker/000077500000000000000000000000001317167762300143515ustar00rootroot00000000000000git-lfs-2.3.4/docker/README.md000066400000000000000000000324371317167762300156400ustar00rootroot00000000000000# README # ## TL;DR version ## 1. Run the dockers ./docker/run_dockers.bsh 2. **Enjoy** all your new package files in ./repos/ ### Slightly longer version ### 1. Generate GPG keys for everything (See GPG Signing) 2. `export REPO_HOSTNAME=repo.something.com` 3. Generate git-lfs/repo packages and sign all packages ./docker/run_dockers.bsh 4. Host the `/repo` on the `REPO_HOSTNAME` server 5. Test the repos and git-lfs in a client environment ./docker/test_dockers.bsh ## Using the Dockers ## All docker commands need to either be run as root **or** as a user with docker permissions. Adding your user name to the docker group (or setting up a boot2docker environment) is probably the easiest. For Mac and Windows users, the git-lfs repo needs to be in your Users directory or else boot2docker magic won't work. Alternatively, you could add additional mount points like [this](http://stackoverflow.com/questions/26639968/boot2docker-startup-script-to-mount-local-shared-folder-with-host) ### Running Dockers ### In order to run the dockers, the docker has to be run with a lot of arguments to get the mount points right, etc... A convenient script is supplied to make this all easy. Simply run ./docker/run_dockers.bsh All the images are pulled automatically, and then run. To only run certain docker images, supply them as arguments, e.g. ./docker/run_dockers.bsh debian_7 ./docker/run_dockers.bsh centos_7 debian_8 ./docker/run_dockers.bsh centos_{6,7} And only those images will be run. ### Development in Dockers ### Sometimes you don't want to just build git-lfs and destroy the container, you want to get in there, run a lot of commands, debug, develop, etc... To do this, the best command to run is bash, and then you have an interactive shell to use ./docker/run_dockers.bsh {image name(s)} -- bash After listing the image(s) you want to run, add a double dash (--) and then any command (and arguments) you want executed in the docker. Remember, the command you are executing has to be in the docker image. ## Docker images ## There are currently three types of docker images: 1. Building images: `{OS NAME}_{OS VERSION}` - These build git-lfs and save the package/repository in the `/repo` directory. This image also signs all rpms/debs if gpg signing is set up 2. 
Environment building images: `{OS_NAME}_{OS_VERSION}_env` - These build or install the environment (dependencies) for building git-lfs. These are mostly important for CentOS because without these, many dependencies have to be built by a developer. These containers should create packages for these dependencies and place them in `/repo` 3. Testing images: `{OS_NAME}_{OS_VERSION}_test` - These images should install the repo and download the git-lfs packages and dependencies to test that everything is working, including the GPG signatures. Unlike the first two types, testing images are not guaranteed to work without GPG signatures. They should also run the test and integration scripts after installing git-lfs to verify everything is working in a **non-developer** setup. (With the exception that go is needed to build the tests...) The default behavior for `./docker/run_dockers.bsh` is to run all of the _building images_. These containers will use the currently checked-out version of git-lfs and copy it into the docker, and run `git clean -xdf` to remove any non-tracked files (but non-committed changes are kept). git-lfs is built, and a packages/repo is created for each container. These are all a developer would need to test the different OSes and create the git-lfs rpm or deb packages in the `/repo` directory. In order to distribute git-lfs **and** build dependencies, the dependencies that were built to create the docker images need to be saved too. Most of these are downloaded by yum/apt-get and do not need to be saved, but a few are not. In order to save the necessary dependencies, call `./docker/run_dockers.bsh` on `{OS_NAME}_{OS_VERSION}_env` and the rpms will be extracted from the images and saved in the `./repo` directory. (This _can_ be done in one command) ./docker/run_dockers.bsh centos_6_env centos_6 This isn't all that important anymore, unless you want ruby2 and the gems used to make the man pages for CentOS 6 where ruby2 is not natively available. Calling the environment building images only needs to be done once; they should remain in the `./repo` directory afterwards. ### Run Docker Environment Variables ### There are a few environment variables you can set to easily adjust the behavior of the `run_dockers.bsh` script. `export` before calling `run_dockers.bsh` `REPO_HOSTNAME` - Override the hostname for all the repos generated/tested (see below) `DOCKER_AUTOPULL` - Default 1. `run_dockers.bsh` always pulls the latest version of the lfs dockers. If set to 0, it will not check to see if a new pull is needed, and you will always run off of your currently cached docker images. `AUTO_REMOVE` - Default 1. Docker containers are automatically deleted on exit. If set to 0, the docker containers will not be automatically deleted upon exit. This can be useful for a post mortem analysis (using other docker commands not covered here). Just make sure you clean up the docker containers manually. `DOCKER_OTHER_OPTIONS` - Any additional arguments you may want to pass to the docker run command. This can be particularly useful when having to help docker with dns, etc... For example `DOCKER_OTHER_OPTIONS="--dns 8.8.8.8"` If for some reason you need to add a -v mount on Windows, folder names need to start with `//driveletter/dir...` instead of `/driveletter/dir...` to fool MINGW32. ## Deploying/Building Repositories ## When `./docker/run_dockers.bsh` is done building git-lfs and generating packages, it automatically creates a repository for distribution too. 
Each distro gets a repo generated in `./repos/{DISTRO_NAME}/{VERSION #}`. Just drop the repo directory onto a webserver and you have a fully functioning Linux repo. (See Testing the Repositories below for more detail) The two major packages included are: `git-lfs-....*` - the git-lfs package `git-lfs-repo-release....*` - A package to install the repo. When building, all **untracked** files are removed during RPM generation (except that any stray directories containing a .git folder will not be cleared. This shouldn't be the case, unless you are temporarily storing another git repo in the git repo. This is a safety mechanism in git, so just keep this in mind if you are producing packages.) ### Setting the website URL ### The git-lfs-repo-release package must contain the URL where the repo is to be hosted. The current default value is `git-lfs.github.com` but this can be overridden using the `REPO_HOSTNAME` env var, e.g. export REPO_HOSTNAME=www.notgithub.uk.co ./docker/run_dockers.bsh Now all the `git-lfs-repo-release....*` files will point to that URL instead. _Hint_: `REPO_HOSTNAME` can also be `www.notgithub.uk.co:2213/not_root_dir` ### Testing the Repositories ### To test that all the OSes can download the packages, install, and run the tests again, run ./docker/test_dockers.bsh (which is basically just `./docker/run_dockers.bsh ./docker/git-lfs-test_*`) Remember to set `REPO_HOSTNAME` if you changed it for `./docker/run_dockers.bsh`. This can also be used to run a local test (on `localhost:{Port Number}`, for example). An easy way to test the repositories locally is to run them on a simple webserver such as cd ./repos python -m SimpleHTTPServer {Port number} or cd ./repos ruby -run -ehttpd . -p{Port Number} ## GPG signing ## For private repo testing, GPG signing can be skipped. apt-get and yum can install .deb/.rpm directly without gpg keys and everything will work (with certain flags). This section is for distribution in a repo. Most, if not all, of this functionality is automatically disabled when there is no signing key (`./docker/git-lfs_*.key`). In order to sign packages, you need to generate and place GPG keys in the right place. The general procedure for this is gpg --gen-key 1. 4 - RSA 2. 4096 bits 3. Some length of time or 0 for infinite 4. y for yes 5. Signer name (Will become part of the key and uid) 6. Email address (Will become part of the key and uid) 7. Comment (Will become part of the key) 8. O for Okay 9. Enter a secure password, make sure you will not forget it 10. Generate Entropy! gpg --export-secret-key '{Key ID}!' > filename.key e.g. `gpg --export-secret-key '547CF247!' > ./docker/git-lfs_centos_7.key` *NOTE*: the **!** is important in this command Keep in mind, .key files must NEVER be accidentally committed to the repo. _What if you don't have gpg handy?_ Just enter one of the dockers (-- bash) and generate them in there, and save them in the /src dir to get them out of the docker. Or `docker run -it --rm -v $(pwd):/key OS_NAME:OS_VERSION bash`, and generate in that docker and save to the `/key` directory. ### GPG Agent ### To prevent MANY passphrase entries at random times, a gpg-agent docker is used to cache your signing key. This is done automatically for you, whenever you call `./docker/run_dockers.bsh` on a building image (`git-lfs_*.dockerfile`). It can be manually preloaded by calling `./docker/gpg-agent_preload.bsh`. It will ask you for your passphrase, once for each unique key out of all the dockers. So if you use the same key for every docker, it will only prompt once. 
If you have 5 different keys, you'll have 5 prompts, with only the key ID to tell you which is which. The gpg agent TTL is set to 1 year. If this is not acceptable for you, set the `GPG_MAX_CACHE` and `GPG_DEFAULT_CACHE` environment variables (in seconds) before starting the gpg-agent daemon. `./docker/gpg-agent_start.bsh` starts the gpg-agent daemon. It is called automatically by `./docker/gpg-agent_preload.bsh` `./docker/gpg-agent_stop.bsh` stops the gpg-agent daemon. It is called automatically by `./docker/gpg-agent_preload.bsh` `./docker/gpg-agent_preload.bsh` is called automatically by `./docker/run_dockers.bsh` when running any of the signing dockers. `./docker/gpg-agent_preload.bsh -r` - Stops and restarts the gpg agent daemon. This is useful for reloading keys when you update them in your host. ### GPG capabilities by Distro ### Every distro has its own GPG signing capability. This is why every signing docker (`git-lfs_*.dockerfile`) can have an associated key (`git-lfs_*.key`). Debian **will** work with 4096 bit RSA signing subkeys like [1] suggests, but will also work with 4096 bit RSA signing keys. CentOS will **not** work with subkeys[3]. CentOS 6 and 7 will work with 4096 bit RSA signing keys. You can make a 4096 RSA key for Debian and CentOS 6/7 (4 for step 1 above, and 4096 for step 2). And only have two keys... Or optionally a 4096 RSA subkey for Debian [1]. Or a key for each distro. Dealer's choice. [1] https://www.digitalocean.com/community/tutorials/how-to-use-reprepro-for-a-secure-package-repository-on-ubuntu-14-04 [2] https://iuscommunity.org/pages/CreatingAGPGKeyandSigningRPMs.html#exporting-the-public-gpg-key [3] http://www.redhat.com/archives/rpm-list/2006-November/msg00105.html ## Adding additional OSes ## To add another operating system, it needs to be added to the lfs_dockers repo and uploaded to docker hub. Then all that is left is to add it to the IMAGES list in `run_dockers.bsh` and `test_dockers.bsh`. Follow the already existing pattern `{OS NAME}_{OS VERSION #}` where **{OS NAME}** and **{OS VERSION #}** should not contain underscores (\_). ## Docker Cheat sheet ## Install https://docs.docker.com/installation/ * list running dockers docker ps * list stopped dockers too docker ps -a * Remove all stopped dockers docker rm $(docker ps --filter=status=exited -q) * List docker images docker images * Remove unused docker images docker rmi $(docker images -a --filter=dangling=true -q) * Run another command (like bash) in a running docker docker exec -i {docker name} {command} * Stopping a docker (signal 15 to the main pid) docker stop {docker name} * Killing a docker (signal 9 to the main pid) docker kill {docker name} # Troubleshooting # 1. I started one of the scripts, and am trying to stop it with Ctrl+C. It is ignoring many Ctrl+C's. This happens a lot when calling programs like apt-get, yum, etc... From the host, you can still use ps, pgrep, kill, pkill, etc... commands to kill the PIDs in a docker. You can also use `docker ps` to find the container name/id and then use `docker stop` (signal 15) or `docker kill` (signal 9) to stop the docker. You can also use `docker exec` to start another bash or kill command inside that container. 2. How do I re-enter a docker after it failed/succeeded? Dockers are immediately deleted upon exit. The best way to work in a docker is to run bash (See Development in Dockers). This will let you run the main build command and then continue. 3. That answer's not good enough. How do I resume a docker? 
Well, first you have to set the environment variable `AUTO_REMOVE=0` before running the image you want to resume. This will keep the docker around after stopping. (Be careful! They multiply like rabbits.) Then docker commit {container name/id} {new_name} Then you can `docker run` that new image. git-lfs-2.3.4/docker/gpg-agent_preload.bsh000077500000000000000000000021121317167762300204250ustar00rootroot00000000000000#!/usr/bin/env bash #Based off of https://github.com/andyneff/gpg_agent/blob/master/preload.bsh set -eu CUR_DIR=$(dirname ${BASH_SOURCE[0]}) IMAGE_NAME=andyneff/gpg_agent CONTAINER_NAME=git-lfs-gpg : ${SUDO=`if ( [ ! -w /var/run/docker.sock ] && id -nG | grep -qwv docker && [ "${DOCKER_HOST:+dh}" != "dh" ] ) && which sudo > /dev/null 2>&1; then echo sudo; fi`} if [[ $# > 0 ]] && [ "$1" == "-r" ]; then ${CUR_DIR}/gpg-agent_stop.bsh shift fi ${CUR_DIR}/gpg-agent_start.bsh for (( x=0; x<10; x++ )); do if $SUDO docker exec -it ${CONTAINER_NAME} bash -c \ "gpg-connect-agent --homedir="'${GNUPGHOME}'" /bye"; then break else sleep 1 fi done if ls ${CUR_DIR}/*.key >/dev/null 2>&1; then for file in ${CUR_DIR}/*.key; do if [ -s $file ]; then $SUDO docker cp $file ${CONTAINER_NAME}:/tmp/ fi done fi $SUDO docker exec -it ${CONTAINER_NAME} script /dev/null -q -c ' \ for key in $(ls /tmp/*.key); do \ if [ -s $key ]; then \ gpg --import $key; \ gpg2 -o /dev/null -s /dev/null; \ rm -f /tmp/gpg-agent/*.gpg; \ fi \ done'git-lfs-2.3.4/docker/gpg-agent_start.bsh000077500000000000000000000012541317167762300201420ustar00rootroot00000000000000#!/usr/bin/env bash #Based off of https://github.com/andyneff/gpg_agent/blob/master/start.bsh set -eu CUR_DIR=$(dirname ${BASH_SOURCE[0]}) IMAGE_NAME=andyneff/gpg_agent CONTAINER_NAME=git-lfs-gpg : ${SUDO=`if ( [ ! -w /var/run/docker.sock ] && id -nG | grep -qwv docker && [ "${DOCKER_HOST:+dh}" != "dh" ] ) && which sudo > /dev/null 2>&1; then echo sudo; fi`} if [ "$(docker inspect -f {{.State.Running}} ${CONTAINER_NAME})" != "true" ]; then OTHER_OPTIONS=("-e" "GPG_DEFAULT_CACHE=${GPG_DEFAULT_CACHE:-31536000}") OTHER_OPTIONS+=("-e" "GPG_MAX_CACHE=${GPG_MAX_CACHE:-31536000}") ${SUDO} docker run -d -t "${OTHER_OPTIONS[@]}" --name ${CONTAINER_NAME} ${IMAGE_NAME} fi git-lfs-2.3.4/docker/gpg-agent_stop.bsh000077500000000000000000000014361317167762300177740ustar00rootroot00000000000000#!/usr/bin/env bash #Based off of https://github.com/andyneff/gpg_agent/blob/master/stop.bsh set -eu CUR_DIR=$(dirname ${BASH_SOURCE[0]}) CONTAINER_NAME=git-lfs-gpg : ${SUDO=`if ( [ ! -w /var/run/docker.sock ] && id -nG | grep -qwv docker && [ "${DOCKER_HOST:+dh}" != "dh" ] ) && which sudo > /dev/null 2>&1; then echo sudo; fi`} function docker_wait(){ local set loop=0 while true; do if [ "$($SUDO docker inspect -f {{.State.Running}} $1)" != "true" ]; then return 0 else if (( $loop >= $2 )); then return 1 fi sleep 1 fi loop=$(( loop + 1 )) done } if ! docker_wait ${CONTAINER_NAME} 0; then $SUDO docker exec -it ${CONTAINER_NAME} pkill gpg-agent || : fi docker_wait ${CONTAINER_NAME} 10 || : $SUDO docker rm -f ${CONTAINER_NAME}git-lfs-2.3.4/docker/run_dockers.bsh000077500000000000000000000066551317167762300174020ustar00rootroot00000000000000#!/usr/bin/env bash # Usage: # ./run_dockers.bsh - Run all the docker images # ./run_dockers.bsh centos_6 centos_7 - Run only CentOS 6 & 7 images # ./run_dockers.bsh centos_6 -- bash #Runs bash in the CentOS 6 docker # # Special Environment Variables # REPO_HOSTNAME - Override the hostname for all the repos generated/tested # DOCKER_AUTOPULL - Default 1. 
If set to 0, it will not pull docker images # before running # AUTO_REMOVE - Default 1. If set to 0, it will not automatically delete the # docker instance when done. This can be useful for a post mortem # analysis. Just make sure you clean up the docker instances # manually set -eu #Mingw32 auto converts /drive/dir/blah to drive:\dir\blah ... Can't have that. if [[ `uname` == MINGW* ]]; then MINGW_PATCH='/' else MINGW_PATCH='' fi CUR_DIR=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd) REPO_DIR=$(cd ${CUR_DIR}/..; pwd) PACKAGE_DIR=${REPO_DIR}/repos mkdir -p ${PACKAGE_DIR}/centos || : mkdir -p ${PACKAGE_DIR}/debian || : #If you are not in docker group and you have sudo, default value is sudo : ${SUDO=`if ( [ ! -w /var/run/docker.sock ] && id -nG | grep -qwv docker && [ "${DOCKER_HOST:+dh}" != "dh" ] ) && which sudo > /dev/null 2>&1; then echo sudo; fi`} function split_image_name() { #$1 - image dockerfile #sets IMAGE_NAME to the basename of the dir containing the docker file #sets IMAGE_INFO to be the array name following my pattern local IFS=_ IMAGE_INFO=($1) } # Parse Arguments IMAGES=() while [[ $# > 0 ]]; do if [ "$1" == "--" ]; then shift DOCKER_CMD="${@}" break else IMAGES+=("$1") fi shift done if [[ ${#IMAGES[@]} == 0 ]]; then IMAGES=(centos_6 centos_7 debian_7 debian_8 debian_9) fi mkdir -p "${PACKAGE_DIR}" #Run docker to build packages for IMAGE_NAME in "${IMAGES[@]}"; do split_image_name "${IMAGE_NAME}" #set IMAGE_NAME and IMAGE_INFO #Auto pull docker unless DOCKER_AUTOPULL=0 if [[ ${DOCKER_AUTOPULL-1} != 0 ]]; then $SUDO docker pull gitlfs/build-dockers:${IMAGE_NAME} fi #It CAN'T be empty () with set -u... So I put some defaults in here OTHER_OPTIONS=("-it") if [ "${AUTO_REMOVE-1}" == "1" ]; then OTHER_OPTIONS+=("--rm") fi if [ -s ${CUR_DIR}/${IMAGE_NAME}.key ]; then CONTAINER_NAME=git-lfs-gpg ${CUR_DIR}/gpg-agent_preload.bsh OTHER_OPTIONS+=("--volumes-from" "git-lfs-gpg") OTHER_OPTIONS+=("-v" "${CUR_DIR}/${IMAGE_NAME}.key:${MINGW_PATCH}/tmp/${IMAGE_NAME}.key") OTHER_OPTIONS+=("-e" "$(docker exec git-lfs-gpg cat ${MINGW_PATCH}/tmp/gpg-agent/gpg_agent_info)") #Do I need this? Or can I get away with hardcoding??? #GPG_AGENT_INFO=/tmp/gpg-agent/S.gpg-agent:1:1 fi FINAL_UID=$(id -u) FINAL_GID=$(id -g) if [[ $FINAL_UID == 0 ]]; then FINAL_UID=${SUDO_UID-} fi if [[ $FINAL_GID == 0 ]]; then FINAL_GID=${SUDO_GID-} fi echo Compiling LFS in docker image ${IMAGE_NAME} IMAGE_REPO_DIR="${PACKAGE_DIR}"/"${IMAGE_INFO[0]}"/"${IMAGE_INFO[1]}" $SUDO docker run "${OTHER_OPTIONS[@]}" ${DOCKER_OTHER_OPTIONS-} \ -e REPO_HOSTNAME=${REPO_HOSTNAME:-git-lfs.github.com} \ -e FINAL_UID=${FINAL_UID} \ -e FINAL_GID=${FINAL_GID} \ -v "${MINGW_PATCH}${REPO_DIR}:/src" \ -v "${MINGW_PATCH}${IMAGE_REPO_DIR}:/repo" \ gitlfs/build-dockers:${IMAGE_NAME} ${DOCKER_CMD-} done echo "Docker run completed successfully!" git-lfs-2.3.4/docker/test_dockers.bsh000077500000000000000000000003141317167762300175410ustar00rootroot00000000000000#!/usr/bin/env bash #Test to see if using the repo installs really works CUR_DIR=$(dirname "${BASH_SOURCE[0]}") ${CUR_DIR}/run_dockers.bsh centos_6_test centos_7_test debian_7_test debian_8_test "${@}" git-lfs-2.3.4/docs/000077500000000000000000000000001317167762300140325ustar00rootroot00000000000000git-lfs-2.3.4/docs/README.md000066400000000000000000000010331317167762300153060ustar00rootroot00000000000000# Git LFS Documentation ## Reference Manual Each Git LFS subcommand is documented in the [official man pages](man). 
Any of these can also be viewed from the command line: ```bash $ git lfs help <command> $ git lfs <command> -h ``` ## Videos * [How to Work with Big Files](https://www.youtube.com/watch?v=uLR1RNqJ1Mw) - Quick intro to Git LFS. ## Developer Docs Details of how the Git LFS **client** works are in the [official specification](spec.md). Details of how the Git LFS **server** works are in the [API specification](api). git-lfs-2.3.4/docs/api/000077500000000000000000000000001317167762300146035ustar00rootroot00000000000000git-lfs-2.3.4/docs/api/README.md000066400000000000000000000016061317167762300160650ustar00rootroot00000000000000# Git LFS API The Git LFS client uses an HTTPS server to coordinate fetching and storing large binary objects separately from a Git server. The basic process the client goes through looks like this: 1. [Discover the LFS Server to use](./server-discovery.md). 2. [Apply Authentication](./authentication.md). 3. Make the request. See the Batch and File Locking API sections. ## Batch API The Batch API is used to request the ability to transfer LFS objects with the LFS server. API Specification: * [Batch API](./batch.md) Current transfer adapters include: * [Basic](./basic-transfers.md) Experimental transfer adapters include: * Tus.io (upload only) * [Custom](../custom-transfers.md) ## File Locking API The File Locking API is used to create, list, and delete locks, as well as verify that locks are respected in Git pushes. API Specification: * [File Locking API](./locking.md) git-lfs-2.3.4/docs/api/authentication.md000066400000000000000000000041241317167762300201450ustar00rootroot00000000000000# Authentication The Git LFS API uses HTTP Basic Authentication to authorize requests. Therefore, HTTPS is strongly encouraged for all production Git LFS servers. The credentials can come from the following places: ## SSH Git LFS will add any HTTP headers returned from the `git-lfs-authenticate` command to any Batch API requests. If servers are returning expiring tokens, they can add an `expires_in` (or `expires_at`) property to hint when the token will expire. ```bash # Called for remotes like: # * git@git-server.com:foo/bar.git # * ssh://git@git-server.com/foo/bar.git $ ssh git@git-server.com git-lfs-authenticate foo/bar.git download { "header": { "Authorization": "RemoteAuth some-token" }, # optional, for expiring tokens, preferred over expires_at "expires_in": 86400 # optional, for expiring tokens "expires_at": "2016-11-10T15:29:07Z" } ``` See the SSH section in the [Server Discovery doc](./server-discovery.md) for more info about `git-lfs-authenticate`. ## Git Credentials Git provides a [`credentials` command](https://git-scm.com/docs/gitcredentials) for storing and retrieving credentials through a customizable credential helper. By default, it associates the credentials with a domain. You can enable `credential.useHttpPath` so different repository paths have different credentials. Git ships with a really basic credential cacher that stores passwords in memory, so you don't have to enter your password frequently. However, you are encouraged to set up a [custom git credential cacher](https://help.github.com/articles/caching-your-github-password-in-git/), if a better one exists for your platform. If your Git LFS server authenticates with NTLM, then you must provide your credentials to `git-credential` in the form `username:DOMAIN\user password:password`. ## Specified in URL You can hardcode credentials into your Git remote or LFS url properties in your git config. 
This is not recommended for security reasons because it relies on the credentials living in your local git config. ```bash $ git remote add origin https://user:password@git-server.com/foo/bar.git ``` git-lfs-2.3.4/docs/api/basic-transfers.md000066400000000000000000000061111317167762300202120ustar00rootroot00000000000000# Basic Transfer API The Basic transfer API is a simple, generic API for directly uploading and downloading LFS objects. Git LFS servers can offload object storage to cloud services like S3, or implement this API natively. This is the original transfer adapter. All Git LFS clients and servers SHOULD support it, and default to it if the [Batch API](./batch.md) request or response does not specify a `transfer` property. ## Downloads Downloading an object requires a download `action` object in the Batch API response that looks like this: ```json { "transfer": "basic", "objects": [ { "oid": "1111111", "size": 123, "authenticated": true, "actions": { "download": { "href": "https://some-download.com/1111111", "header": { "Authorization": "Basic ..." }, "expires_in": 86400 } } } ] } ``` The Basic transfer adapter will make a GET request on the `href`, expecting the raw bytes returned in the HTTP response. ``` > GET https://some-download.com/1111111 > Authorization: Basic ... < < HTTP/1.1 200 OK < Content-Type: application/octet-stream < Content-Length: 123 < < {contents} ``` ## Uploads The client uploads objects through individual PUT requests. The URL and headers are provided by an upload `action` object. ```json { "transfer": "basic", "objects": [ { "oid": "1111111", "size": 123, "authenticated": true, "actions": { "upload": { "href": "https://some-upload.com/1111111", "header": { "Authorization": "Basic ..." }, "expires_in": 86400 } } } ] } ``` The Basic transfer adapter will make a PUT request on the `href`, sending the raw bytes in the HTTP request body. ``` > PUT https://some-upload.com/1111111 > Authorization: Basic ... > Content-Type: application/octet-stream > Content-Length: 123 > > {contents} > < HTTP/1.1 200 OK ``` ## Verification The Batch API can optionally return a verify `action` object in addition to an upload `action` object. If given, the Batch API expects a POST to the href after a successful upload. ```json { "transfer": "basic", "objects": [ { "oid": "1111111", "size": 123, "authenticated": true, "actions": { "upload": { "href": "https://some-upload.com/1111111", "header": { "Authorization": "Basic ..." }, "expires_in": 86400 }, "verify": { "href": "https://some-verify-callback.com", "header": { "Authorization": "Basic ..." }, "expires_in": 86400 } } } ] } ``` Git LFS clients send: * `oid` - The String OID of the Git LFS object. * `size` - The integer size of the Git LFS object, in bytes. ``` > POST https://some-verify-callback.com > Accept: application/vnd.git-lfs+json > Content-Type: application/vnd.git-lfs+json > Content-Length: 123 > > {"oid": "{oid}", "size": 10000} > < HTTP/1.1 200 OK ``` A 200 response means that the object exists on the server. git-lfs-2.3.4/docs/api/batch.md000066400000000000000000000170511317167762300162120ustar00rootroot00000000000000# Git LFS Batch API Added: v0.6 The Batch API is used to request the ability to transfer LFS objects with the LFS server. The Batch URL is built by adding `/objects/batch` to the LFS server URL. Git remote: https://git-server.com/foo/bar
LFS server: https://git-server.com/foo/bar.git/info/lfs
Batch API: https://git-server.com/foo/bar.git/info/lfs/objects/batch See the [Server Discovery doc](./server-discovery.md) for more info on how LFS builds the LFS server URL. All Batch API requests use the POST verb, and require the following HTTP headers. The request and response bodies are JSON. Accept: application/vnd.git-lfs+json Content-Type: application/vnd.git-lfs+json See the [Authentication doc](./authentication.md) for more info on how LFS authorizes Batch API requests. ## Requests The client sends the following information to the Batch endpoint to transfer some objects: * `operation` - Should be `download` or `upload`. * `transfers` - An optional Array of String identifiers for transfer adapters that the client has configured. If omitted, the `basic` transfer adapter MUST be assumed by the server. * `objects` - An Array of objects to transfer. * `oid` - String OID of the LFS object. * `size` - Integer byte size of the LFS object. Must be at least zero. Note: Git LFS currently only supports the `basic` transfer adapter. This property was added for future compatibility with some experimental transfer adapters. See the [API README](./README.md) for a list of the documented transfer adapters. ```js // POST https://lfs-server.com/objects/batch // Accept: application/vnd.git-lfs+json // Content-Type: application/vnd.git-lfs+json // Authorization: Basic ... (if needed) { "operation": "download", "transfers": [ "basic" ], "objects": [ { "oid": "12345678", "size": 123 } ] } ``` ### Successful Responses The Batch API should always return with a 200 status, unless there are some issues with the request (bad authorization, bad json, etc). See below for examples of response errors. Check out the documented transfer adapters in the [API README](./README.md) to see how Git LFS handles successful Batch responses. Successful responses include the following properties: * `transfer` - String identifier of the transfer adapter that the server prefers. This MUST be one of the given `transfer` identifiers from the request. Servers can assume the `basic` transfer adapter if none were given. The Git LFS client will use the `basic` transfer adapter if the `transfer` property is omitted. * `objects` - An Array of objects to transfer. * `oid` - String OID of the LFS object. * `size` - Integer byte size of the LFS object. Must be at least zero. * `authenticated` - Optional boolean specifying whether the request for this specific object is authenticated. If omitted or false, Git LFS will attempt to [find credentials for this URL](./authentication.md). * `actions` - Object containing the next actions for this object. Applicable actions depend on which `operation` is specified in the request. How these properties are interpreted depends on which transfer adapter the client will be using. * `href` - String URL to download the object. * `header` - Optional hash of String HTTP header key/value pairs to apply to the request. * `expires_in` - Whole number of seconds after local client time when transfer will expire. Preferred over `expires_at` if both are provided. Maximum of 2147483647, minimum of -2147483647. * `expires_at` - String ISO 8601 formatted timestamp for when the given action expires (usually due to a temporary token). Download operations MUST specify a `download` action, or an object error if the object cannot be downloaded for some reason. See "Response Errors" below. Upload operations can specify an `upload` and a `verify` action. The `upload` action describes how to upload the object. 
If the object has a `verify` action, the LFS client will hit this URL after a successful upload. Servers can use this for extra verification, if needed. If a client requests to upload an object that the server already has, the server should omit the `actions` property completely. The client will then assume the server already has it. ```js // HTTP/1.1 200 Ok // Content-Type: application/vnd.git-lfs+json { "transfer": "basic", "objects": [ { "oid": "1111111", "size": 123, "authenticated": true, "actions": { "download": { "href": "https://some-download.com", "header": { "Key": "value" }, "expires_at": "2016-11-10T15:29:07Z", } } } ] } ``` If there are problems accessing individual objects, servers should continue to return a 200 status code, and provide per-object errors. Here is an example: ```js // HTTP/1.1 200 Ok // Content-Type: application/vnd.git-lfs+json { "transfer": "basic", "objects": [ { "oid": "1111111", "size": 123, "error": { "code": 404, "message": "Object does not exist" } } ] } ``` LFS object error codes should match HTTP status codes where possible: * 404 - The object does not exist on the server. * 410 - The object was removed by the owner. * 422 - Validation error. ### Response Errors LFS servers can respond with these other HTTP status codes: * 401 - The authentication credentials are needed, but were not sent. Git LFS will attempt to [get the authentication](./authentication.md) for the request and retry immediately. * 403 - The user has **read**, but not **write** access. Only applicable when the `operation` in the request is "upload." * 404 - The Repository does not exist for the user. * 422 - Validation error with one or more of the objects in the request. This means that _none_ of the requested objects to upload are valid. Error responses will not have an `objects` property. They will only have: * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. ```js // HTTP/1.1 404 Not Found // Content-Type: application/vnd.git-lfs+json { "message": "Not found", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` HTTP 401 responses should include an `LFS-Authenticate` header to tell the client what form of authentication it requires. If omitted, Git LFS will assume Basic Authentication. This mirrors the standard `WWW-Authenticate` header with a custom header key so it does not trigger password prompts in browsers. ```js // HTTP/1.1 401 Unauthorized // Content-Type: application/vnd.git-lfs+json // LFS-Authenticate: Basic realm="Git LFS" { "message": "Credentials needed", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` The following status codes can optionally be returned from the API, depending on the server implementation. * 406 - The Accept header needs to be `application/vnd.git-lfs+json`. * 429 - The user has hit a rate limit with the server. Though the API does not specify any rate limits, implementors are encouraged to set some for availability reasons. * 501 - The server has not implemented the current method. Reserved for future use. * 507 - The server has insufficient storage capacity to complete the request. * 509 - The bandwidth limit for the user or repository has been exceeded. The API does not specify any bandwidth limit, but implementors may track usage. 
Some server errors may trigger the client to retry requests, such as 500, 502, 503, and 504. git-lfs-2.3.4/docs/api/locking.md000066400000000000000000000312051317167762300165540ustar00rootroot00000000000000# Git LFS File Locking API Added: v2.0 The File Locking API is used to create, list, and delete locks, as well as verify that locks are respected in Git pushes. The locking URLs are built by adding a suffix to the LFS Server URL. Git remote: https://git-server.com/foo/bar LFS server: https://git-server.com/foo/bar.git/info/lfs Locks API: https://git-server.com/foo/bar.git/info/lfs/locks See the [Server Discovery doc](./server-discovery.md) for more info on how LFS builds the LFS server URL. All File Locking requests require the following HTTP headers: Accept: application/vnd.git-lfs+json Content-Type: application/vnd.git-lfs+json See the [Authentication doc](./authentication.md) for more info on how LFS authorizes File Locking API requests. Note: This is the first version of the File Locking API, supporting only the simplest use case: single branch locking. The API is designed to be extensible as we experiment with more advanced locking scenarios, as defined in the [original proposal](/docs/proposals/locking.md). ## Create Lock The client creates a lock by sending a `POST` to `/locks` (appended to the LFS server url, as described above). Servers should ensure that users have push access to the repository, and that files are locked exclusively to one user. * `path` - String path name of the file that is locked. This should be relative to the root of the repository working directory. ```js // POST https://lfs-server.com/locks // Accept: application/vnd.git-lfs+json // Content-Type: application/vnd.git-lfs+json // Authorization: Basic ... { "path": "foo/bar.zip" } ``` ### Successful Response Successful responses return the created lock: * `id` - String ID of the Lock. Git LFS doesn't enforce what type of ID is used, as long as it's returned as a string. * `path` - String path name of the locked file. This should be relative to the root of the repository working directory. * `locked_at` - The timestamp the lock was created, as an ISO 8601 formatted string. * `owner` - The name of the user that created the Lock. This should be set from the user credentials posted when creating the lock. ```js // HTTP/1.1 201 Created // Content-Type: application/vnd.git-lfs+json { "lock": { "id": "some-uuid", "path": "/path/to/file", "locked_at": "2016-05-17T15:49:06+00:00", "owner": { "name": "Jane Doe", } } } ``` ### Bad Response: Lock Exists Lock services should reject lock creations if one already exists for the given path on the current repository. * `lock` - The existing Lock that clashes with the request. * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. ```js // HTTP/1.1 409 Conflict // Content-Type: application/vnd.git-lfs+json { "lock": { // details of existing lock }, "message": "already created lock", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ### Unauthorized Response Lock servers should require that users have push access to the repository before they can create locks. * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. 
```js // HTTP/1.1 403 Forbidden // Content-Type: application/vnd.git-lfs+json { "message": "You must have push access to create a lock", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ### Error Response * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. ```js // HTTP/1.1 500 Internal server error // Content-Type: application/vnd.git-lfs+json { "message": "internal server error", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ## List Locks The client can request the current active locks for a repository by sending a `GET` to `/locks` (appended to the LFS server url, as described above). LFS Servers should ensure that users have at least pull access to the repository. The properties are sent as URI query values, instead of through a JSON body: * `path` - Optional string path to match against locks on the server. * `id` - Optional string ID to match against a lock on the server. * `cursor` - The optional string value to continue listing locks. This value should be the `next_cursor` from a previous request. * `limit` - The integer limit of the number of locks to return. The server should have its own upper and lower bounds on the supported limits. ```js // GET https://lfs-server.com/locks?path=&id=&cursor=&limit= // Accept: application/vnd.git-lfs+json // Authorization: Basic ... (if needed) ``` ### Successful Response A successful response will list the matching locks: * `locks` - Array of matching Lock objects. See the "Create Lock" successful response section to see what Lock properties are possible. * `next_cursor` - Optional string cursor that the server can return if there are more locks matching the given filters. The client will re-do the request, setting the `?cursor` query value with this `next_cursor` value. Note: If the server has no locks, it must return an empty `locks` array. ```js // HTTP/1.1 200 Ok // Content-Type: application/vnd.git-lfs+json { "locks": [ { "id": "some-uuid", "path": "/path/to/file", "locked_at": "2016-05-17T15:49:06+00:00", "owner": { "name": "Jane Doe" } } ], "next_cursor": "optional next ID", } ``` ### Unauthorized Response Lock servers should require that users have pull access to the repository before they can list locks. * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. ```js // HTTP/1.1 403 Forbidden // Content-Type: application/vnd.git-lfs+json { "message": "You must have pull access to list locks", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ### Error Response * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. ```js // HTTP/1.1 500 Internal server error // Content-Type: application/vnd.git-lfs+json { "message": "unable to list locks", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ## List Locks for Verification The client can use the Lock Verification endpoint to check for active locks that can affect a Git push. 
For a caller, this endpoint is very similar to the "List Locks" endpoint above, except: * Verification requires a `POST` request. * The `cursor` and `limit` values are sent as properties in the json request body. * The response includes locks partitioned into `ours` and `theirs` properties. LFS Servers should ensure that users have push access to the repository. Clients send the following to list locks for verification by sending a `POST` to `/locks/verify` (appended to the LFS server url, as described above): * `cursor` * `limit` ```js // POST https://lfs-server.com/locks/verify // Accept: application/vnd.git-lfs+json // Content-Type: application/vnd.git-lfs+json // Authorization: Basic ... { "cursor": "optional cursor", "limit": 100 // also optional } ``` Note: As more advanced locking workflows are implemented, more details will likely be added to this request body in future iterations. ### Successful Response A successful response will list the relevant locks: * `ours` - Array of Lock objects currently owned by the authenticated user. * `theirs` - Array of Lock objects currently owned by other users. * `next_cursor` - Optional string cursor that the server can return if there are more locks matching the given filters. The client will re-do the request, setting the `cursor` property with this `next_cursor` value. If a Git push updates any files matching any of "our" locks, Git LFS will list them in the push output, in case the user wants to unlock them after the push. However, any updated files matching one of "their" locks will halt the push. At this point, it is up to the user to resolve the lock conflict with their team. Note: If the server has no locks, it must return an empty array in the `ours` or `theirs` properties. ```js // HTTP/1.1 200 Ok // Content-Type: application/vnd.git-lfs+json { "ours": [ { "id": "some-uuid", "path": "/path/to/file", "locked_at": "2016-05-17T15:49:06+00:00", "owner": { "name": "Jane Doe" } } ], "theirs": [], "next_cursor": "optional next ID", } ``` ### Not Found Response By default, an LFS server that doesn't implement any locking endpoints should return 404. This response will not halt any Git pushes. Any 404 will do, but Git LFS will show a better error message with a json response. * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. ```js // HTTP/1.1 404 Not found // Content-Type: application/vnd.git-lfs+json { "message": "Not found", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ### Unauthorized Response Lock servers should require that users have push access to the repository before they can get a list of locks to verify a Git push. * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. 
```js // HTTP/1.1 500 Internal server error // Content-Type: application/vnd.git-lfs+json { "message": "unable to list locks", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ## Delete Lock The client can delete a lock, given its ID, by sending a `POST` to `/locks/:id/unlock` (appended to the LFS server url, as described above). LFS servers should ensure that callers have push access to the repository. They should also prevent a user from deleting another user's lock, unless the `force` property is given. Properties: * `force` - Optional boolean specifying that the user is deleting another user's lock. ```js // POST https://lfs-server.com/locks/:id/unlock // Accept: application/vnd.git-lfs+json // Content-Type: application/vnd.git-lfs+json // Authorization: Basic ... { "force": true } ``` ### Successful Response Successful deletions return the deleted lock. See the "Create Lock" successful response section to see what Lock properties are possible. ```js // HTTP/1.1 200 Ok // Content-Type: application/vnd.git-lfs+json { "lock": { "id": "some-uuid", "path": "/path/to/file", "locked_at": "2016-05-17T15:49:06+00:00", "owner": { "name": "Jane Doe" } } } ``` ### Unauthorized Response Lock servers should require that users have push access to the repository before they can delete locks. Also, if the `force` parameter is omitted, or false, the user should only be allowed to delete locks that they created. * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. ```js // HTTP/1.1 403 Forbidden // Content-Type: application/vnd.git-lfs+json { "message": "You must have push access to delete locks", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` ### Error response * `message` - String error message. * `request_id` - Optional String unique identifier for the request. Useful for debugging. * `documentation_url` - Optional String to give the user a place to report errors. 
```js // HTTP/1.1 500 Internal server error // Content-Type: application/vnd.git-lfs+json { "message": "unable to delete lock", "documentation_url": "https://lfs-server.com/docs/errors", "request_id": "123" } ``` git-lfs-2.3.4/docs/api/schemas/000077500000000000000000000000001317167762300162265ustar00rootroot00000000000000git-lfs-2.3.4/docs/api/schemas/http-batch-request-schema.json000077700000000000000000000000001317167762300346632../../../tq/schemas/http-batch-request-schema.jsonustar00rootroot00000000000000git-lfs-2.3.4/docs/api/schemas/http-batch-response-schema.json000077700000000000000000000000001317167762300351772../../../tq/schemas/http-batch-response-schema.jsonustar00rootroot00000000000000git-lfs-2.3.4/docs/api/schemas/http-lock-create-request-schema.json000077700000000000000000000000001317167762300401052../../../locking/schemas/http-lock-create-request-schema.jsonustar00rootroot00000000000000git-lfs-2.3.4/docs/api/schemas/http-lock-create-response-schema.json000077700000000000000000000000001317167762300404212../../../locking/schemas/http-lock-create-response-schema.jsonustar00rootroot00000000000000git-lfs-2.3.4/docs/api/schemas/http-lock-delete-request-schema.json000077700000000000000000000000001317167762300401032../../../locking/schemas/http-lock-delete-request-schema.jsonustar00rootroot00000000000000git-lfs-2.3.4/docs/api/schemas/http-lock-list-response-schema.json000077700000000000000000000000001317167762300376412../../../locking/schemas/http-lock-list-response-schema.jsonustar00rootroot00000000000000git-lfs-2.3.4/docs/api/schemas/http-lock-verify-response-schema.json000077700000000000000000000000001317167762300405232../../../locking/schemas/http-lock-verify-response-schema.jsonustar00rootroot00000000000000git-lfs-2.3.4/docs/api/server-discovery.md000066400000000000000000000063471317167762300204520ustar00rootroot00000000000000# Server Discovery One of the Git LFS goals is to work with supporting Git remotes with as few required configuration properties as possible. Git LFS will attempt to use your Git remote to determine the LFS server. You can also configure a custom LFS server if your Git remote doesn't support one, or you just want to use a separate one. Look for the `Endpoint` properties in `git lfs env` to see your current LFS servers. ## Guessing the Server By default, Git LFS will append `.git/info/lfs` to the end of a Git remote url to build the LFS server URL it will use: Git Remote: `https://git-server.com/foo/bar`
LFS Server: `https://git-server.com/foo/bar.git/info/lfs` Git Remote: `https://git-server.com/foo/bar.git`
LFS Server: `https://git-server.com/foo/bar.git/info/lfs` Git Remote: `git@git-server.com:foo/bar.git`
LFS Server: `https://git-server.com/foo/bar.git/info/lfs` Git Remote: `ssh://git-server.com/foo/bar.git`
LFS Server: `https://git-server.com/foo/bar.git/info/lfs` ## SSH If Git LFS detects an SSH remote, it will run the `git-lfs-authenticate` command. This allows supporting Git servers to give the Git LFS client alternative authentication so the user does not have to set up a git credential helper. Git LFS runs the following command: $ ssh [{user}@]{server} git-lfs-authenticate {path} {operation} The `user`, `server`, and `path` properties are taken from the SSH remote. The `operation` can either be "download" or "upload". The SSH command can be tweaked with the `GIT_SSH` or `GIT_SSH_COMMAND` environment variables. The output for successful commands is JSON, and matches the schema of an `action` in a Batch API response. Git LFS will dump the STDERR from the `ssh` command if it returns a non-zero exit code. Examples: The `git-lfs-authenticate` command can even suggest an LFS endpoint that does not match the Git remote by specifying an `href` property. ```bash # Called for remotes like: # * git@git-server.com:foo/bar.git # * ssh://git@git-server.com/foo/bar.git $ ssh git@git-server.com git-lfs-authenticate foo/bar.git download { "href": "https://lfs-server.com/foo/bar", "header": { "Authorization": "RemoteAuth some-token" }, "expires_in": 86400 } ``` Git LFS will output the STDERR if `git-lfs-authenticate` returns a non-zero exit code: ```bash $ ssh git@git-server.com git-lfs-authenticate foo/bar.git wat Invalid LFS operation: "wat" ``` ## Custom Configuration If Git LFS can't guess your LFS server, or you aren't using the `git-lfs-authenticate` command, you can specify the LFS server using Git config. Set `lfs.url` to set the LFS server, regardless of Git remote. ```bash $ git config lfs.url https://lfs-server.com/foo/bar ``` You can set `remote.{name}.lfsurl` to set the LFS server for that specific remote only: ```bash $ git config remote.dev.lfsurl http://lfs-server.dev/foo/bar $ git lfs env ... Endpoint=https://git-server.com/foo/bar.git/info/lfs (auth=none) Endpoint (dev)=http://lfs-server.dev/foo/bar (auth=none) ``` Git LFS will also read these settings from a `.lfsconfig` file in the root of your repository. This lets you commit it to the repository so that all users can use it, if you wish. ```bash $ git config --file=.lfsconfig lfs.url https://lfs-server.com/foo/bar ``` git-lfs-2.3.4/docs/custom-transfers.md000066400000000000000000000311161317167762300176750ustar00rootroot00000000000000# Adding Custom Transfer Agents to LFS ## Introduction Git LFS supports multiple ways to transfer (upload and download) files. In the core client, the basic way to do this is via a one-off HTTP request via the URL returned from the LFS API for a given object. The core client also supports extensions to allow resuming of downloads (via `Range` headers) and uploads (via the [tus.io](http://tus.io) protocol). Some people might want to be able to transfer content in other ways, however. To enable this, git-lfs allows configuring Custom Transfers, which are simply processes which must adhere to the protocol defined later in this document. git-lfs will invoke the process at the start of all transfers, and will communicate with the process via stdin/stdout for each transfer. ## Custom Transfer Type Selection In the LFS API request, the client includes a list of transfer types it supports. When replying, the API server will pick one of these and make any necessary adjustments to the returned object actions, in case the picked transfer type needs custom details about how to do each transfer. 
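For illustration only, here is roughly what that negotiation could look like for a hypothetical `nfs` transfer type (the name and URLs follow the NFS example used later in this document, and are not built into git-lfs):

```js
// Batch API request (abridged): the client advertises its transfer types
{
  "operation": "download",
  "transfers": [ "basic", "nfs" ],
  "objects": [ { "oid": "1111111", "size": 123 } ]
}

// Batch API response (abridged): the server picks one and adjusts the actions
{
  "transfer": "nfs",
  "objects": [
    {
      "oid": "1111111",
      "size": 123,
      "actions": {
        "download": { "href": "nfs://server/path/to/object" }
      }
    }
  ]
}
```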
## Using a Custom Transfer Type without the API server In some cases, the transfer agent can figure out by itself how and where the transfers should be made, without having to query the API server. In this case it's possible to use the custom transfer agent directly, without querying the server, by using the following config option: * `lfs.standalonetransferagent`, `lfs.<url>.standalonetransferagent` Specifies a custom transfer agent to be used if the API server URL matches as in `git config --get-urlmatch lfs.standalonetransferagent <apiurl>`. `git-lfs` will not contact the API server. It instead sets stage 2 transfer actions to `null`. `lfs.<url>.standalonetransferagent` can be used to configure a custom transfer agent for individual remotes. `lfs.standalonetransferagent` unconditionally configures a custom transfer agent for all remotes. The custom transfer agent must be specified in a `lfs.customtransfer.<name>` settings group. ## Defining a Custom Transfer Type A custom transfer process is defined under a settings group called `lfs.customtransfer.<name>`, where `<name>` is an identifier (see [Naming](#naming) below). * `lfs.customtransfer.<name>.path` `path` should point to the process you wish to invoke. This will be invoked at the start of all transfers (possibly many times, see the `concurrent` option below) and the protocol over stdin/stdout is defined below in the [Protocol](#protocol) section. * `lfs.customtransfer.<name>.args` If the custom transfer process requires any arguments, these can be provided here. Typically you would only need this if your process was multi-purpose or particularly flexible; most of the time you won't need it. * `lfs.customtransfer.<name>.concurrent` If true (the default), git-lfs will invoke the custom transfer process multiple times in parallel, according to `lfs.concurrenttransfers`, splitting the transfer workload between the processes. If you would prefer that only one instance of the transfer process is invoked, maybe because you want to do your own parallelism internally (e.g. slicing files into parts), set this to false. * `lfs.customtransfer.<name>.direction` Specifies which direction the custom transfer process supports, either `download`, `upload`, or `both`. The default if unspecified is `both`. ## Naming Each custom transfer must have a name which is unique to the underlying mechanism, and the client and the server must agree on that name. The client will advertise this name to the server as a supported transfer approach, and if the server supports it, it will return relevant object action links. Because these may be very different from standard HTTP URLs, it's important that the client and server agree on the name. For example, let's say I've implemented a custom transfer process which uses NFS. I could call this transfer type `nfs` - although it's not specific to my configuration exactly, it is specific to the way NFS works, and the server will need to give me different URLs. Assuming I define my transfer like this, and the server supports it, I might start getting object action links back like `nfs://<server>/path/to/object`. ## Protocol The git-lfs client communicates with the custom transfer process via the stdin and stdout streams. No file content is communicated on these streams, only request / response metadata. The metadata exchanged is always in JSON format. External files will be referenced when actual content is exchanged. 
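Before walking through the protocol stages, here is a sketch of how an agent might be wired up using the settings described above. The `nfs` name and the agent path are hypothetical examples, not defaults shipped with git-lfs:

```bash
# Register a custom transfer process named "nfs"
$ git config lfs.customtransfer.nfs.path /usr/local/bin/lfs-nfs-agent
$ git config lfs.customtransfer.nfs.concurrent false
$ git config lfs.customtransfer.nfs.direction both

# Optionally make it a standalone agent, skipping the API server entirely
$ git config lfs.standalonetransferagent nfs
```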
### Line Delimited JSON Because multiple JSON messages will be exchanged on the same stream it's useful to delimit them explicitly rather than have the parser find the closing `}` in an arbitrary stream; therefore each JSON structure will be sent and received on a **single line** as per [Line Delimited JSON](https://en.wikipedia.org/wiki/JSON_Streaming#Line_delimited_JSON_2). In other words when git-lfs sends a JSON message to the custom transfer it will be on a single line, with a line feed at the end. The transfer process must respond the same way by writing a JSON structure back to stdout with a single line feed at the end (and flush the output). ### Protocol Stages The protocol consists of 3 stages: #### Stage 1: Initiation Immediately after invoking a custom transfer process, git-lfs sends initiation data to the process over stdin. This tells the process useful information about the configuration. The message will look like this: ```json { "event": "init", "operation": "download", "remote": "origin", "concurrent": true, "concurrenttransfers": 3 } ``` * `event`: Always `init` to identify this message * `operation`: will be `upload` or `download` depending on transfer direction * `remote`: The Git remote. It can be a remote name like `origin` or a URL like `ssh://git.example.com//path/to/repo`. A standalone transfer agent can use it to determine the location of remote files. * `concurrent`: reflects the value of `lfs.customtransfer.<name>.concurrent`, in case the process needs to know * `concurrenttransfers`: reflects the value of `lfs.concurrenttransfers`, in case the transfer process wants to implement its own concurrency and wants to respect this setting. The transfer process should use the information it needs from the initiation structure, and also perform any one-off setup tasks it needs to do. It should then respond on stdout with a simple empty confirmation structure, as follows: ```json { } ``` Or if there was an error: ```json { "error": { "code": 32, "message": "Some init failure message" } } ``` #### Stage 2: 0..N Transfers After the initiation exchange, git-lfs will send any number of transfer requests to the stdin of the transfer process, in a serial sequence. Once a transfer request is sent to the process, it awaits a completion response before sending the next request. ##### Uploads For uploads the request sent from git-lfs to the transfer process will look like this: ```json { "event": "upload", "oid": "bf3e3e2af9366a3b704ae0c31de5afa64193ebabffde2091936ad2e7510bc03a", "size": 346232, "path": "/path/to/file.png", "action": { "href": "nfs://server/path", "header": { "key": "value" } } } ``` * `event`: Always `upload` to identify this message * `oid`: the identifier of the LFS object * `size`: the size of the LFS object * `path`: the file which the transfer process should read the upload data from * `action`: the `upload` action copied from the response from the batch API. This contains `href` and `header` contents, which are named per HTTP conventions, but can be interpreted however the custom transfer agent wishes (this is an NFS example, but it doesn't even have to be a URL). Generally, `href` will give the primary connection details, with `header` containing any miscellaneous information needed. `action` is `null` for standalone transfer agents.
The transfer process should post one or more [progress messages](#progress) and then a final completion message as follows: ```json { "event": "complete", "oid": "bf3e3e2af9366a3b704ae0c31de5afa64193ebabffde2091936ad2e7510bc03a" } ``` * `event`: Always `complete` to identify this message * `oid`: the identifier of the LFS object Or if there was an error in the transfer: ```json { "event": "complete", "oid": "bf3e3e2af9366a3b704ae0c31de5afa64193ebabffde2091936ad2e7510bc03a", "error": { "code": 2, "message": "Explain what happened to this transfer" } } ``` * `event`: Always `complete` to identify this message * `oid`: the identifier of the LFS object * `error`: Should contain a `code` and `message` explaining the error ##### Downloads For downloads the request sent from git-lfs to the transfer process will look like this: ```json { "event": "download", "oid": "22ab5f63670800cc7be06dbed816012b0dc411e774754c7579467d2536a9cf3e", "size": 21245, "action": { "href": "nfs://server/path", "header": { "key": "value" } } } ``` * `event`: Always `download` to identify this message * `oid`: the identifier of the LFS object * `size`: the size of the LFS object * `action`: the `download` action copied from the response from the batch API. This contains `href` and `header` contents, which are named per HTTP conventions, but can be interpreted however the custom transfer agent wishes (this is an NFS example, but it doesn't even have to be a URL). Generally, `href` will give the primary connection details, with `header` containing any miscellaneous information needed. `action` is `null` for standalone transfer agents. Note there is no file path included in the download request; the transfer process should create a file itself and return the path in the final response after completion (see below). The transfer process should post one or more [progress messages](#progress) and then a final completion message as follows: ```json { "event": "complete", "oid": "22ab5f63670800cc7be06dbed816012b0dc411e774754c7579467d2536a9cf3e", "path": "/path/to/file.png" } ``` * `event`: Always `complete` to identify this message * `oid`: the identifier of the LFS object * `path`: the path to a file containing the downloaded data, which the transfer process relinquishes control of to git-lfs. git-lfs will move the file into LFS storage. Or, if there was a failure transferring this item: ```json { "event": "complete", "oid": "22ab5f63670800cc7be06dbed816012b0dc411e774754c7579467d2536a9cf3e", "error": { "code": 2, "message": "Explain what happened to this transfer" } } ``` * `event`: Always `complete` to identify this message * `oid`: the identifier of the LFS object * `error`: Should contain a `code` and `message` explaining the error Errors for a single transfer request should not terminate the process. The error should be returned in the response structure instead. The custom transfer adapter does not need to check the SHA of the file content it has downloaded; git-lfs will do that before moving the final content into the LFS store.
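Tying the stages together, the sketch below is a minimal, download-only standalone agent written as a bash script. It is illustrative only: it assumes `jq(1)` is available and that objects can simply be copied from a hypothetical `/mnt/lfs-store` mount keyed by OID; the progress and terminate events it uses are described in the sections that follow.

```bash
#!/usr/bin/env bash
# Minimal sketch of a download-only standalone custom transfer agent.
# Assumes jq(1) is installed; /mnt/lfs-store is a hypothetical content mount.
while read -r line; do
  event=$(printf '%s' "$line" | jq -r .event)
  case "$event" in
    init)
      # No one-off setup needed; confirm initiation with an empty structure.
      printf '{}\n'
      ;;
    download)
      oid=$(printf '%s' "$line" | jq -r .oid)
      size=$(printf '%s' "$line" | jq -r .size)
      out=$(mktemp)
      if cp "/mnt/lfs-store/$oid" "$out"; then
        # Report all bytes at once, then hand the file over to git-lfs.
        printf '{"event":"progress","oid":"%s","bytesSoFar":%s,"bytesSinceLast":%s}\n' "$oid" "$size" "$size"
        printf '{"event":"complete","oid":"%s","path":"%s"}\n' "$oid" "$out"
      else
        printf '{"event":"complete","oid":"%s","error":{"code":2,"message":"object not found"}}\n' "$oid"
      fi
      ;;
    terminate)
      exit 0
      ;;
  esac
done
```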
##### Progress In order to support progress reporting while data is uploading / downloading, the transfer process should post messages to stdout as follows before sending the final completion message: ```json { "event": "progress", "oid": "22ab5f63670800cc7be06dbed816012b0dc411e774754c7579467d2536a9cf3e", "bytesSoFar": 1234, "bytesSinceLast": 64 } ``` * `event`: Always `progress` to identify this message * `oid`: the identifier of the LFS object * `bytesSoFar`: the total number of bytes transferred so far * `bytesSinceLast`: the number of bytes transferred since the last progress message The transfer process should post these messages such that the last one sent has `bytesSoFar` equal to the file size on success. #### Stage 3: Finish & Cleanup When all transfers have been processed, git-lfs will send the following message to the stdin of the transfer process: ```json { "event": "terminate" } ``` On receiving this message the transfer process should clean up and terminate. No response is expected. ## Error handling Any unexpected fatal errors in the transfer process (not errors specific to a transfer request) should set the exit code to non-zero and print information to stderr. Otherwise the exit code should be 0 even if some transfers failed. ## A Note On Verify Actions You may have noticed that only the `upload` and `download` actions are passed to the custom transfer agent for processing. What about the `verify` action, if the API returns one? Custom transfer agents do not handle the verification process, only the upload and download of content. The verify link is typically used to notify a system *other* than the actual content store after an upload was completed; therefore it makes more sense for that to be handled via the normal API process. git-lfs-2.3.4/docs/extensions.md000066400000000000000000000215621317167762300165610ustar00rootroot00000000000000# Extending LFS Teams who use Git LFS often have custom requirements for how the pointer files and blobs should be handled. Some examples of extensions that could be built: * Compress large files on clean, uncompress them on smudge/fetch * Encrypt files on clean, decrypt on smudge/fetch * Scan files on clean to make sure they don't contain sensitive information The basic extensibility model is that LFS extensions must be registered explicitly, and they will be invoked on clean and smudge to manipulate the contents of the files as needed. On clean, LFS itself ensures that the pointer file is updated with all the information needed to be able to smudge correctly, and the extensions never modify the pointer file directly. NOTE: This feature is considered experimental, and included so developers can work on extensions. Exact details of how extensions work are subject to change based on feedback. It is possible for buggy extensions to leave your repository in a bad state, so don't rely on them with a production git repository without extensive testing. ## Registration To register an LFS extension, it must be added to the Git config. Each extension needs to define: * Its unique name. This will be used as part of the key in the pointer file. * The command to run on clean (when files are added to git). * The command to run on smudge (when files are downloaded and checked out). * The priority of the extension, which must be a unique, non-negative integer. The sequence `%f` in the clean and smudge commands will be replaced by the filename being processed.
Here's an example extension registration in the Git config: ``` [lfs "extension.foo"] clean = foo clean %f smudge = foo smudge %f priority = 0 [lfs "extension.bar"] clean = bar clean %f smudge = bar smudge %f priority = 1 ``` ## Clean When staging a file, Git invokes the LFS clean filter, as described earlier. If no extensions are installed, the LFS clean filter reads bytes from STDIN, calculates the SHA-256 signature, and writes the bytes to a temp file. It then moves the temp file into the appropriate place in .git/lfs/objects and writes a valid pointer file to STDOUT. When an extension is installed, LFS will invoke the extension to do additional processing on the bytes before writing them into the temp file. If multiple extensions are installed, they are invoked in the order defined by their priority. LFS will also insert a key in the pointer file for each extension that was invoked, indicating both the order that the extension was invoked and the oid of the file before that extension was invoked. All of that information is required to be able to reliably smudge the file later. Each new line in the pointer file will be of the form: `ext-{order}-{name} {hash-method}:{hash-of-input-to-extension}` This naming ensures that all extensions are written in both alphabetical and priority order, and also shows the progression of changes to the oid as it is processed by the extensions. Here's an example sequence, assuming extensions foo and bar are installed, as shown in the previous section. * Git passes the original contents of the file to LFS clean over STDIN. * LFS reads those bytes and calculates the original SHA-256 signature. * LFS streams the bytes to STDIN of `foo clean`, which is expected to write those bytes, modified or not, to its STDOUT. * LFS reads the bytes from STDOUT of `foo clean`, calculates the SHA-256 signature, and writes them to STDIN of `bar clean`, which then writes those bytes, modified or not, to its STDOUT. * LFS reads the bytes from STDOUT of `bar clean`, calculates the SHA-256 signature, and writes the bytes to a temp file. * When finished, LFS atomically moves the temp file into `.git/lfs/objects`. * LFS generates the pointer file, with some changes: * The oid and size keys are calculated from the final bytes written to LFS local storage. * LFS also writes keys named `ext-0-foo` and `ext-1-bar` into the pointer, along with their respective input oids. Here's an example pointer file, for a file processed by extensions foo and bar: ``` version https://git-lfs.github.com/spec/v1 ext-0-foo sha256:{original hash} ext-1-bar sha256:{hash after foo} oid sha256:{hash after bar} size 123 (ending \n) ``` Note: as an optimization, if an extension just does a pass-through, its key can be omitted from the pointer file. This will make smudging the file a bit more efficient since that extension can be skipped. LFS can detect a pass-through extension because the input and output oids will be the same. This implies that extensions must have no side effects other than writing to their STDOUT. Otherwise LFS has no way to know what extensions modified a file. ## Smudge When a file is checked out, Git invokes the LFS smudge filter, as described earlier. If no extensions are installed, the LFS smudge filter inspects the first 100 bytes of the bytes off STDIN, and if it is a pointer file, uses the oid to find the correct object in the LFS storage, and writes those bytes to STDOUT so that Git can write them to the working directory. 
If the pointer file indicates that extensions were invoked on that file, then those extensions must be installed in order to smudge. If they are not installed, not found, or unusable for any reason, LFS will fail to smudge the file and output an error indicating which extension is missing. Each of the extensions indicated in the pointer file must be invoked in reverse order to undo the changes they made to the contents of the file. After each extension is invoked, LFS will compare the SHA-256 signature of the bytes output by the extension with the oid stored in the pointer file as the original input to that same extension. Those signatures must match; otherwise the extension did not undo its changes correctly. In that case, LFS fails to smudge the file, and outputs an error indicating which extension is failing. Here's an example sequence, indicating how LFS will smudge the pointer file shown in the previous section: * Git passes the bytes of the pointer file to LFS smudge over STDIN. Note that when using `git lfs checkout`, LFS reads the files directly from disk rather than off STDIN. The rest of the steps are unaffected either way. * LFS reads those bytes and inspects them to see if this is a pointer file. If it was not, the bytes would just be passed through to STDOUT. * Since it is a pointer file, LFS reads the whole file off STDIN, parses it, and determines that extensions foo and bar both processed the file, in that order. * LFS uses the value of the oid key to find the blob in the `.git/lfs/objects` folder, or downloads it from the server as needed. * LFS writes the contents of the blob to STDIN of `bar smudge`, which modifies them as needed and writes them to its STDOUT. * LFS reads the bytes from STDOUT of `bar smudge`, calculates the SHA-256 signature, and writes the bytes to STDIN of `foo smudge`, which modifies them as needed and writes them to its STDOUT. * LFS reads the bytes from STDOUT of `foo smudge`, calculates the SHA-256 signature, and writes the bytes to its own STDOUT. * At the end, ensure that the hashes calculated on the outputs of foo and bar match their corresponding input hashes from the pointer file. If not, write a descriptive error message indicating which extension failed to undo its changes. * Question: On error, should we overwrite the file in the working directory with the original pointer file? Can this be done reliably? ## Handling errors If there are errors in the configuration of LFS extensions, such as invalid extension names, duplicate priorities, etc., then any LFS commands that rely on them will abort with a descriptive error message. If an extension is unable to perform its task, it can indicate this error by returning a non-zero error code and writing a descriptive error message to its STDERR. The behavior on an error depends on whether we are cleaning or smudging. ### Clean If an extension fails to clean a file, it will return a non-zero error code and write an error message to its STDERR. Because the file was not cleaned correctly, it can't be added to the index. LFS will ensure that no pointer file is added or updated for failed files. In addition, it will display the error messages for any files that could not be cleaned (and keep those errors in a log), so that the user can diagnose the failure, and then rerun "git add" on those files. ### Smudge If an extension fails to smudge a file, it will return a non-zero error code and write an error message to its STDERR.
Because the file was not smudged correctly, LFS cannot update that file in the working directory. LFS will ensure that the pointer file is written to both the index and working directory. In addition, it will display the error messages for any files that could not be smudged (and keep those errors in a log), so that the user can diagnose the failure and then rerun `git-lfs checkout` to fix up any remaining pointer files. git-lfs-2.3.4/docs/linux-build.md000066400000000000000000000030541317167762300166120ustar00rootroot00000000000000## Building on Linux There are build scripts for recent versions of CentOS- and Debian-flavored Linuxes in `../scripts/{centos,debian}-build`. Both install all prerequisites, then build the client and the man pages in Docker containers for CentOS 7, Debian 8, and Ubuntu 14.04. On CentOS 6, the client builds, but not the man pages, because of problems getting the right version of Ruby. Earlier versions of CentOS and Debian/Ubuntu have trouble building Go, so they are non-starters. ## Building a deb A Debian package can be built by running `dpkg-buildpackage -us -uc` from the root of the repo. It is currently confirmed to work on Debian jessie and wheezy. On wheezy it requires `wheezy-backports` versions of `dh-golang`, `git`, and `golang`. ## Building an rpm An rpm package can be built by running ```./rpm/build_rpms.bsh```. All dependencies will be downloaded, compiled, and installed for you, provided you have sudo/root permissions. The resulting ./rpm/RPMS/x86_64/git-lfs*.rpm can be installed using ```yum install``` or distributed. - CentOS 7 - build_rpms.bsh will take care of everything. You only need the git-lfs rpm - CentOS 6 - build_rpms.bsh will take care of everything. You will need to distribute both the git-lfs rpms and the git rpms, as CentOS 6 does not have a current enough version available - CentOS 5 - build_rpms.bsh will take care of everything. You only need the git-lfs rpm. When distributing to CentOS 5, they will need git from the epel repo ``` yum install epel-release yum install git ``` See ./rpm/INSTALL.md for more detail git-lfs-2.3.4/docs/man/000077500000000000000000000000001317167762300146055ustar00rootroot00000000000000git-lfs-2.3.4/docs/man/git-lfs-checkout.1.ronn000066400000000000000000000020441317167762300210120ustar00rootroot00000000000000git-lfs-checkout(1) -- Update working copy with file content if available ========================================================================= ## SYNOPSIS `git lfs checkout` <filespec>... ## DESCRIPTION Try to ensure that the working copy contains file content for Git LFS objects for the current ref, if the object data is available. Does not download any content; see git-lfs-fetch(1) for that. Checkout scans the current ref for all LFS objects that would be required, then where a file is either missing in the working copy, or contains placeholder pointer content with the same SHA, the real file content is written, provided we have it in the local store. Modified files are never overwritten. Filespecs can be provided as arguments to restrict the files which are updated. ## EXAMPLES * Checkout all files that are missing or placeholders `git lfs checkout` * Checkout a specific couple of files `git lfs checkout path/to/file1.png path/to/file2.png` ## SEE ALSO git-lfs-fetch(1), git-lfs-pull(1). Part of the git-lfs(1) suite.
git-lfs-2.3.4/docs/man/git-lfs-clean.1.ronn000066400000000000000000000012761317167762300202750ustar00rootroot00000000000000git-lfs-clean(1) -- Git clean filter that converts large files to pointers ========================================================================== ## SYNOPSIS `git lfs clean` <path> ## DESCRIPTION Read the contents of a large file from standard input, and write a Git LFS pointer file for that file to standard output. Clean is typically run by Git's clean filter, configured by the repository's Git attributes. Clean is not part of the user-facing Git plumbing commands. To preview the pointer of a large file as it would be generated, see the git-lfs-pointer(1) command. ## SEE ALSO git-lfs-install(1), git-lfs-push(1), git-lfs-pointer(1), gitattributes(5). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-clone.1.ronn000066400000000000000000000032411317167762300203050ustar00rootroot00000000000000git-lfs-clone(1) -- Efficiently clone an LFS-enabled repository ======================================================================== ## SYNOPSIS `git lfs clone` [git clone options] <repository> [<directory>] ## DESCRIPTION Clone an LFS enabled Git repository more efficiently by disabling LFS during the git clone, then performing a 'git lfs pull' directly afterwards. 'git lfs clone' also installs all of the repo-level hooks (.git/hooks) that LFS requires to operate. If `--separate-git-dir` is given, the hooks will be installed there. This is faster than a regular 'git clone' because that will download LFS content using the smudge filter, which is executed individually per file in the working copy. This is relatively inefficient compared to the batch mode and parallel downloads performed by 'git lfs pull'. ## OPTIONS All options supported by 'git clone' * `-I` `--include=<paths>`: See [INCLUDE AND EXCLUDE] * `-X` `--exclude=<paths>`: See [INCLUDE AND EXCLUDE] * `--skip-repo`: Skip installing repo-level hooks (.git/hooks) that LFS requires. Disabled by default. ## INCLUDE AND EXCLUDE You can configure Git LFS to only fetch objects to satisfy references in certain paths of the repo, and/or to exclude certain paths of the repo, to reduce the time you spend downloading things you do not use. In lfsconfig, set lfs.fetchinclude and lfs.fetchexclude to comma-separated lists of paths to include/exclude in the fetch (wildcard matching as per gitignore). Only paths which are matched by fetchinclude and not matched by fetchexclude will have objects fetched for them. ## SEE ALSO git-clone(1), git-lfs-pull(1). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-config.5.ronn000066400000000000000000000304271317167762300204640ustar00rootroot00000000000000git-lfs-config(5) -- Configuration options for git-lfs ====================================================== ## CONFIGURATION FILES git-lfs reads its configuration from a file called `.lfsconfig` at the root of the repository. The `.lfsconfig` file uses the same format as `.gitconfig`. Additionally, all settings can be overridden by values returned by `git config -l`. This allows you to override settings like `lfs.url` in your local environment without having to modify the `.lfsconfig` file. Most options regarding git-lfs are contained in the `[lfs]` section, meaning they are all named `lfs.foo` or similar, although occasionally an lfs option can be scoped inside the configuration for a remote. ## LIST OF OPTIONS ### General settings * `lfs.url` / `remote.<name>.lfsurl` The url used to call the Git LFS remote API. Default blank (derive from clone URL).
* `lfs.pushurl` / `remote.<name>.lfspushurl` The url used to call the Git LFS remote API when pushing. Default blank (derive from either LFS non-push urls or clone url). * `lfs.dialtimeout` Sets the maximum time, in seconds, that the HTTP client will wait to initiate a connection. This does not include the time to send a request and wait for a response. Default: 30 seconds * `lfs.tlstimeout` Sets the maximum time, in seconds, that the HTTP client will wait for a TLS handshake. Default: 30 seconds. * `lfs.activitytimeout` / `lfs.https://<host>.activitytimeout` Sets the maximum time, in seconds, that the HTTP client will wait for the next TCP read or write. If < 1, no activity timeout is used at all. Default: 30 seconds * `lfs.keepalive` Sets the maximum time, in seconds, for the HTTP client to maintain keepalive connections. Default: 30 minutes. * `core.askpass`, GIT_ASKPASS Given as a program and its arguments, this is invoked when authentication is needed against the LFS API. The contents of stdout are interpreted as the password. * `lfs.cachecredentials` Enables in-memory SSH and Git Credential caching for a single 'git lfs' command. Default: enabled. * `lfs.storage` Allows overriding the LFS storage directory. A non-absolute path is interpreted as relative to the Git repository directory (usually `.git`). Note: you should not run `git lfs prune` if you have different repositories sharing the same storage directory. Default: `lfs` in Git repository directory (usually `.git/lfs`). ### Transfer (upload / download) settings These settings control how the upload and download of LFS content occurs. * `lfs.concurrenttransfers` The number of concurrent uploads/downloads. Default 8. * `lfs.basictransfersonly` If set to true, only basic HTTP upload/download transfers will be used, ignoring any more advanced transfers that the client/server may support. This is primarily to work around bugs or incompatibilities. The git-lfs client supports basic HTTP downloads, resumable HTTP downloads (using `Range` headers), and resumable uploads via the tus.io protocol. Custom transfer methods can be added via `lfs.customtransfer` (see next section). However setting this value to true limits the client to simple HTTP. * `lfs.tustransfers` If set to true, this enables resumable uploads of LFS objects through the tus.io API. Once this feature is finalized, this setting will be removed, and tus.io uploads will be available for all clients. * `lfs.standalonetransferagent` Allows the specified custom transfer agent to be used directly for transferring files, without asking the server how the transfers should be made. The custom transfer agent has to be defined in a `lfs.customtransfer.<name>` settings group. * `lfs.customtransfer.<name>.path` `lfs.customtransfer.<name>` is a settings group which defines a custom transfer hook which allows you to upload/download via an intermediate process, using any mechanism you like (rather than just HTTP). `path` should point to the process you wish to invoke. The protocol between the git-lfs client and the custom transfer process is documented at https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md. `<name>` must be a unique identifier that the LFS server understands. When calling the LFS API the client will include a list of supported transfer types. If the server also supports this named transfer type, it will select it and actions returned from the API will be in relation to that transfer type (may not be traditional URLs for example).
Only if the server accepts `<name>` as a transfer it supports will this custom transfer process be invoked. * `lfs.customtransfer.<name>.args` If the custom transfer process requires any arguments, these can be provided here. * `lfs.customtransfer.<name>.concurrent` If true (the default), git-lfs will invoke the custom transfer process multiple times in parallel, according to `lfs.concurrenttransfers`, splitting the transfer workload between the processes. * `lfs.customtransfer.<name>.direction` Specifies which direction the custom transfer process supports, either "download", "upload", or "both". The default if unspecified is "both". * `lfs.transfer.maxretries` Specifies how many retries LFS will attempt per OID before marking the transfer as failed. Must be an integer which is at least one. If the value is not an integer, is less than one, or is not given, a value of eight will be used instead. * `lfs.transfer.maxverifies` Specifies how many verification requests LFS will attempt per OID before marking the transfer as failed, if the object has a verification action associated with it. Must be an integer which is at least one. If the value is not an integer, is less than one, or is not given, a default value of three will be used instead. ### Push settings * `lfs.allowincompletepush` When pushing, allow objects to be missing from the local cache without halting a Git push. Default: true. ### Fetch settings * `lfs.fetchinclude` When fetching, only download objects which match any entry on this comma-separated list of paths/filenames. Wildcard matching is as per git-ignore(1). See git-lfs-fetch(1) for examples. * `lfs.fetchexclude` When fetching, do not download objects which match any item on this comma-separated list of paths/filenames. Wildcard matching is as per git-ignore(1). See git-lfs-fetch(1) for examples. * `lfs.fetchrecentrefsdays` If non-zero, fetches refs which have commits within N days of the current date. Only local refs are included unless lfs.fetchrecentremoterefs is true. Also used as a basis for pruning old files. The default is 7 days. * `lfs.fetchrecentremoterefs` If true, fetches remote refs (for the remote you're fetching) as well as local refs in the recent window. This is useful to fetch objects for remote branches you might want to check out later. The default is true; if you set this to false, fetching for those branches will only occur when you either check them out (losing the advantage of fetch --recent), or create a tracking local branch separately then fetch again. * `lfs.fetchrecentcommitsdays` In addition to fetching at refs, also fetches previous changes made within N days of the latest commit on the ref. This is useful if you're often reviewing recent changes. Also used as a basis for pruning old files. The default is 0 (no previous changes). * `lfs.fetchrecentalways` Always operate as if --recent was included in a `git lfs fetch` call. Default false. ### Prune settings * `lfs.pruneoffsetdays` The number of days added to the `lfs.fetchrecent*` settings to determine what can be pruned. Default is 3 days, i.e. that anything fetched at the very oldest edge of the 'recent window' is eligible for pruning 3 days later. * `lfs.pruneremotetocheck` Set the remote that LFS files must have been pushed to in order for them to be considered eligible for local pruning. Also the remote which is called if --verify-remote is enabled. * `lfs.pruneverifyremotealways` Always run `git lfs prune` as if `--verify-remote` was provided.
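As a sketch, the prune behaviour can be tuned like any other of these keys; the values and remote name below are illustrative:

```bash
# Keep objects prunable only 7 days after they leave the recent window,
# and check/verify against "upstream" instead of "origin".
$ git config lfs.pruneoffsetdays 7
$ git config lfs.pruneremotetocheck upstream
```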
### Extensions * `lfs.extension.<name>.<setting>` Git LFS extensions enable the manipulation of file streams during smudge and clean. `name` groups the settings for a single extension, and the settings are: * `clean` The command which runs when files are added to the index * `smudge` The command which runs when files are written to the working copy * `priority` The order of this extension compared to others ### Other settings * `lfs.<url>.access` Note: this setting is normally set by LFS itself on receiving a 401 response (authentication required); you don't normally need to set it manually. If set to "basic" then credentials will be requested before making batch requests to this url, otherwise a public request will initially be attempted. * `lfs.<url>.locksverify` Determines whether locks are checked before Git pushes. This prevents you from pushing changes to files that other users have locked. The Git LFS pre-push hook varies its behavior based on the value of this config key. * `null` - In the absence of a value, Git LFS will attempt the call, and warn if it returns an error. If the response is valid, Git LFS will set the value to `true`, and will halt the push if the user attempts to update a file locked by another user. If the server returns a `501 Not Implemented` response, Git LFS will set the value to `false`. * `true` - Git LFS will attempt to verify locks, halting the Git push if there are any server issues, or if the user attempts to update a file locked by another user. * `false` - Git LFS will completely skip the lock check in the pre-push hook. You should set this if you're not using File Locking, or your Git server verifies locked files on pushes automatically. Supports URL config lookup as described in: https://git-scm.com/docs/git-config#git-config-httplturlgt. To set this value per-host: `git config --global lfs.https://github.com/.locksverify [true|false]`. * `lfs.skipdownloaderrors` Causes Git LFS not to abort the smudge filter when a download error is encountered, which allows actions such as checkout to work when you are unable to download the LFS content. LFS files which could not download will contain pointer content instead. Note that this will result in git commands which call the smudge filter to report success even in cases when LFS downloads fail, which may affect scripts. You can also set the environment variable GIT_LFS_SKIP_DOWNLOAD_ERRORS=1 to get the same effect. * `GIT_LFS_PROGRESS` This environment variable causes Git LFS to emit progress updates to an absolute file-path on disk when cleaning, smudging, or fetching. Progress is reported periodically in the form of a new line being appended to the end of the file. Each new line will take the following format: `<direction> <current>/<total files> <downloaded>/<total> <name>` Each field is described below: * `direction`: The direction of transfer, either "checkout", "download", or "upload". * `current` The index of the currently transferring file. * `total files` The estimated count of all files to be transferred. * `downloaded` The number of bytes already downloaded. * `total` The entire size of the file, in bytes. * `name` The name of the file. * `GIT_LFS_SET_LOCKABLE_READONLY` `lfs.setlockablereadonly` These settings, the first an environment variable and the second a gitconfig setting, control whether files marked as 'lockable' in `git lfs track` are made read-only in the working copy when not locked by the current user. The default is `true`; you can disable this behaviour and have all files writeable by setting either variable to 0, 'no' or 'false'.
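For example, to watch progress while fetching, point `GIT_LFS_PROGRESS` at a file and tail it; the path and the sample output line here are illustrative:

```bash
$ GIT_LFS_PROGRESS=/tmp/lfs-progress.log git lfs fetch
$ tail -f /tmp/lfs-progress.log
download 1/3 524288/1048576 assets/big-texture.psd
```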
## LFSCONFIG The .lfsconfig file in a repository is read and interpreted in the same format as the file stored in .git/config. It allows a subset of keys to be used, including and limited to: - lfs.fetchexclude - lfs.fetchinclude - lfs.gitprotocol - lfs.pushurl - lfs.url - lfs.extension.{name}.clean - lfs.extension.{name}.smudge - lfs.extension.{name}.priority - remote.{name}.lfsurl - remote.{name}.{*}.access ## EXAMPLES * Configure a custom LFS endpoint for your repository: `git config -f .lfsconfig lfs.url https://lfs.example.com/foo/bar/info/lfs` ## SEE ALSO git-config(1), git-lfs-install(1), gitattributes(5) Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-env.1.ronn000066400000000000000000000003461317167762300200000ustar00rootroot00000000000000git-lfs-env(1) -- Display the Git LFS environment ================================================= ## SYNOPSIS `git lfs env` ## DESCRIPTION Display the current Git LFS environment. ## SEE ALSO Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-ext.1.ronn000066400000000000000000000007211317167762300200050ustar00rootroot00000000000000git-lfs-ext(1) - View extension details ============================================================== ## SYNOPSIS `git lfs ext list` [<name>...] ## DESCRIPTION Git LFS extensions enable the manipulation of file streams during smudge and clean. ## EXAMPLES * List details for all extensions `git lfs ext` `git lfs ext list` * List details for the specified extensions `git lfs ext list 'foo' 'bar'` ## SEE ALSO Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-fetch.1.ronn000066400000000000000000000115261317167762300203030ustar00rootroot00000000000000git-lfs-fetch(1) -- Download all Git LFS files for a given ref ============================================================== ## SYNOPSIS `git lfs fetch` [options] [<remote> [<ref>...]] ## DESCRIPTION Download Git LFS objects at the given refs from the specified remote. See [DEFAULT REMOTE] and [DEFAULT REFS] for what happens if you don't specify. This does not update the working copy. ## OPTIONS * `-I` `--include=<paths>`: Specify lfs.fetchinclude just for this invocation; see [INCLUDE AND EXCLUDE] * `-X` `--exclude=<paths>`: Specify lfs.fetchexclude just for this invocation; see [INCLUDE AND EXCLUDE] * `--recent`: Download objects referenced by recent branches & commits in addition to those that would otherwise be downloaded. See [RECENT CHANGES] * `--all`: Download all objects referenced by any commit that is reachable; this is primarily for backup / migration purposes. Cannot be combined with --recent or --include/--exclude. Ignores any globally configured include and exclude paths to ensure that all objects are downloaded. * `--prune` `-p`: Prune old and unreferenced objects after fetching, equivalent to running `git lfs prune` afterwards. See git-lfs-prune(1) for more details. ## INCLUDE AND EXCLUDE You can configure Git LFS to only fetch objects to satisfy references in certain paths of the repo, and/or to exclude certain paths of the repo, to reduce the time you spend downloading things you do not use. In gitconfig, set lfs.fetchinclude and lfs.fetchexclude to comma-separated lists of paths to include/exclude in the fetch (pattern matching as per golang's `filepath.Match()`). Only paths which are matched by fetchinclude and not matched by fetchexclude will have objects fetched for them.
### Examples: * `git config lfs.fetchinclude "textures,images/foo*"` This will only fetch objects referenced in paths in the textures folder, and files called foo* in the images folder * `git config lfs.fetchinclude "*.jpg,*.png,*.tga"` Only fetch JPG/PNG/TGA files, wherever they are in the repository * `git config lfs.fetchexclude "media/reallybigfiles"` Don't fetch any LFS objects referenced in the folder media/reallybigfiles, but fetch everything else * `git config lfs.fetchinclude "media"`
`git config lfs.fetchexclude "media/excessive"` Only fetch LFS objects in the 'media' folder, but exclude those in one of its subfolders. ## DEFAULT REMOTE Without arguments, fetch downloads from the default remote. The default remote is the same as for `git fetch`, i.e. based on the remote branch you're tracking first, or origin otherwise. ## DEFAULT REFS If no refs are given as arguments, the currently checked out ref is used. In addition, if enabled, recently changed refs and commits are also included. See [RECENT CHANGES] for details. ## RECENT CHANGES If the `--recent` option is specified, or if the gitconfig option `lfs.fetchrecentalways` is true, then after the current ref (or those in the arguments) is fetched, we also search for 'recent' changes to fetch objects for, so that it's more convenient to checkout or diff those commits without incurring further downloads. What changes are considered 'recent' is based on a number of gitconfig options: * `lfs.fetchrecentrefsdays` If non-zero, includes branches which have commits within N days of the current date. Only local refs are included unless lfs.fetchrecentremoterefs is true. The default is 7 days. * `lfs.fetchrecentremoterefs` If true, fetches remote refs (for the remote you're fetching) as well as local refs in the recent window. This is useful to fetch objects for remote branches you might want to check out later. The default is true; if you set this to false, fetching for those branches will only occur when you either check them out (losing the advantage of fetch --recent), or create a tracking local branch separately then fetch again. * `lfs.fetchrecentcommitsdays` In addition to fetching at branches, also fetches changes made within N days of the latest commit on the branch. This is useful if you're often reviewing recent changes. The default is 0 (no previous changes). * `lfs.fetchrecentalways` Always operate as if --recent was provided on the command line. ## EXAMPLES * Fetch the LFS objects for the current ref from default remote `git lfs fetch` * Fetch the LFS objects for the current ref AND recent changes from default remote `git lfs fetch --recent` * Fetch the LFS objects for the current ref from a secondary remote 'upstream' `git lfs fetch upstream` * Fetch the LFS objects for a branch from origin `git lfs fetch origin mybranch` * Fetch the LFS objects for 2 branches and a commit from origin `git lfs fetch origin master mybranch e445b45c1c9c6282614f201b62778e4c0688b5c8` ## SEE ALSO git-lfs-checkout(1), git-lfs-pull(1), git-lfs-prune(1). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-filter-process.1.ronn000066400000000000000000000016451317167762300221540ustar00rootroot00000000000000git-lfs-filter-process(1) -- Git filter process that converts between pointer and actual content =========================================================================================== ## SYNOPSIS `git lfs filter-process` `git lfs filter-process --skip` ## DESCRIPTION Implement the Git process filter API, exchanging handshake messages and then accepting and responding to requests to either clean or smudge a file. filter-process is always run by Git's filter process, and is configured by the repository's Git attributes. The filter process uses Git's pkt-line protocol to communicate, and is documented in detail in gitattributes(5). ## OPTIONS Without any options, filter-process accepts and responds to requests normally. * `--skip`: Skip automatic downloading of objects on clone or pull. 
## SEE ALSO git-lfs-clean(1), git-lfs-install(1), git-lfs-smudge(1), gitattributes(5). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-fsck.1.ronn000066400000000000000000000005351317167762300201360ustar00rootroot00000000000000git-lfs-fsck(1) -- Check GIT LFS files for consistency ====================================================== ## SYNOPSIS `git lfs fsck` ## DESCRIPTION Checks all GIT LFS files in the current HEAD for consistency. Corrupted files are moved to ".git/lfs/bad". ## SEE ALSO git-lfs-ls-files(1), git-lfs-status(1). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-install.1.ronn000066400000000000000000000033711317167762300206570ustar00rootroot00000000000000git-lfs-install(1) -- Install Git LFS configuration. ======================================================== ## SYNOPSIS `git lfs install` [options] ## DESCRIPTION Perform the following actions to ensure that Git LFS is set up properly: * Set up the clean and smudge filters under the name "lfs" in the global Git config. * Install a pre-push hook to run git-lfs-pre-push(1) for the current repository, if run from inside one. If "core.hooksPath" is configured in any Git configuration (and supported, i.e., the installed Git version is at least 2.9.0), then the pre-push hook will be installed to that directory instead. ## OPTIONS Without any options, `git lfs install` will only set up the "lfs" smudge and clean filters if they are not already set. * `--force`: Sets the "lfs" smudge and clean filters, overwriting existing values. * `--local`: Sets the "lfs" smudge and clean filters in the local repository's git config, instead of the global git config (~/.gitconfig). * `--manual`: Print instructions for manually updating your hooks to include git-lfs functionality. Use this option if `git lfs install` fails because of existing hooks and you want to retain their functionality. * `--system`: Sets the "lfs" smudge and clean filters in the system git config, e.g. /etc/gitconfig instead of the global git config (~/.gitconfig). * `--skip-smudge`: Skips automatic downloading of objects on clone or pull. This requires a manual "git lfs pull" every time a new commit is checked out on your repository. * `--skip-repo`: Skips setup of the local repo; use if you want to install the global lfs filters but not make changes to the current repo. ## SEE ALSO git-lfs-uninstall(1). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-lock.1.ronn000066400000000000000000000020021317167762300201330ustar00rootroot00000000000000git-lfs-lock(1) -- Set a file as "locked" on the Git LFS server =============================================================== ## SYNOPSIS `git lfs lock` [options] <path> ## DESCRIPTION Sets the given file path as "locked" against the Git LFS server, with the intention of blocking attempts by other users to update the given path. Locking a file requires the file to exist in the working copy. Once locked, LFS will verify that Git pushes do not modify files locked by other users. See the description of the `lfs.<url>.locksverify` config key in git-lfs-config(5) for details. ## OPTIONS * `-r` `--remote=<name>`: Specify the Git LFS server to use. Ignored if the `lfs.url` config key is set. * `--json`: Writes lock info as JSON to STDOUT if the command exits successfully. Intended for interoperation with external tools. If the command returns with a non-zero exit code, plain text messages will be sent to STDERR. ## SEE ALSO git-lfs-unlock(1), git-lfs-locks(1). Part of the git-lfs(1) suite.
git-lfs-2.3.4/docs/man/git-lfs-locks.1.ronn000066400000000000000000000017621317167762300203260ustar00rootroot00000000000000git-lfs-locks(1) -- Lists currently locked files from the Git LFS server. ============================================================================= ## SYNOPSIS `git lfs locks` [options] ## DESCRIPTION Lists current locks from the Git LFS server. ## OPTIONS * `-r` `--remote=<name>`: Specify the Git LFS server to use. Ignored if the `lfs.url` config key is set. * `-i <id>` `--id=<id>`: Specifies a lock by its ID. Returns a single result. * `-p <path>` `--path=<path>`: Specifies a lock by its path. Returns a single result. * `--local`: Lists only the locks cached locally. Skips a remote call. * `-l <num>` `--limit=<num>`: Specifies number of results to return. * `--json`: Writes lock info as JSON to STDOUT if the command exits successfully. Intended for interoperation with external tools. If the command returns with a non-zero exit code, plain text messages will be sent to STDERR. ## SEE ALSO git-lfs-lock(1), git-lfs-unlock(1). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-logs.1.ronn000066400000000000000000000012511317167762300201500ustar00rootroot00000000000000git-lfs-logs(1) - Show errors from the git-lfs command ====================================================== ## SYNOPSIS `git lfs logs`
`git lfs logs` <file>
`git lfs logs clear`
`git lfs logs boomtown`
## DESCRIPTION Display errors from the git-lfs command. Any time it crashes, the details are saved to ".git/lfs/logs". ## COMMANDS * `clear`: Clears all of the existing logged errors. * `boomtown`: Triggers a dummy exception. ## OPTIONS Without any options, `git lfs logs` simply shows the list of error logs. * `<file>`: Shows the specified error log. Use "last" to show the most recent error. ## SEE ALSO Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-ls-files.1.ronn000066400000000000000000000012711317167762300207240ustar00rootroot00000000000000git-lfs-ls-files(1) -- Show information about Git LFS files in the index and working tree ========================================================================================= ## SYNOPSIS `git lfs ls-files` [<ref>] ## DESCRIPTION Display paths of Git LFS files that are found in the tree at the given reference. If no reference is given, scan the currently checked-out branch. ## OPTIONS * `-l` `--long`: Show the entire 64 character OID, instead of just first 10. * `-d` `--debug`: Show as much information as possible about an LFS file. This is intended for manual inspection; the exact format may change at any time. ## SEE ALSO git-lfs-status(1). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-migrate.1.ronn000066400000000000000000000124551317167762300206440ustar00rootroot00000000000000git-lfs-migrate(1) - Migrate history to or from git-lfs ======================================================= ## SYNOPSIS `git lfs migrate` <mode> [options] [--] [branch ...] ## MODES * `info` Show information about repository size. * `import` Convert large Git objects to LFS pointers. ## OPTIONS * `-I` `--include=<paths>`: See [INCLUDE AND EXCLUDE]. * `-X` `--exclude=<paths>`: See [INCLUDE AND EXCLUDE]. * `--include-ref=<ref>`: See [INCLUDE AND EXCLUDE (REFS)]. * `--exclude-ref=<ref>`: See [INCLUDE AND EXCLUDE (REFS)]. * `--everything`: See [INCLUDE AND EXCLUDE (REFS)]. * [branch ...]: Migrate only the set of branches listed. If not given, `git-lfs-migrate(1)` will migrate the currently checked out branch. If any of `--include-ref` or `--exclude-ref` are given, the checked out branch will not be appended, but branches given explicitly will be appended. ### INFO The 'info' mode has these additional options: * `--above=<size>` Only count files whose individual filesize is above the given size. 'size' may be specified as a number of bytes, or a number followed by a storage unit, e.g., "1b", "20 MB", "3 TiB", etc. If a set of files sharing a common extension has no files in that set whose individual size is above the given `--above` threshold, no entry for that set will be shown. * `--top=<n>` Only include the top 'n' entries, ordered by how many total files match the given pathspec. * `--unit=<unit>` Format the number of bytes in each entry as a quantity of the storage unit provided. Valid units include: * b, kib, mib, gib, tib, pib - for IEC storage units * b, kb, mb, gb, tb, pb - for SI storage units If a --unit is not specified, the largest unit that can fit the number of counted bytes as a whole number quantity is chosen. ### IMPORT The 'import' mode migrates large objects present in the Git history to pointer files tracked and stored with Git LFS. It supports all the core 'migrate' options and these additional ones: * `--verbose` Print the commit oid and filename of migrated files to STDOUT. If `--include` or `--exclude` (`-I`, `-X`, respectively) are given, the .gitattributes will be modified to include any new filepath patterns as given by those flags.
If neither of those flags is given, the gitattributes will be incrementally modified to include new filepath extensions as they are rewritten in history. ## INCLUDE AND EXCLUDE You can configure Git LFS to only migrate tree entries whose pathspec matches the include glob and does not match the exclude glob, to reduce total migration time or to only migrate part of your repo. Specify multiple patterns using the comma as the delimiter. Pattern matching is done as given to be functionally equivalent to pattern matching as in .gitattributes. ## INCLUDE AND EXCLUDE (REFS) You can configure Git LFS to only migrate commits reachable by references included by `--include-ref` and not reachable by `--exclude-ref`.

              D---E---F
             /         \
        A---B------C    refs/heads/my-feature
             \      \
              \      refs/heads/master
               \
                refs/remotes/origin/master

In the above configuration, the following commits are reachable by each ref:

    refs/heads/master:          C, B, A
    refs/heads/my-feature:      F, E, D, B, A
    refs/remotes/origin/master: A

The following configuration:

    --include-ref=refs/heads/my-feature
    --include-ref=refs/heads/master
    --exclude-ref=refs/remotes/origin/master

Would, therefore, include commits: F, E, D, C, B, but exclude commit A. The presence of flag `--everything` indicates that all local references should be migrated. ## EXAMPLES ### Migrate unpushed commits The migrate command's most common use case is to convert large git objects to LFS before pushing your commits. By default, it only scans commits that don't exist on any remote. First, run `git lfs migrate info` to list the file types taking up the most space in your repository. ``` $ git lfs migrate info migrate: Fetching remote refs: ..., done migrate: Sorting commits: ..., done migrate: Examining commits: 100% (1/1), done *.mp3 284 MB 1/1 files(s) 100% *.pdf 42 MB 8/8 files(s) 100% *.psd 9.8 MB 15/15 files(s) 100% *.ipynb 6.9 MB 6/6 files(s) 100% *.csv 5.8 MB 2/2 files(s) 100% ``` Now, you can run `git lfs migrate import` to convert some file types to LFS: ``` $ git lfs migrate import --include="*.mp3,*.psd" migrate: Fetching remote refs: ..., done migrate: Sorting commits: ..., done migrate: Rewriting commits: 100% (1/1), done master d2b959babd099fe70da1c1512e2475e8a24de163 -> 136e706bf1ae79643915c134e17a6c933fd53c61 migrate: Updating refs: ..., done ``` ### Migrate local history You can also migrate the entire history of your repository: ``` # Check for large files in your local master branch $ git lfs migrate info --include-ref=master # Check for large files in every branch $ git lfs migrate info --everything ``` The same flags will work in `import` mode: ``` # Convert all zip files in your master branch $ git lfs migrate import --include-ref=master --include="*.zip" # Convert all zip files in every local branch $ git lfs migrate import --everything --include="*.zip" ``` Note: This will require a force push to any existing Git remotes. ## SEE ALSO Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-pointer.1.ronn000066400000000000000000000014301317167762300206630ustar00rootroot00000000000000git-lfs-pointer(1) -- Build and compare pointers ================================================ ## SYNOPSIS `git lfs pointer --file=path/to/file`
`git lfs pointer --file=path/to/file --pointer=path/to/pointer`
`git lfs pointer --file=path/to/file --stdin` ## Description Builds and optionally compares generated pointer files to ensure consistency between different Git LFS implementations. ## OPTIONS * `--file`: A local file to build the pointer from. * `--pointer`: A local file including the contents of a pointer generated from another implementation. This is compared to the pointer generated from `--file`. * `--stdin`: Reads the pointer from STDIN to compare with the pointer generated from `--file`. ## SEE ALSO Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-post-checkout.1.ronn000066400000000000000000000007501317167762300217770ustar00rootroot00000000000000git-lfs-post-checkout(1) -- Git post-checkout hook implementation ================================================================= ## SYNOPSIS `git lfs post-checkout` ## DESCRIPTION Responds to Git post-checkout events. It makes sure that any files which are marked as lockable by `git lfs track` are read-only in the working copy, if not currently locked by the local user. ## SEE ALSO git-lfs-track(1) Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-post-merge.1.ronn000066400000000000000000000006721317167762300212740ustar00rootroot00000000000000git-lfs-post-merge(1) -- Git post-merge hook implementation ================================================================= ## SYNOPSIS `git lfs post-merge` ## DESCRIPTION Responds to Git post-merge events. It makes sure that any files which are marked as lockable by `git lfs track` are read-only in the working copy, if not currently locked by the local user. ## SEE ALSO git-lfs-track(1) Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-pre-push.1.ronn000066400000000000000000000007451317167762300207560ustar00rootroot00000000000000git-lfs-pre-push(1) -- Git pre-push hook implementation ======================================================= ## SYNOPSIS `git lfs pre-push` <remote> [remoteurl] ## DESCRIPTION Responds to Git pre-hook events. It reads the range of commits from STDIN, in the following format: <local ref> SP <local sha1> SP <remote ref> SP <remote sha1> \n It also takes the remote name and URL as arguments. ## SEE ALSO git-lfs-clean(1), git-lfs-push(1). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-prune.1.ronn000066400000000000000000000121461317167762300203420ustar00rootroot00000000000000git-lfs-prune(1) -- Delete old LFS files from local storage =========================================================== ## SYNOPSIS `git lfs prune` [options] ## DESCRIPTION Deletes local copies of LFS files which are old, thus freeing up disk space. Prune operates by enumerating all the locally stored objects, and then deleting any which are not referenced by at least ONE of the following: * the current checkout * a 'recent branch'; see [RECENT FILES] * a 'recent commit' on the current branch or recent branches; see [RECENT FILES] * a commit which has not been pushed; see [UNPUSHED LFS FILES] * any other worktree checkouts; see git-worktree(1) In general terms, prune will delete files you're not currently using and which are not 'recent', so long as they've been pushed, i.e. the local copy is not the only one. The reflog is not considered, only commits. Therefore LFS objects that are only referenced by orphaned commits are always deleted. Note: you should not run `git lfs prune` if you have different repositories sharing the same custom storage directory; see git-lfs-config(5) for more details about the `lfs.storage` option.
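For instance, to preview what would be deleted without removing anything (the options are described below):

```bash
$ git lfs prune --dry-run --verbose
```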
## OPTIONS * `--dry-run` `-d` Don't actually delete anything, just report on what would have been done * `--verify-remote` `-c` Contact the remote and check that copies of the files we would delete definitely exist before deleting. See [VERIFY REMOTE]. * `--no-verify-remote` Disables remote verification if lfs.pruneverifyremotealways was enabled in settings. See [VERIFY REMOTE]. * `--verbose` `-v` Report the full detail of what is/would be deleted. ## RECENT FILES Prune won't delete LFS files referenced by 'recent' commits, in case you want to use them again without having to download. The definition of 'recent' is derived from the one used by git-lfs-fetch(1) to download recent objects with the `--recent` option, with an offset of a number of days (default 3) to ensure that we always keep files you download for a few days. Here are the git-config(1) settings that control this behaviour: * `lfs.pruneoffsetdays`
The number of extra days added to the fetch recent settings when using them to decide when to prune. So for a reference to be considered old enough to prune, it has to be this many days older than the oldest reference that would be downloaded via `git lfs fetch --recent`. Only used if the relevant fetch recent 'days' setting is non-zero. Default 3 days. * `lfs.fetchrecentrefsdays`
`lfs.fetchrecentremoterefs`
`lfs.fetchrecentcommitsdays`
These have the same meaning as git-lfs-fetch(1) with the `--recent` option; they are used as a base for the offset above. Anything which falls outside of this offset window is considered old enough to prune. If a day value is zero, that condition is not used at all to retain objects and they will be pruned. ## UNPUSHED LFS FILES When the only copy of an LFS file is local, and it is still reachable from any reference, that file can never be pruned, regardless of how old it is. To determine whether an LFS file has been pushed, we check the difference between local refs and remote refs; where the local ref is ahead, any LFS files referenced in those commits are unpushed and will not be deleted. This works because the LFS pre-push hook always ensures that LFS files are pushed before the remote branch is updated. See [DEFAULT REMOTE] for which remote is considered 'pushed' for pruning purposes. ## VERIFY REMOTE The `--verify-remote` option calls the remote to ensure that any LFS files to be deleted have copies on the remote before actually deleting them. Usually the check performed by [UNPUSHED LFS FILES] is enough to determine that files have been pushed, but if you want to be extra sure at the expense of extra overhead you can make prune actually call the remote API and verify the presence of the files you're about to delete locally. See [DEFAULT REMOTE] for which remote is checked. You can make this behaviour the default by setting `lfs.pruneverifyremotealways` to true. In addition to the overhead of calling the remote, using this option also requires prune to distinguish between totally unreachable files (e.g. those that were added to the index but never committed, or referenced only by orphaned commits), and files which are still referenced, but by commits which are prunable. This makes the prune process take longer. ## DEFAULT REMOTE When identifying [UNPUSHED LFS FILES] and performing [VERIFY REMOTE], a single remote, 'origin', is normally used as the reference. This one remote is considered canonical; even if you use multiple remotes, you probably want to retain your local copies until they've made it to that remote. 'origin' is used by default because that will usually be a central repo, or your fork of it - in both cases that's a valid remote backup of your work. If origin doesn't exist then by default nothing will be pruned because everything is treated as 'unpushed'. You can alter the remote via git config: `lfs.pruneremotetocheck`. Set this to a different remote name to check that one instead of 'origin'. ## SEE ALSO git-lfs-fetch(1) Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-pull.1.ronn000066400000000000000000000027331317167762300201660ustar00rootroot00000000000000git-lfs-pull(1) -- Download all Git LFS files for current ref & checkout ======================================================================== ## SYNOPSIS `git lfs pull` [options] [<remote>] ## DESCRIPTION Download Git LFS objects for the currently checked out ref, and update the working copy with the downloaded content if required.
This is equivalent to running the following 2 commands: git lfs fetch [options] [<remote>] git lfs checkout ## OPTIONS * `-I` `--include=<paths>`: Specify lfs.fetchinclude just for this invocation; see [INCLUSION & EXCLUSION] * `-X` `--exclude=<paths>`: Specify lfs.fetchexclude just for this invocation; see [INCLUSION & EXCLUSION] ## INCLUSION & EXCLUSION You can configure Git LFS to only fetch objects to satisfy references in certain paths of the repo, and/or to exclude certain paths of the repo, to reduce the time you spend downloading things you do not use. In gitconfig, set lfs.fetchinclude and lfs.fetchexclude to comma-separated lists of paths to include/exclude in the fetch (wildcard matching as per gitignore). Only paths which are matched by fetchinclude and not matched by fetchexclude will have objects fetched for them. ## DEFAULT REMOTE Without arguments, pull downloads from the default remote. The default remote is the same as for `git pull`, i.e. based on the remote branch you're tracking first, or origin otherwise. ## SEE ALSO git-lfs-fetch(1), git-lfs-checkout(1). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-push.1.ronn000066400000000000000000000016651317167762300201720ustar00rootroot00000000000000git-lfs-push(1) -- Push queued large files to the Git LFS endpoint ================================================================== ## SYNOPSIS `git lfs push` [options] <remote> [<ref>...]
`git lfs push` <remote> [<ref>...]
`git lfs push` --object-id <remote> [<oid>...] ## DESCRIPTION Upload Git LFS files to the configured endpoint for the current Git remote. By default, it filters out objects that are already referenced by the local clone of the remote. ## OPTIONS * `--dry-run`: Print the files that would be pushed, without actually pushing them. * `--all`: This pushes all objects to the remote that are referenced by any commit reachable from the refs provided as arguments. If no refs are provided, then all refs are pushed. * `--object-id`: This pushes only the object OIDs listed at the end of the command, separated by spaces. ## SEE ALSO git-lfs-pre-push(1). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-smudge.1.ronn000066400000000000000000000020471317167762300204740ustar00rootroot00000000000000git-lfs-smudge(1) -- Git smudge filter that converts pointer in blobs to the actual content =========================================================================================== ## SYNOPSIS `git lfs smudge` [<path>] `git lfs smudge` --skip [<path>] ## DESCRIPTION Read a Git LFS pointer file from standard input and write the contents of the corresponding large file to standard output. If needed, download the file's contents from the Git LFS endpoint. The <path> argument, if provided, is only used for a progress bar. Smudge is typically run by Git's smudge filter, configured by the repository's Git attributes. ## OPTIONS Without any options, `git lfs smudge` outputs the raw Git LFS content to standard output. * `--skip`: Skip automatic downloading of objects on clone or pull. ## KNOWN BUGS On Windows, Git does not handle files in the working tree larger than 4 gigabytes. For more information, see: https://github.com/git-lfs/git-lfs/issues/2434. ## SEE ALSO git-lfs-install(1), gitattributes(5). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-status.1.ronn000066400000000000000000000015101317167762300205270ustar00rootroot00000000000000git-lfs-status(1) -- Show the status of Git LFS files in the working tree ========================================================================= ## SYNOPSIS `git lfs status` [options] ## DESCRIPTION Display paths of Git LFS objects that * have not been pushed to the Git LFS server. These are large files that would be uploaded by `git push`. * have differences between the index file and the current HEAD commit. These are large files that would be committed by `git commit`. * have differences between the working tree and the index file. These are files that could be staged using `git add`. ## OPTIONS * `--porcelain`: Give the output in an easy-to-parse format for scripts. * `--json`: Give the output in a stable JSON format for scripts. ## SEE ALSO git-lfs-ls-files(1). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-track.1.ronn000066400000000000000000000035421317167762300203150ustar00rootroot00000000000000git-lfs-track(1) - View or add Git LFS paths to Git attributes ============================================================== ## SYNOPSIS `git lfs track` [options] [<pattern>...] ## DESCRIPTION Start tracking the given pattern(s) through Git LFS. The <pattern> argument is written to .gitattributes. If no paths are provided, simply list the currently-tracked paths. The [gitattributes documentation](https://git-scm.com/docs/gitattributes) states that patterns use the [gitignore pattern rules](https://git-scm.com/docs/gitignore) to match paths. ## OPTIONS * `--verbose` `-v`: If enabled, have `git lfs track` log files which it will touch. Disabled by default.
* `--dry-run` `-d`: If enabled, have `git lfs track` log all actions it would normally take (adding entries to .gitattributes, touching files on disk, etc.) without performing any mutative operations to the disk. `git lfs track --dry-run [files]` also implicitly behaves as if `--verbose` were passed, and will log in greater detail what it is doing. Disabled by default. * `--lockable` `-l` Make the paths 'lockable', meaning they should be locked to edit them, and will be made read-only in the working copy when not locked. * `--not-lockable` Remove the lockable flag from the paths so they are no longer read-only unless locked. * `--no-modify-attrs` Makes matched entries stat-dirty so that Git can re-index files you wish to convert to LFS. Does not modify any `.gitattributes` file(s). ## EXAMPLES * List the patterns that Git LFS is currently tracking: `git lfs track` * Configure Git LFS to track GIF files: `git lfs track "*.gif"` * Configure Git LFS to track PSD files and make them read-only unless locked: `git lfs track --lockable "*.psd"` ## SEE ALSO git-lfs-untrack(1), git-lfs-install(1), gitattributes(5). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-uninstall.1.ronn000066400000000000000000000006621317167762300212220ustar00rootroot00000000000000git-lfs-uninstall(1) -- Remove Git LFS configuration ================================================= ## SYNOPSIS `git lfs uninstall` ## DESCRIPTION Perform the following actions to remove the Git LFS configuration: * Remove the "lfs" clean and smudge filters from the global Git config. * Uninstall the Git LFS pre-push hook if run from inside a Git repository. ## SEE ALSO git-lfs-install(1). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-unlock.1.ronn000066400000000000000000000017561317167762300205070ustar00rootroot00000000000000git-lfs-unlock(1) -- Remove "locked" setting for a file on the Git LFS server ============================================================================= ## SYNOPSIS `git lfs unlock` [OPTIONS] <path> ## DESCRIPTION Removes the given file path as "locked" on the Git LFS server. Files must exist and have a clean git status before they can be unlocked. The `--force` flag will skip these checks. ## OPTIONS * `-r` `--remote=<name>`: Specify the Git LFS server to use. Ignored if the `lfs.url` config key is set. * `-f` `--force`: Tells the server to remove the lock, even if it's owned by another user. * `-i <id>` `--id=<id>`: Specifies a lock by its ID instead of path. * `--json`: Writes lock info as JSON to STDOUT if the command exits successfully. Intended for interoperation with external tools. If the command returns with a non-zero exit code, plain text messages will be sent to STDERR. ## SEE ALSO git-lfs-lock(1), git-lfs-locks(1). Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs-untrack.1.ronn000066400000000000000000000007451317167762300206600ustar00rootroot00000000000000git-lfs-untrack(1) - Remove Git LFS paths from Git Attributes ============================================================= ## SYNOPSIS `git lfs untrack` <path>... ## DESCRIPTION Stop tracking the given path(s) through Git LFS. The <path> argument can be a glob pattern or a file path. ## EXAMPLES * Configure Git LFS to stop tracking GIF files: `git lfs untrack "*.gif"` ## SEE ALSO git-lfs-track(1), git-lfs-install(1), gitattributes(5). Part of the git-lfs(1) suite.
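Taken together, the track, lock, unlock, and untrack pages describe a simple end-to-end flow for lockable files. A sketch of such a session (the file name is hypothetical, and locking requires a server that implements the Git LFS locking API):

    git lfs track --lockable "*.psd"
    git lfs lock images/banner.psd
    # edit the file, commit, and push as usual
    git lfs unlock images/banner.psd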
git-lfs-2.3.4/docs/man/git-lfs-update.1.ronn000066400000000000000000000014431317167762300204710ustar00rootroot00000000000000git-lfs-update(1) -- Update Git hooks ===================================== ## SYNOPSIS `git lfs update` [--manual | --force] ## DESCRIPTION Updates the Git hooks used by Git LFS. Silently upgrades known hook contents. If you have your own custom hooks you may need to use one of the extended options below. ## OPTIONS * `--manual` `-m` Print instructions for manually updating your hooks to include git-lfs functionality. Use this option if `git lfs update` fails because of existing hooks and you want to retain their functionality. * `--force` `-f` Forcibly overwrite any existing hooks with git-lfs hooks. Use this option if `git lfs update` fails because of existing hooks but you don't care about their current contents. ## SEE ALSO Part of the git-lfs(1) suite. git-lfs-2.3.4/docs/man/git-lfs.1.ronn000066400000000000000000000054521317167762300172150ustar00rootroot00000000000000git-lfs(1) -- Work with large files in Git repositories ======================================================= ## SYNOPSIS `git lfs` <command> [<args>] ## DESCRIPTION Git LFS is a system for managing and versioning large files in association with a Git repository. Instead of storing the large files within the Git repository as blobs, Git LFS stores special "pointer files" in the repository, while storing the actual file contents on a Git LFS server. The contents of the large file are downloaded automatically when needed, for example when a Git branch containing the large file is checked out. Git LFS works by using a "smudge" filter to look up the large file contents based on the pointer file, and a "clean" filter to create a new version of the pointer file when the large file's contents change. It also uses a `pre-push` hook to upload the large file contents to the Git LFS server whenever a commit containing a new large file version is about to be pushed to the corresponding Git server. ## COMMANDS Like Git, Git LFS commands are separated into high level ("porcelain") commands and low level ("plumbing") commands. ### High level commands (porcelain) * git-lfs-env(1): Display the Git LFS environment. * git-lfs-checkout(1): Populate working copy with real content from Git LFS files. * git lfs clone: Efficiently clone a Git LFS-enabled repository. * git-lfs-fetch(1): Download Git LFS files from a remote. * git-lfs-fsck(1): Check Git LFS files for consistency. * git-lfs-install(1): Install Git LFS configuration. * git-lfs-lock(1): Set a file as "locked" on the Git LFS server. * git-lfs-locks(1): List currently "locked" files from the Git LFS server. * git-lfs-logs(1): Show errors from the git-lfs command. * git-lfs-ls-files(1): Show information about Git LFS files in the index and working tree. * git-lfs-migrate(1): Migrate history to or from Git LFS. * git-lfs-pull(1): Fetch LFS changes from the remote & checkout any required working tree files. * git-lfs-push(1): Push queued large files to the Git LFS endpoint. * git-lfs-status(1): Show the status of Git LFS files in the working tree. * git-lfs-track(1): View or add Git LFS paths to Git attributes. * git-lfs-unlock(1): Remove "locked" setting for a file on the Git LFS server. * git-lfs-untrack(1): Remove Git LFS paths from Git Attributes. * git-lfs-update(1): Update Git hooks for the current Git repository. * git lfs version: Report the version number. ### Low level commands (plumbing) * git-lfs-clean(1): Git clean filter that converts large files to pointers.
* git-lfs-pointer(1): Build and compare pointers. * git-lfs-pre-push(1): Git pre-push hook implementation. * git-lfs-smudge(1): Git smudge filter that converts pointer in blobs to the actual content. git-lfs-2.3.4/docs/man/index.txt000066400000000000000000000000741317167762300164560ustar00rootroot00000000000000# external gitattributes(5) http://man.cx/gitattributes(5) git-lfs-2.3.4/docs/man/mangen.go000066400000000000000000000105441317167762300164050ustar00rootroot00000000000000package main import ( "bufio" "fmt" "io/ioutil" "os" "path/filepath" "regexp" "strings" ) func readManDir() (string, []os.FileInfo) { rootDirs := []string{ "..", "/tmp/docker_run/git-lfs", } var err error for _, rootDir := range rootDirs { fs, err := ioutil.ReadDir(filepath.Join(rootDir, "docs", "man")) if err == nil { return rootDir, fs } } fmt.Fprintf(os.Stderr, "Failed to open man dir: %v\n", err) os.Exit(2) return "", nil } // Reads all .ronn files & and converts them to string literals // triggered by "go generate" comment // Literals are inserted into a map using an init function, this means // that there are no compilation errors if 'go generate' hasn't been run, just // blank man files. func main() { fmt.Fprintf(os.Stderr, "Converting man pages into code...\n") rootDir, fs := readManDir() manDir := filepath.Join(rootDir, "docs", "man") out, err := os.Create(filepath.Join(rootDir, "commands", "mancontent_gen.go")) if err != nil { fmt.Fprintf(os.Stderr, "Failed to create go file: %v\n", err) os.Exit(2) } out.WriteString("package commands\n\nfunc init() {\n") out.WriteString("// THIS FILE IS GENERATED, DO NOT EDIT\n") out.WriteString("// Use 'go generate ./commands' to update\n") fileregex := regexp.MustCompile(`git-lfs(?:-([A-Za-z\-]+))?.\d.ronn`) headerregex := regexp.MustCompile(`^###?\s+([A-Za-z0-9 ]+)`) // only pick up caps in links to avoid matching optional args linkregex := regexp.MustCompile(`\[([A-Z\- ]+)\]`) // man links manlinkregex := regexp.MustCompile(`(git)(?:-(lfs))?-([a-z\-]+)\(\d\)`) count := 0 for _, f := range fs { if match := fileregex.FindStringSubmatch(f.Name()); match != nil { fmt.Fprintf(os.Stderr, "%v\n", f.Name()) cmd := match[1] if len(cmd) == 0 { // This is git-lfs.1.ronn cmd = "git-lfs" } out.WriteString("ManPages[\"" + cmd + "\"] = `") contentf, err := os.Open(filepath.Join(manDir, f.Name())) if err != nil { fmt.Fprintf(os.Stderr, "Failed to open %v: %v\n", f.Name(), err) os.Exit(2) } // Process the ronn to make it nicer as help text scanner := bufio.NewScanner(contentf) firstHeaderDone := false skipNextLineIfBlank := false lastLineWasBullet := false scanloop: for scanner.Scan() { line := scanner.Text() trimmedline := strings.TrimSpace(line) if skipNextLineIfBlank && len(trimmedline) == 0 { skipNextLineIfBlank = false lastLineWasBullet = false continue } // Special case headers if hmatch := headerregex.FindStringSubmatch(line); hmatch != nil { header := strings.ToLower(hmatch[1]) switch header { case "synopsis": // Ignore this, just go direct to command case "description": // Just skip the header & newline skipNextLineIfBlank = true case "options": out.WriteString("Options:" + "\n") case "see also": // don't include any content after this break scanloop default: out.WriteString(strings.ToUpper(header[:1]) + header[1:] + "\n") out.WriteString(strings.Repeat("-", len(header)) + "\n") } firstHeaderDone = true lastLineWasBullet = false continue } if lmatches := linkregex.FindAllStringSubmatch(line, -1); lmatches != nil { for _, lmatch := range lmatches { linktext := 
strings.ToLower(lmatch[1]) line = strings.Replace(line, lmatch[0], `"`+strings.ToUpper(linktext[:1])+linktext[1:]+`"`, 1) } } if manmatches := manlinkregex.FindAllStringSubmatch(line, -1); manmatches != nil { for _, manmatch := range manmatches { line = strings.Replace(line, manmatch[0], strings.Join(manmatch[1:], " "), 1) } } // Skip content until after first header if !firstHeaderDone { continue } // OK, content here // remove characters that markdown would render invisible in a text env. for _, invis := range []string{"`", "<br>
"} { line = strings.Replace(line, invis, "", -1) } // indent bullets if strings.HasPrefix(line, "*") { lastLineWasBullet = true } else if lastLineWasBullet && !strings.HasPrefix(line, " ") { // indent paragraphs under bullets if not already done line = " " + line } out.WriteString(line + "\n") } out.WriteString("`\n") contentf.Close() count++ } } out.WriteString("}\n") fmt.Fprintf(os.Stderr, "Successfully processed %d man pages.\n", count) } git-lfs-2.3.4/docs/proposals/000077500000000000000000000000001317167762300160545ustar00rootroot00000000000000git-lfs-2.3.4/docs/proposals/README.md000066400000000000000000000005241317167762300173340ustar00rootroot00000000000000# Git LFS Proposals This directory contains high level proposals for future Git LFS features. Inclusion here does not guarantee when or if a feature will make it in to Git LFS. It doesn't even guarantee that the specifics won't change. Everyone is welcome to submit their own proposal as a markdown file in a pull request for discussion. git-lfs-2.3.4/docs/proposals/locking.md000066400000000000000000000443011317167762300200260ustar00rootroot00000000000000# Locking feature proposal We need the ability to lock files to discourage (we can never prevent) parallel editing of binary files which will result in an unmergeable situation. This is not a common theme in git (for obvious reasons, it conflicts with its distributed, parallel nature), but is a requirement of any binary management system, since files are very often completely unmergeable, and no-one likes having to throw their work away & do it again. ## What not to do: single branch model The simplest way to organise locking is to require that if binary files are only ever edited on a single branch, and therefore editing this file can follow a simple sequence: 1. File starts out read-only locally 2. User locks the file, user is required to have the latest version locally from the 'main' branch 3. User edits file & commits 1 or more times 4. User pushes these commits to the main branch 5. File is unlocked (and made read only locally again) ## A more usable approach: multi-branch model In practice teams need to work on more than one branch, and sometimes that work will have corresponding binary edits. It's important to remember that the core requirement is to prevent *unintended parallel edits of an unmergeable file*. One way to address this would be to say that locking a file locks it across all branches, and that lock is only released when the branch where the edit is is merged back into a 'primary' branch. The problem is that although that allows branching and also prevents merge conflicts, it forces merging of feature branches before a further edit can be made by someone else. An alternative is that locking a file locks it across all branches, but when the lock is released, further locks on that file can only be taken on a descendant of the latest edit that has been made, whichever branch it is on. That means a change to the rules of the lock sequence, namely: 1. File starts out read-only locally 2. User tries to lock a file. This is only allowed if: * The file is not already locked by anyone else, AND * One of the following are true: * The user has, or agrees to check out, a descendant of the latest commit that was made for that file, whatever branch that was on, OR * The user stays on their current commit but resets the locked file to the state of the latest commit (making it modified locally, and also cherry-picking changes for that file in practice). 3. 
User edits file & commits 1 or more times, on any branch they like 4. User pushes the commits 5. File is unlocked if: * the latest commit to that file has been pushed (on any branch), and * the file is not locally edited This means that long-running branches can be maintained but that editing of a binary file must always incorporate the latest binary edits. This means that if this system is always respected, there is only ever one linear stream of development for this binary file, even though that 'thread' may wind its way across many different branches in the process. This does mean that no-one's changes are accidentally lost, but it does mean that we are either making new branches dependent on others, OR we're cherry-picking changes to individual files across branches. This does change the traditional git workflow, but importantly it achieves the core requirement of never *accidentally* losing anyone's changes. How changes are threaded across branches is always under the user's control. ## Breaking the rules We must allow the user to break the rules if they know what they are doing. Locking is there to prevent unintended binary merge conflicts, but sometimes you might want to intentionally create one, with the full knowledge that you're going to have to manually merge the result (or more likely, pick one side and discard the other) later down the line. There are 2 cases of rule breaking to support: 1. **Break someone else's lock** People lock files and forget they've locked them, then go on holiday, or worse, leave the company. You can't be stuck not being able to edit that file so must be able to forcibly break someone else's lock. Ideally this should result in some kind of notification to the original locker (might need to be a special value-add on BB/Stash). This effectively removes the other person's lock and is likely to cause them problems if they had edited and try to push next time. 2. **Allow a parallel lock** Actually similar to breaking someone else's lock, except it lets you take another lock on a file in parallel, leaving their lock in place too, and knowing that you're going to have to resolve the merge problem later. You could handle this just by manually making files read/write, then using 'force push' to override hooks that prevent pushing when not locked. However by explicitly registering a parallel lock (possible form: 'git lfs lock --force') this could be recorded and communicated to anyone else with a lock, letting them know about possible merge issues down the line. ## Detailed feature points |No | Feature | Notes |---|---------|------------------ |1 |Lock server must be available at same API URL| |2 |Identify unmergeable files as subset of lfs files|`git lfs track -b` ? |3 |Make unmergeable files read-only on checkout|Perform in smudge filter |4 |Lock a file
  • Check with server which must atomically check/set
  • Check person requesting the lock is checked out on a commit which is a descendant of the last edit of that file (locally or on server, although last lock shouldn't have been released until push anyway), or allow --force to break the rule
  • Record lock on server
  • Make file read/write locally if success
|`git lfs lock <path>`? |5 |Release a lock
  • Check if locally modified, if so must discard
  • Check if user has more recent commit of this file than server, if so must push first
  • Release lock on server atomically
  • Make local file read-only
|`git lfs unlock <path>`? |6 |Break a lock, i.e. override someone else's lock and take it yourself.
  • Release lock on server atomically
  • Proceed as per 'Lock a file'
  • Notify original lock holder HOW?
|`git lfs lock -break <path>`? |7 |Release lock on reset (maybe). Configurable option / prompt? May be resetting just to start editing again| |8 |Release lock on push (maybe, if unmodified). See above| |9 |Cater for read-only binary files when merging locally
  • Because files are read-only this might prevent merge from working when actually it's valid.
  • Always fine to merge the latest version of a binary file to anywhere else
  • Fine to merge the non-latest version if user is aware that this may cause merge problems (see Breaking the rules)
  • Therefore this feature is about dealing with the read-only flag and issuing a warning if not the latest
| |10 |List current locks
  • That the current user has
  • That anyone has
  • Potentially scoped to folder
|`git lfs lock --list [paths...]` |11 |Reject a push containing a binary file currently locked by someone else|pre-receive hook on server, allow --force to override (i.e. existing parameter to git push) ## Locking challenges ### Making files read-only This is useful because it means it provides a reminder that the user should be locking the file before they start to edit it, to avoid the case of an unexpected merge later on. I've done some tests with chmod and discovered: * Removing the write bit doesn't cause the file to be marked modified (good) * In most editors it either prevents saving or (in Apple tools) prompts to 'unlock'. The latter is slightly unhelpful * In terms of marking files that need locking, adding custom flags to .gitattributes (like 'lock') seems to work; `git check-attr -a ` correctly lists the custom attribute * Once a file is marked read-only however, `git checkout` replaces it without prompting, with the write bit set * We can use the `post-checkout` hook to make files read-only, but we don't get any file information, on refs. This means we'd have to scan the whole working copy to figure out what we needed to mark read-only. To do this we'd have to have the attribute information and all the current lock information. This could be time consuming. * A way to speed up the `post-checkout` would be to diff the pre- and post-ref information that's provided and only check the files that changed. In the case of single-file checkouts I'm not sure this is possible though. * We could also feed either the diff or a file scan into `git check-attr --stdin` in order to share the exe, or do our own attribute matching * It's not entirely clear yet how merge & rebase might operate. May also need the `post-merge` hook * See contrib/hooks/setgitperms.perl for an example; so this isn't unprecedented #### Test cases for post-checkout * Checkout a branch * Calls `post-checkout` with pre/post SHA and branch=1 * Checkout a tag * Calls `post-checkout` with pre/post SHA and branch=1 (even though it's a tag) * Checkout by commit SHA * Calls `post-checkout` with pre/post SHA and branch=1 (even though it's a plain SHA) * Checkout named files (e.g. discard changes) * Calls `post-checkout` with identical pre/post SHA (HEAD) and branch=0 * Reset all files (discard all changes ie git reset --hard HEAD) * Doesn't call `post-checkout` - could restore write bit, but must have been set anyway for file to be edited, so not a problem? * Reset a branch to a previous commit * Doesn't call `post-checkout` - PROBLEM because can restore write bit & file was not modified. BUT: rare & maybe liveable * Merge a branch with lockable file changes (non-conflicting) * Rebase a branch with lockable files (non-conflicting) * Merge conflicts - fix then commit * Rebase conflicts - fix then continue * ## Implementation details (Initial simple API-only pass) ### Types To make the implementing locking on the lfs-test-server as well as other servers in the future easier, it makes sense to create a `lock` package that can be depended upon from any server. This will go along with Steve's refactor which touches the `lfs` package quite a bit. Below are enumerated some of the types that will presumably land in this sub-package. ```go // Lock represents a single lock that against a particular path. // // Locks returned from the API may or may not be currently active, according to // the Expired flag. type Lock struct { // Id is the unique identifier corresponding to this particular Lock. 
It // must be consistent with the local copy, and the server's copy. Id string `json:"id"` // Path is an absolute path to the file that is locked as a part of this // lock. Path string `json:"path"` // Committer is the author who initiated this lock. Committer struct { Name string `json:"name"` Email string `json:"email"` } `json:"creator"` // CommitSHA is the commit that this Lock was created against. It is // strictly equal to the SHA of the minimum commit negotiated in order // to create this lock. CommitSHA string `json:"commit_sha"` // LockedAt is a required parameter that represents the instant in time // that this lock was created. For most server implementations, this // should be set to the instant at which the lock was initially // received. LockedAt time.Time `json:"locked_at"` // UnlockedAt is an optional parameter that represents the instant in // time that the lock stopped being active. If the lock is still active, // the server can either a) not send this field, or b) send the // zero-value of time.Time. UnlockedAt time.Time `json:"unlocked_at,omitempty"` } // Active returns whether or not the given lock is still active against the file // that it is protecting. func (l *Lock) Active() bool { return l.UnlockedAt.IsZero() } ``` ### Proposed Commands #### `git lfs lock <path>` The `lock` command will be used in accordance with the multi-branch flow as proposed above to request that a lock be granted for the specific path passed as an argument to the command. ```go // LockRequest encapsulates the payload sent across the API when a client would // like to obtain a lock against a particular path on a given remote. type LockRequest struct { // Path is the path that the client would like to obtain a lock against. Path string `json:"path"` // LatestRemoteCommit is the SHA of the last known commit from the // remote that we are trying to create the lock against, as found in // `.git/refs/origin/<branch>`. LatestRemoteCommit string `json:"latest_remote_commit"` // Committer is the individual that wishes to obtain the lock. Committer struct { // Name is the name of the individual who would like to obtain the // lock, for instance: "Rick Olson". Name string `json:"name"` // Email is the email associated with the individual who would // like to obtain the lock, for instance: "rick@github.com". Email string `json:"email"` } `json:"committer"` } ``` ```go // LockResponse encapsulates the information sent over the API in response to // a `LockRequest`. type LockResponse struct { // Lock is the Lock that was optionally created in response to the // payload that was sent (see above). If the lock already exists, then // the existing lock is sent in this field instead, and the author of // that lock remains the same, meaning that the client failed to obtain // that lock. An HTTP status of "409 - Conflict" is used here. // // If the lock was unable to be created, this field will hold the // zero-value of Lock and the Err field will provide a more detailed set // of information. // // If an error was experienced in creating this lock, then the // zero-value of Lock should be sent here instead. Lock Lock `json:"lock"` // CommitNeeded holds the minimum commit SHA that the client must have to // obtain the lock. CommitNeeded string `json:"commit_needed"` // Err is the optional error that was encountered while trying to create // the above lock. Err error `json:"error,omitempty"` } ``` #### `git lfs unlock <path>` The `unlock` command is responsible for releasing the lock against a particular file.
The command takes a `` argument which the LFS client will have to internally resolve into a Id to unlock. The API associated with this command can also be used on the server to remove existing locks after a push. ```go // An UnlockRequest is sent by the client over the API when they wish to remove // a lock associated with the given Id. type UnlockRequest struct { // Id is the identifier of the lock that the client wishes to remove. Id string `json:"id"` } ``` ```go // UnlockResult is the result sent back from the API when asked to remove a // lock. type UnlockResult struct { // Lock is the lock corresponding to the asked-about lock in the // `UnlockPayload` (see above). If no matching lock was found, this // field will take the zero-value of Lock, and Err will be non-nil. Lock Lock `json:"lock"` // Err is an optional field which holds any error that was experienced // while removing the lock. Err error `json:"error,omitempty"` } ``` Clients can determine whether or not their lock was removed by calling the `Active()` method on the returned Lock, if `UnlockResult.Err` is nil. #### `git lfs locks (-r |-b )|(-i id)` For many operations, the LFS client will need to have knowledge of existing locks on the server. Additionally, the client should not have to self-sort/index this (potentially) large set. To remove this need, both the `locks` command and corresponding API method take several filters. Clients should turn the flag-values that were passed during the command invocation into `Filter`s as described below, and batched up into the `Filters` field in the `LockListRequest`. ```go // Property is a constant-type that narrows fields pertaining to the server's // Locks. type Property string const ( Branch Property = "branch" Id Property = "id" // (etc) ... ) // LockListRequest encapsulates the request sent to the server when the client // would like a list of locks that match the given criteria. type LockListRequest struct { // Filters is the set of filters to query against. If the client wishes // to obtain a list of all locks, an empty array should be passed here. Filters []{ // Prop is the property to search against. Prop Property `json:"prop"` // Value is the value that the property must take. Value string `json:"value"` } `json:"filters"` // Cursor is an optional field used to tell the server which lock was // seen last, if scanning through multiple pages of results. // // Servers must return a list of locks sorted in reverse chronological // order, so the Cursor provides a consistent method of viewing all // locks, even if more were created between two requests. Cursor string `json:"cursor,omitempty"` // Limit is the maximum number of locks to return in a single page. Limit int `json:"limit"` } ``` ```go // LockList encapsulates a set of Locks. type LockList struct { // Locks is the set of locks returned back, typically matching the query // parameters sent in the LockListRequest call. If no locks were matched // from a given query, then `Locks` will be represented as an empty // array. Locks []Lock `json:"locks"` // NextCursor returns the Id of the Lock the client should update its // cursor to, if there are multiple pages of results for a particular // `LockListRequest`. NextCursor string `json:"next_cursor,omitempty"` // Err populates any error that was encountered during the search. If no // error was encountered and the operation was succesful, then a value // of nil will be passed here. 
Err error `json:"error,omitempty"` } git-lfs-2.3.4/docs/proposals/locking_api.md000066400000000000000000000072131317167762300206600ustar00rootroot00000000000000# Locking API proposal ## POST /locks | Method | Accept | Content-Type | Authorization | |---------|--------------------------------|--------------------------------|---------------| | `POST` | `application/vnd.git-lfs+json` | `application/vnd.git-lfs+json` | Basic | ### Request ``` > GET https://git-lfs-server.com/locks > Accept: application/vnd.git-lfs+json > Authorization: Basic > Content-Type: application/vnd.git-lfs+json > > { > path: "/path/to/file", > remote: "origin", > latest_remote_commit: "d3adbeef", > committer: { > name: "Jane Doe", > email: "jane@example.com" > } > } ``` ### Response * **Successful response** ``` < HTTP/1.1 201 Created < Content-Type: application/vnd.git-lfs+json < < { < lock: { < id: "some-uuid", < path: "/path/to/file", < committer: { < name: "Jane Doe", < email: "jane@example.com" < }, < commit_sha: "d3adbeef", < locked_at: "2016-05-17T15:49:06+00:00" < } < } ``` * **Bad request: minimum commit not met** ``` < HTTP/1.1 400 Bad request < Content-Type: application/vnd.git-lfs+json < < { < "commit_needed": "other_sha" < } ``` * **Bad request: lock already present** ``` < HTTP/1.1 409 Conflict < Content-Type: application/vnd.git-lfs+json < < { < lock: { < /* the previously created lock */ < }, < error: "already created lock" < } ``` * **Bad repsonse: server error** ``` < HTTP/1.1 500 Internal server error < Content-Type: application/vnd.git-lfs+json < < { < error: "unable to create lock" < } ``` ## POST /locks/:id/unlock | Method | Accept | Content-Type | Authorization | |---------|--------------------------------|--------------|---------------| | `POST` | `application/vnd.git-lfs+json` | None | Basic | ### Request ``` > POST https://git-lfs-server.com/locks/:id/unlock > Accept: application/vnd.git-lfs+json > Authorization: Basic ``` ### Repsonse * **Success: unlocked** ``` < HTTP/1.1 200 Ok < Content-Type: application/vnd.git-lfs+json < < { < lock: { < id: "some-uuid", < path: "/path/to/file", < committer: { < name: "Jane Doe", < email: "jane@example.com" < }, < commit_sha: "d3adbeef", < locked_at: "2016-05-17T15:49:06+00:00", < unlocked_at: "2016-05-17T15:49:06+00:00" < } < } } ``` * **Bad response: server error** ``` < HTTP/1.1 500 Internal error < Content-Type: application/vnd.git-lfs+json < < { < error: "git-lfs/git-lfs: internal server error" < } ``` ## GET /locks | Method | Accept | Content-Type | Authorization | |--------|-------------------------------|--------------|---------------| | `GET` | `application/vnd.git-lfs+json | None | Basic | ### Request ``` > GET https://git-lfs-server.com/locks?filters...&cursor=&limit= > Accept: application/vnd.git-lfs+json > Authorization: Basic ``` ### Response * **Success: locks found** Note: no matching locks yields a payload of `locks: []`, and a status of 200. 
``` < HTTP/1.1 200 Ok < Content-Type: application/vnd.git-lfs+json < < { < locks: [ < { < id: "some-uuid", < path: "/path/to/file", < committer: { < name: "Jane Doe", < email: "jane@example.com" < }, < commit_sha: "1ec245f", < locked_at: "2016-05-17T15:49:06+00:00" < } < ], < next_cursor: "optional-next-id", < error: "optional error" < } ``` * **Bad response: some locks may have matched, but the server encountered an error** ``` < HTTP/1.1 500 Internal error < Content-Type: application/vnd.git-lfs+json < < { < locks: [], < error: "git-lfs/git-lfs: internal server error" < } ``` git-lfs-2.3.4/docs/proposals/ntlm.md000066400000000000000000000027251317167762300173560ustar00rootroot00000000000000# NTLM Authentication With Git-Lfs Enterprise users in a Windows ecosystem are frequently required to use integrated auth. Basic auth does not meet their security requirements and setting up SSH on Windows is painful. There is an overview of NTLM at http://www.innovation.ch/personal/ronald/ntlm.html ### Implementation If the LFS server returns a "Www-Authenticate: NTLM" header, we will set lfs.{endpoint}.access to be ntlm and resubmit the HTTP request. Subsequent requests will go through the NTLM auth flow. We will store NTLM credentials in the credential helper. When the user is prompted for their credentials they must use username:{DOMAIN}\{user} and password:{pass} The NTLM protocol will be handled by an ntlm.go class that hides the implementation of InitHandshake, Authenticate, and Challenge. This allows minimal changes to the existing client.go class. ### Tech There is an ntlm-go library available at https://github.com/ThomsonReutersEikon/go-ntlm that we can use. We will need to implement the Negotiate method and publish docs on what NTLM switches we support. I think simple user/pass/domain is best here so we avoid supporting a million settings with conflicting docs. ### Work Before supporting this as a mainstream scenario we should investigate making the CI work on Windows so that we can successfully test changes. ### More Info You can see a hacked-together implementation of git lfs push with NTLM at https://github.com/WillHipschman/git-lfs/tree/ntlm git-lfs-2.3.4/docs/proposals/transfer_adapters.md000066400000000000000000000105161317167762300221100ustar00rootroot00000000000000# Transfer adapters for resumable upload / download ## Concept To allow the uploading and downloading of LFS content to be implemented in more ways than the current simple HTTP GET/PUT approach. Features that could be supported by opening this up to other protocols might include: - Resumable transfers - Block-level de-duplication - Delegation to 3rd party services like Dropbox / Google Drive / OneDrive - Non-HTTP services ## API extensions See the [API documentation](../http-v1-batch.md) for specifics. All changes are optional extras so there are no breaking changes to the API. The current HTTP GET/PUT system will remain the default. When a version of the git-lfs client supports alternative transfer mechanisms, it notifies the server in the API request using the `accept-transfers` field. If the server also supports one of the mechanisms the client advertised, it may select one and alter the upload / download URLs to point at resources compatible with this transfer mechanism. It must also indicate the chosen transfer mechanism in the response using the `transfer` field. The URLs provided in this case may not be HTTP; they may be custom protocols. It is up to each individual transfer mechanism to define how URLs are used.
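As an illustrative sketch of that negotiation (hypothetical payloads modelled on the batch API request/response shape; the `tus` identifier, the OID, and the URL are invented for this example):

```
> POST /objects/batch
> {
>   "operation": "upload",
>   "accept-transfers": ["tus", "basic"],
>   "objects": [ { "oid": "4d7a2146...", "size": 12345 } ]
> }

< {
<   "transfer": "tus",
<   "objects": [ { "oid": "4d7a2146...", "size": 12345,
<     "actions": { "upload": { "href": "tus://storage.example.com/4d7a2146" } } } ]
< }
```

A client that advertises no alternative mechanisms simply receives the default HTTP GET/PUT URLs, as today.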
## Client extensions ### Phase 1: refactoring & abstraction 1. Introduce a new concept of 'transfer adapter'. 2. Adapters can provide either upload or download support, or both. This is necessary because some mechanisms are unidirectional, e.g. HTTP Content-Range is download only, tus.io is upload only. 3. Refactor our current HTTP GET/PUT mechanism to be the default implementation for both upload & download 4. The LFS core will pass oids to transfer to this adapter in bulk, and receive events back from the adapter for transfer progress, and file completion. 5. Each adapter is responsible for its own parallelism, but should respect the `lfs.concurrenttransfers` setting. For example the default (current) approach will parallelise on files (oids), but others may parallelise in other ways e.g. downloading multiple parts of the same file at once 6. Each adapter should store its own temporary files. On file completion it must notify the core which in the case of a download is then responsible for moving a completed file into permanent storage. 7. Update the core to have a registry of available transfer mechanisms which it passes to the API, and can recognise a chosen one in the response. Default to our refactored original. ### Phase 2: basic resumable downloads 1. Add a client transfer adapter for [HTTP Range headers](https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35) 2. Add a range request reference implementation to our integration test server ### Phase 3: basic resumable uploads 1. Add a client transfer adapter for [tus.io](http://tus.io) (upload only) 2. Add a tus.io reference implementation to our integration test server ### Phase 4: external transfer adapters Ideally we should allow people to add other transfer implementations so that we don't have to implement everything, or bloat the git-lfs binary with every custom system possible. Because Go is statically linked it's not possible to extend client functionality at runtime through loading libaries, so instead I propose allowing an external process to be invoked, and communicated with via a defined stream protocol. This protocol will be logically identical to the internal adapters; the core passing oids and receiving back progress and completion notifications; just that the implementation will be in an external process and the messages will be serialised over streams. Only one process will be launched and will remain for the entire period of all transfers. Like internal adapters, the external process will be responsible for its own parallelism and temporary storage, so internally they can (should) do multiple transfers at once. 1. Build a generic 'external' adapter which can invoke a named process and communicate with it using the standard stream protocol (probably just over stdout / stdin) 2. Establish a configuration for external adapters; minimum is an identifier (client and server must agree on what that is) and a path to invoke 3. Implement a small test process in Go which simply wraps the default HTTP mechanism in an external process, to prove the approach (not in release) git-lfs-2.3.4/docs/spec.md000066400000000000000000000135051317167762300153120ustar00rootroot00000000000000# Git LFS Specification This is a general guide for Git LFS clients. Typically it should be implemented by a command line `git-lfs` tool, but the details may be useful for other tools. ## The Pointer The core Git LFS idea is that instead of writing large blobs to a Git repository, only a pointer file is written. 
* Pointer files are text files which MUST contain only UTF-8 characters. * Each line MUST be of the format `{key} {value}\n` (trailing unix newline). * Only a single space character between `{key}` and `{value}`. * Keys MUST only use the characters `[a-z] [0-9] . -`. * The first key is _always_ `version`. * Lines of key/value pairs MUST be sorted alphabetically in ascending order (with the exception of `version`, which is always first). * Values MUST NOT contain return or newline characters. * Pointer files MUST be stored in Git with their executable bit matching that of the replaced file. An empty file is the pointer for an empty file. That is, empty files are passed through LFS without any change. The required keys are: * `version` is a URL that identifies the pointer file spec. Parsers MUST use simple string comparison on the version, without any URL parsing or normalization. It is case sensitive, and %-encoding is discouraged. * `oid` tracks the unique object id for the file, prefixed by its hashing method: `{hash-method}:{hash}`. Currently, only `sha256` is supported. * `size` is in bytes. Example of a v1 text pointer: ``` version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 (ending \n) ``` Blobs created with the pre-release version of the tool generated files with a different version URL. Git LFS can read these files, but writes them using the version URL above. ``` version https://hawser.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 (ending \n) ``` For testing compliance of any tool generating its own pointer files, the reference is this official Git LFS tool: **NOTE:** exact pointer command behavior TBD! * Tools that parse and regenerate pointer files MUST preserve keys that they don't know or care about. * Run the `pointer` command to generate a pointer file for the given local file: ``` $ git lfs pointer --file=path/to/file Git LFS pointer for path/to/file: version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 ``` * Run `pointer` to compare the blob OID of a pointer file built by Git LFS with a pointer built by another tool. 
* Write the other implementation's pointer to "other/pointer/file": ``` $ git lfs pointer --file=path/to/file --pointer=other/pointer/file Git LFS pointer for path/to/file: version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 Blob OID: 60c8d8ab2adcf57a391163a7eeb0cdb8bf348e44 Pointer from other/pointer/file version https://git-lfs.github.com/spec/v1 oid sha256 4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 Blob OID: 08e593eeaa1b6032e971684825b4b60517e0638d Pointers do not match ``` * It can also read STDIN to get the other implementation's pointer: ``` $ cat other/pointer/file | git lfs pointer --file=path/to/file --stdin Git LFS pointer for path/to/file: version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 Blob OID: 60c8d8ab2adcf57a391163a7eeb0cdb8bf348e44 Pointer from STDIN version https://git-lfs.github.com/spec/v1 oid sha256 4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 Blob OID: 08e593eeaa1b6032e971684825b4b60517e0638d Pointers do not match ``` ## Intercepting Git Git LFS uses the `clean` and `smudge` filters to decide which files use it. The global filters can be set up with `git lfs install`: ``` $ git lfs install ``` These filters ensure that large files aren't written into the repository proper, instead being stored locally at `.git/lfs/objects/{OID-PATH}` (where `{OID-PATH}` is a sharded filepath of the form `OID[0:2]/OID[2:4]/OID`), synchronized with the Git LFS server as necessary. Here is a sample path to a file: .git/lfs/objects/4d/7a/4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 The `clean` filter runs as files are added to repositories. Git sends the content of the file being added as STDIN, and expects the content to write to Git as STDOUT. * Stream binary content from STDIN to a temp file, while calculating its SHA-256 signature. * Atomically move the temp file to `.git/lfs/objects/{OID-PATH}` if it does not exist, and the sha-256 signature of the contents matches the given OID. * Delete the temp file. * Write the pointer file to STDOUT. Note that the `clean` filter does not push the file to the server. Use the `git push` command to do that (lfs files are pushed before commits in a pre-push hook). The `smudge` filter runs as files are being checked out from the Git repository to the working directory. Git sends the content of the Git blob as STDIN, and expects the content to write to the working directory as STDOUT. * Read 100 bytes. * If the content is ASCII and matches the pointer file format: * Look for the file in `.git/lfs/objects/{OID-PATH}`. * If it's not there, download it from the server. * Write its contents to STDOUT * Otherwise, simply pass the STDIN out through STDOUT. The `.gitattributes` file controls when the filters run. Here's a sample file that runs all mp3 and zip files through Git LFS: ``` $ cat .gitattributes *.mp3 filter=lfs -text *.zip filter=lfs -text ``` Use the `git lfs track` command to view and add to `.gitattributes`. 
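To make the pointer rules and storage layout above concrete, here is a small standalone Go sketch (illustrative only; it is not part of the specification or the reference tool). It builds a v1 pointer for a blob's contents and derives the sharded local object path:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"path/filepath"
)

// pointerFor builds the canonical v1 pointer text for a blob: the
// "version" key first, then the remaining keys in alphabetical order,
// each line formatted as "{key} {value}\n".
func pointerFor(data []byte) (pointer, oid string) {
	oid = fmt.Sprintf("%x", sha256.Sum256(data))
	pointer = fmt.Sprintf(
		"version https://git-lfs.github.com/spec/v1\noid sha256:%s\nsize %d\n",
		oid, len(data))
	return pointer, oid
}

// localPath shards an OID into the .git/lfs/objects/{OID[0:2]}/{OID[2:4]}/{OID}
// layout described above.
func localPath(oid string) string {
	return filepath.Join(".git", "lfs", "objects", oid[:2], oid[2:4], oid)
}

func main() {
	pointer, oid := pointerFor([]byte("large file contents\n"))
	fmt.Print(pointer)
	fmt.Println(localPath(oid))
}
```

Running it prints the three pointer lines followed by the sharded path for that blob's OID.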
git-lfs-2.3.4/errors/000077500000000000000000000000001317167762300144165ustar00rootroot00000000000000git-lfs-2.3.4/errors/context.go000066400000000000000000000021221317167762300164270ustar00rootroot00000000000000package errors type withContext interface { Set(string, interface{}) Get(string) interface{} Del(string) Context() map[string]interface{} } // SetContext sets a value in the error's context. If the error has not // been wrapped, it does nothing. func SetContext(err error, key string, value interface{}) { if e, ok := err.(withContext); ok { e.Set(key, value) } } // GetContext gets a value from the error's context. If the error has not // been wrapped, it returns an empty string. func GetContext(err error, key string) interface{} { if e, ok := err.(withContext); ok { return e.Get(key) } return "" } // DelContext removes a value from the error's context. If the error has // not been wrapped, it does nothing. func DelContext(err error, key string) { if e, ok := err.(withContext); ok { e.Del(key) } } // Context returns the context map for an error if it is a wrappedError. // If it is not a wrappedError it will return an empty map. func Context(err error) map[string]interface{} { if e, ok := err.(withContext); ok { return e.Context() } return nil } git-lfs-2.3.4/errors/errors.go000066400000000000000000000065751317167762300162740ustar00rootroot00000000000000// Package errors provides common error handling tools // NOTE: Subject to change, do not rely on this package from outside git-lfs source package errors // The LFS error system provides a simple wrapper around Go errors and the // ability to inspect errors. It is strongly influenced by Dave Cheney's post // at http://dave.cheney.net/2014/12/24/inspecting-errors. // // When passing errors out of lfs package functions, the return type should // always be `error`. The wrappedError details are not exported. If an error is // the kind of error a caller should need to investigate, an IsXError() // function is provided that tells the caller if the error is of that type. // There should only be a handful of cases where a simple `error` is // insufficient. // // The error behaviors can be nested when created. For example, the not // implemented error can also be marked as a fatal error: // // func LfsFunction() error { // err := functionCall() // if err != nil { // return newFatalError(newNotImplementedError(err)) // } // return nil // } // // Then in the caller: // // err := lfs.LfsFunction() // if lfs.IsNotImplementedError(err) { // log.Print("feature not implemented") // } // if lfs.IsFatalError(err) { // os.Exit(1) // } // // Wrapped errors contain a context, which is a map[string]interface{}. These // contexts can be accessed through the SetContext, GetContext, DelContext, and // Context functions. Calling these functions on a regular Go error will have // no effect. // // Example: // // err := lfs.SomeFunction() // errors.SetContext(err, "foo", "bar") // errors.GetContext(err, "foo") // => "bar" // errors.DelContext(err, "foo") // // Wrapped errors also contain the stack from the point at which they are // called. Use the '%+v' printf verb to display. See the github.com/pkg/errors // docs for more info: https://godoc.org/github.com/pkg/errors import ( "bytes" "fmt" "github.com/pkg/errors" ) // New returns an error with the supplied message. New also records the stack // trace at the point it was called.
func New(message string) error { return errors.New(message) } // Errorf formats according to a format specifier and returns the string // as a value that satisfies error. // Errorf also records the stack trace at the point it was called. func Errorf(format string, args ...interface{}) error { return errors.Errorf(format, args...) } // Wrap wraps an error with an additional message. func Wrap(err error, msg string) error { return newWrappedError(err, msg) } // Wrapf wraps an error with an additional formatted message. func Wrapf(err error, format string, args ...interface{}) error { if err == nil { err = errors.New("") } message := fmt.Sprintf(format, args...) return newWrappedError(err, message) } func StackTrace(err error) []string { type stacktrace interface { StackTrace() errors.StackTrace } if err, ok := err.(stacktrace); ok { frames := err.StackTrace() lines := make([]string, len(frames)) for i, f := range frames { lines[i] = fmt.Sprintf("%+v", f) } return lines } return nil } func Combine(errs []error) error { if len(errs) == 0 { return nil } var buf bytes.Buffer for i, err := range errs { if i > 0 { buf.WriteString("\n") } buf.WriteString(err.Error()) } return fmt.Errorf(buf.String()) } func Cause(err error) error { type causer interface { Cause() error } if cause, ok := err.(causer); ok { return Cause(cause.Cause()) } return err } git-lfs-2.3.4/errors/errors_test.go000066400000000000000000000027121317167762300173220ustar00rootroot00000000000000package errors import ( "errors" "testing" ) func TestChecksHandleGoErrors(t *testing.T) { err := errors.New("Go Error") if IsFatalError(err) { t.Error("go error should not be a fatal error") } } func TestCheckHandlesWrappedErrors(t *testing.T) { err := errors.New("Go error") fatal := NewFatalError(err) if !IsFatalError(fatal) { t.Error("expected error to be fatal") } } func TestBehaviorWraps(t *testing.T) { err := errors.New("Go error") fatal := NewFatalError(err) ni := NewNotImplementedError(fatal) if !IsNotImplementedError(ni) { t.Error("expected erro to be not implemeted") } if !IsFatalError(ni) { t.Error("expected wrapped error to also be fatal") } if IsNotImplementedError(fatal) { t.Error("expected fatal error to not be not implemented") } } func TestContextOnGoErrors(t *testing.T) { err := errors.New("Go error") SetContext(err, "foo", "bar") v := GetContext(err, "foo") if v == "bar" { t.Error("expected empty context on go error") } } func TestContextOnWrappedErrors(t *testing.T) { err := NewFatalError(errors.New("Go error")) SetContext(err, "foo", "bar") if v := GetContext(err, "foo"); v != "bar" { t.Error("expected to be able to use context on wrapped errors") } ctxt := Context(err) if ctxt["foo"] != "bar" { t.Error("expected to get the context of an error") } DelContext(err, "foo") if v := GetContext(err, "foo"); v == "bar" { t.Errorf("expected to delete from error context") } } git-lfs-2.3.4/errors/types.go000066400000000000000000000170551317167762300161210ustar00rootroot00000000000000package errors import ( "fmt" "net/url" "github.com/pkg/errors" ) // IsFatalError indicates that the error is fatal and the process should exit // immediately after handling the error. func IsFatalError(err error) bool { if e, ok := err.(interface { Fatal() bool }); ok { return e.Fatal() } if parent := parentOf(err); parent != nil { return IsFatalError(parent) } return false } // IsNotImplementedError indicates the client attempted to use a feature the // server has not implemented (e.g. the batch endpoint). 
func IsNotImplementedError(err error) bool { if e, ok := err.(interface { NotImplemented() bool }); ok { return e.NotImplemented() } if parent := parentOf(err); parent != nil { return IsNotImplementedError(parent) } return false } // IsAuthError indicates the client provided a request with invalid or no // authentication credentials when credentials are required (e.g. HTTP 401). func IsAuthError(err error) bool { if e, ok := err.(interface { AuthError() bool }); ok { return e.AuthError() } if parent := parentOf(err); parent != nil { return IsAuthError(parent) } return false } // IsSmudgeError indicates an error while smudging a file. func IsSmudgeError(err error) bool { if e, ok := err.(interface { SmudgeError() bool }); ok { return e.SmudgeError() } if parent := parentOf(err); parent != nil { return IsSmudgeError(parent) } return false } // IsCleanPointerError indicates an error while cleaning a file. func IsCleanPointerError(err error) bool { if e, ok := err.(interface { CleanPointerError() bool }); ok { return e.CleanPointerError() } if parent := parentOf(err); parent != nil { return IsCleanPointerError(parent) } return false } // IsNotAPointerError indicates the parsed data is not an LFS pointer. func IsNotAPointerError(err error) bool { if e, ok := err.(interface { NotAPointerError() bool }); ok { return e.NotAPointerError() } if parent := parentOf(err); parent != nil { return IsNotAPointerError(parent) } return false } // IsBadPointerKeyError indicates that the parsed data has an invalid key. func IsBadPointerKeyError(err error) bool { if e, ok := err.(interface { BadPointerKeyError() bool }); ok { return e.BadPointerKeyError() } if parent := parentOf(err); parent != nil { return IsBadPointerKeyError(parent) } return false } // StandardizeBadPointerError converts a bad pointer key error on the leading // "version" key into a NotAPointerError; any other error is returned // unchanged. func StandardizeBadPointerError(err error) error { if IsBadPointerKeyError(err) { badErr := err.(badPointerKeyError) if badErr.Expected == "version" { return NewNotAPointerError(err) } } return err } // IsDownloadDeclinedError indicates that the smudge operation should not download. // TODO: I don't really like using errors to control that flow, it should be refactored. func IsDownloadDeclinedError(err error) bool { if e, ok := err.(interface { DownloadDeclinedError() bool }); ok { return e.DownloadDeclinedError() } if parent := parentOf(err); parent != nil { return IsDownloadDeclinedError(parent) } return false } // IsRetriableError indicates the low level transfer had an error but the // caller may retry the operation. func IsRetriableError(err error) bool { if e, ok := err.(interface { RetriableError() bool }); ok { return e.RetriableError() } if cause, ok := Cause(err).(*url.Error); ok { return cause.Temporary() || cause.Timeout() } if parent := parentOf(err); parent != nil { return IsRetriableError(parent) } return false } type errorWithCause interface { Cause() error StackTrace() errors.StackTrace error fmt.Formatter } // wrappedError is the base error wrapper. It provides a Message string, a // stack, and a context map around a regular Go error. type wrappedError struct { errorWithCause context map[string]interface{} }
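// Illustrative sketch (not part of the original source): the Is*Error checks
// walk one level of wrapping via parentOf below, so a behaviour error that is
// later re-wrapped with extra context keeps its behaviour.
func exampleFatalSurvivesRewrap() bool {
	err := NewFatalError(New("boom"))
	err = Wrap(err, "while doing work")
	return IsFatalError(err) // still true
}

// newWrappedError creates a wrappedError.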
func newWrappedError(err error, message string) *wrappedError { if err == nil { err = errors.New("Error") } var errWithCause errorWithCause if len(message) > 0 { errWithCause = errors.Wrap(err, message).(errorWithCause) } else if ewc, ok := err.(errorWithCause); ok { errWithCause = ewc } else { errWithCause = errors.Wrap(err, "LFS").(errorWithCause) } return &wrappedError{ context: make(map[string]interface{}), errorWithCause: errWithCause, } } // Set sets the value for the key in the context. func (e wrappedError) Set(key string, val interface{}) { e.context[key] = val } // Get gets the value for a key in the context. func (e wrappedError) Get(key string) interface{} { return e.context[key] } // Del removes a key from the context. func (e wrappedError) Del(key string) { delete(e.context, key) } // Context returns the underlying context. func (e wrappedError) Context() map[string]interface{} { return e.context } // Definitions for IsFatalError() type fatalError struct { *wrappedError } func (e fatalError) Fatal() bool { return true } func NewFatalError(err error) error { return fatalError{newWrappedError(err, "Fatal error")} } // Definitions for IsNotImplementedError() type notImplementedError struct { *wrappedError } func (e notImplementedError) NotImplemented() bool { return true } func NewNotImplementedError(err error) error { return notImplementedError{newWrappedError(err, "Not implemented")} } // Definitions for IsAuthError() type authError struct { *wrappedError } func (e authError) AuthError() bool { return true } func NewAuthError(err error) error { return authError{newWrappedError(err, "Authentication required")} } // Definitions for IsSmudgeError() type smudgeError struct { *wrappedError } func (e smudgeError) SmudgeError() bool { return true } func NewSmudgeError(err error, oid, filename string) error { e := smudgeError{newWrappedError(err, "Smudge error")} SetContext(e, "OID", oid) SetContext(e, "FileName", filename) return e } // Definitions for IsCleanPointerError() type cleanPointerError struct { *wrappedError } func (e cleanPointerError) CleanPointerError() bool { return true } func NewCleanPointerError(pointer interface{}, bytes []byte) error { err := New("pointer error") e := cleanPointerError{newWrappedError(err, "clean")} SetContext(e, "pointer", pointer) SetContext(e, "bytes", bytes) return e } // Definitions for IsNotAPointerError() type notAPointerError struct { *wrappedError } func (e notAPointerError) NotAPointerError() bool { return true } func NewNotAPointerError(err error) error { return notAPointerError{newWrappedError(err, "Pointer file error")} } type badPointerKeyError struct { Expected string Actual string *wrappedError } func (e badPointerKeyError) BadPointerKeyError() bool { return true } func NewBadPointerKeyError(expected, actual string) error { err := Errorf("Expected key %s, got %s", expected, actual) return badPointerKeyError{expected, actual, newWrappedError(err, "pointer parsing")} } // Definitions for IsDownloadDeclinedError() type downloadDeclinedError struct { *wrappedError } func (e downloadDeclinedError) DownloadDeclinedError() bool { return true } func NewDownloadDeclinedError(err error, msg string) error { return downloadDeclinedError{newWrappedError(err, msg)} } // Definitions for IsRetriableError() type retriableError struct { *wrappedError } func (e retriableError) RetriableError() bool { return true } func NewRetriableError(err error) error { return retriableError{newWrappedError(err, "")} } func parentOf(err error) error { type causer 
interface { Cause() error } if c, ok := err.(causer); ok { if innerC, innerOk := c.Cause().(causer); innerOk { return innerC.Cause() } } return nil } git-lfs-2.3.4/errors/types_test.go000066400000000000000000000015041317167762300171500ustar00rootroot00000000000000package errors_test import ( "net/url" "testing" "github.com/git-lfs/git-lfs/errors" "github.com/stretchr/testify/assert" ) type TemporaryError struct { } func (e TemporaryError) Error() string { return "" } func (e TemporaryError) Temporary() bool { return true } type TimeoutError struct { } func (e TimeoutError) Error() string { return "" } func (e TimeoutError) Timeout() bool { return true } func TestCanRetryOnTemporaryError(t *testing.T) { err := &url.Error{Err: TemporaryError{}} assert.True(t, errors.IsRetriableError(err)) } func TestCanRetryOnTimeoutError(t *testing.T) { err := &url.Error{Err: TimeoutError{}} assert.True(t, errors.IsRetriableError(err)) } func TestCannotRetryOnGenericUrlError(t *testing.T) { err := &url.Error{Err: errors.New("")} assert.False(t, errors.IsRetriableError(err)) } git-lfs-2.3.4/filepathfilter/000077500000000000000000000000001317167762300161045ustar00rootroot00000000000000git-lfs-2.3.4/filepathfilter/bench_test.go000066400000000000000000000047151317167762300205600ustar00rootroot00000000000000package filepathfilter_test import ( "os" "path/filepath" "sync" "testing" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/tools" ) func BenchmarkFilterSimplePath(b *testing.B) { files := benchmarkTree(b) filter := filepathfilter.New([]string{"lfs"}, nil) for i := 0; i < b.N; i++ { for _, f := range files { filter.Allows(f) } } } func BenchmarkPatternSimplePath(b *testing.B) { files := benchmarkTree(b) pattern := filepathfilter.NewPattern("lfs") for i := 0; i < b.N; i++ { for _, f := range files { pattern.Match(f) } } } func BenchmarkFilterSimpleExtension(b *testing.B) { files := benchmarkTree(b) filter := filepathfilter.New([]string{"*.go"}, nil) for i := 0; i < b.N; i++ { for _, f := range files { filter.Allows(f) } } } func BenchmarkPatternSimpleExtension(b *testing.B) { files := benchmarkTree(b) pattern := filepathfilter.NewPattern("*.go") for i := 0; i < b.N; i++ { for _, f := range files { pattern.Match(f) } } } func BenchmarkFilterComplexExtension(b *testing.B) { files := benchmarkTree(b) filter := filepathfilter.New([]string{"*.travis.yml"}, nil) for i := 0; i < b.N; i++ { for _, f := range files { filter.Allows(f) } } } func BenchmarkPatternComplexExtension(b *testing.B) { files := benchmarkTree(b) pattern := filepathfilter.NewPattern("*.travis.yml") for i := 0; i < b.N; i++ { for _, f := range files { pattern.Match(f) } } } func BenchmarkFilterDoubleAsterisk(b *testing.B) { files := benchmarkTree(b) filter := filepathfilter.New([]string{"**/README.md"}, nil) for i := 0; i < b.N; i++ { for _, f := range files { filter.Allows(f) } } } func BenchmarkPatternDoubleAsterisk(b *testing.B) { files := benchmarkTree(b) pattern := filepathfilter.NewPattern("**/README.md") for i := 0; i < b.N; i++ { for _, f := range files { pattern.Match(f) } } } var ( benchmarkFiles []string benchmarkMu sync.Mutex ) func benchmarkTree(b *testing.B) []string { benchmarkMu.Lock() defer benchmarkMu.Unlock() if benchmarkFiles != nil { return benchmarkFiles } wd, err := os.Getwd() if err != nil { b.Fatal(err) } hasErrors := false tools.FastWalkGitRepo(filepath.Dir(wd), func(parent string, info os.FileInfo, err error) { if err != nil { hasErrors = true b.Error(err) return } benchmarkFiles = 
append(benchmarkFiles, filepath.Join(parent, info.Name())) }) if hasErrors { b.Fatal("has errors :(") } return benchmarkFiles } git-lfs-2.3.4/filepathfilter/filepathfilter.go000066400000000000000000000217401317167762300214410ustar00rootroot00000000000000package filepathfilter import ( "fmt" "path/filepath" "regexp" "strings" ) type Pattern interface { // HasPrefix returns whether the receiving Pattern will match a fullpath // that contains the prefix "prefix". // // For instance, if the receiving pattern were to match 'a/b/c.txt', // HasPrefix() will return true for: // // - 'a', and 'a/' // - 'a/b', and 'a/b/' HasPrefix(prefix string) bool Match(filename string) bool // String returns a string representation (see: regular expressions) of // the underlying pattern used to match filenames against this Pattern. String() string } type Filter struct { include []Pattern exclude []Pattern } func NewFromPatterns(include, exclude []Pattern) *Filter { return &Filter{include: include, exclude: exclude} } func New(include, exclude []string) *Filter { return NewFromPatterns(convertToPatterns(include), convertToPatterns(exclude)) } // Include returns the result of calling String() on each Pattern in the // include set of this *Filter. func (f *Filter) Include() []string { return patternsToStrings(f.include...) } // Exclude returns the result of calling String() on each Pattern in the // exclude set of this *Filter. func (f *Filter) Exclude() []string { return patternsToStrings(f.exclude...) } // patternsToStrings maps the given set of Pattern's to a string slice by // calling String() on each pattern. func patternsToStrings(ps ...Pattern) []string { s := make([]string, 0, len(ps)) for _, p := range ps { s = append(s, p.String()) } return s } func (f *Filter) Allows(filename string) bool { _, allowed := f.AllowsPattern(filename) return allowed } // HasPrefix returns whether the given prefix "prefix" is a prefix for any of // the included Patterns, and is not matched by any of the excluded Patterns. func (f *Filter) HasPrefix(prefix string) bool { if f == nil { return true } parts := strings.Split(prefix, sep) L: for i := len(parts); i > 0; i-- { prefix := strings.Join(parts[:i], sep) for _, p := range f.exclude { if p.Match(prefix) { break L } } if len(f.include) == 0 { return true } for _, p := range f.include { if p.HasPrefix(prefix) { return true } } } return false }
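// Illustrative sketch (not part of the original source): building a filter
// and asking which pattern decided a path. The pattern strings and paths are
// made-up example values (forward-slash separators, as on non-Windows hosts).
func exampleFilter() {
	f := New([]string{"*.dat"}, []string{"secret/*.dat"})

	f.Allows("a/b.dat") // => true, via the include pattern "*.dat"

	pat, ok := f.AllowsPattern("secret/x.dat")
	_, _ = pat, ok // => "secret/*.dat", false: the exclude pattern wins
}

// AllowsPattern returns whether the given filename is permitted by the
// inclusion/exclusion rules of this filter, as well as the pattern that either
// allowed or disallowed that filename.
//
// In special cases, such as a nil `*Filter` receiver, the absence of any
// patterns, or the given filename not being matched by any pattern, the empty
// string "" will be returned in place of the pattern.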
func (f *Filter) AllowsPattern(filename string) (pattern string, allowed bool) { if f == nil { return "", true } if len(f.include)+len(f.exclude) == 0 { return "", true } cleanedName := filepath.Clean(filename) if len(f.include) > 0 { matched := false for _, inc := range f.include { matched = inc.Match(cleanedName) if matched { pattern = inc.String() break } } if !matched { return "", false } } if len(f.exclude) > 0 { for _, ex := range f.exclude { if ex.Match(cleanedName) { return ex.String(), false } } } return pattern, true } const ( sep = string(filepath.Separator) ) func NewPattern(rawpattern string) Pattern { cleanpattern := filepath.Clean(rawpattern) // Special case local dir, matches all (inc subpaths) if _, local := localDirSet[cleanpattern]; local { return noOpMatcher{} } hasPathSep := strings.Contains(cleanpattern, sep) ext := filepath.Ext(cleanpattern) plen := len(cleanpattern) if plen > 1 && !hasPathSep && strings.HasPrefix(cleanpattern, "*") && cleanpattern[1:plen] == ext { return &simpleExtPattern{ext: ext} } // special case * when there are no path separators // filepath.Match never allows * to match a path separator, which is correct // for gitignore IF the pattern includes a path separator, but not otherwise // So *.txt should match in any subdir, as should test*, but sub/*.txt would // only match directly in the sub dir // Don't need to test cross-platform separators as both cleaned above if !hasPathSep && strings.Contains(cleanpattern, "*") { pattern := regexp.QuoteMeta(cleanpattern) regpattern := fmt.Sprintf("^%s$", strings.Replace(pattern, "\\*", ".*", -1)) return &pathlessWildcardPattern{ rawPattern: cleanpattern, wildcardRE: regexp.MustCompile(regpattern), } } // Also support ** with path separators if hasPathSep && strings.Contains(cleanpattern, "**") { pattern := regexp.QuoteMeta(cleanpattern) regpattern := fmt.Sprintf("^%s$", strings.Replace(pattern, "\\*\\*", ".*", -1)) return &doubleWildcardPattern{ rawPattern: cleanpattern, wildcardRE: regexp.MustCompile(regpattern), } } if hasPathSep && strings.HasPrefix(cleanpattern, sep) { rel := cleanpattern[1:len(cleanpattern)] prefix := rel if strings.HasSuffix(rel, sep) { rel = rel[0 : len(rel)-1] } else { prefix += sep } return &pathPrefixPattern{ rawPattern: cleanpattern, relative: rel, prefix: prefix, } } return &pathPattern{ rawPattern: cleanpattern, prefix: cleanpattern + sep, suffix: sep + cleanpattern, inner: sep + cleanpattern + sep, } } func convertToPatterns(rawpatterns []string) []Pattern { patterns := make([]Pattern, len(rawpatterns)) for i, raw := range rawpatterns { patterns[i] = NewPattern(raw) } return patterns } type pathPrefixPattern struct { rawPattern string relative string prefix string } // Match is a revised version of filepath.Match which makes it behave more // like gitignore func (p *pathPrefixPattern) Match(name string) bool { if name == p.relative || strings.HasPrefix(name, p.prefix) { return true } matched, _ := filepath.Match(p.rawPattern, name) return matched } func (p *pathPrefixPattern) HasPrefix(name string) bool { return strings.HasPrefix(p.relative, name) } // String returns a string representation of the underlying pattern for which // this *pathPrefixPattern is matching. 
func (p *pathPrefixPattern) String() string { return p.rawPattern } type pathPattern struct { rawPattern string prefix string suffix string inner string } // Match is a revised version of filepath.Match which makes it behave more // like gitignore func (p *pathPattern) Match(name string) bool { if strings.HasPrefix(name, p.prefix) || strings.HasSuffix(name, p.suffix) || strings.Contains(name, p.inner) { return true } matched, _ := filepath.Match(p.rawPattern, name) return matched } func (p *pathPattern) HasPrefix(name string) bool { return strings.HasPrefix(p.prefix, name) } // String returns a string representation of the underlying pattern for which // this *pathPattern is matching. func (p *pathPattern) String() string { return p.rawPattern } type simpleExtPattern struct { ext string } func (p *simpleExtPattern) Match(name string) bool { return strings.HasSuffix(name, p.ext) } func (p *simpleExtPattern) HasPrefix(name string) bool { return true } // String returns a string representation of the underlying pattern for which // this *simpleExtPattern is matching. func (p *simpleExtPattern) String() string { return fmt.Sprintf("*%s", p.ext) } type pathlessWildcardPattern struct { rawPattern string wildcardRE *regexp.Regexp } // Match is a revised version of filepath.Match which makes it behave more // like gitignore func (p *pathlessWildcardPattern) Match(name string) bool { matched, _ := filepath.Match(p.rawPattern, name) // Match the whole of the base name but allow matching in folders if no path return matched || p.wildcardRE.MatchString(filepath.Base(name)) } func (p *pathlessWildcardPattern) HasPrefix(name string) bool { lit, ok := p.wildcardRE.LiteralPrefix() if !ok { return true } return strings.HasPrefix(name, lit) } // String returns a string representation of the underlying pattern for which // this *pathlessWildcardPattern is matching. func (p *pathlessWildcardPattern) String() string { return p.rawPattern } type doubleWildcardPattern struct { rawPattern string wildcardRE *regexp.Regexp } // Match is a revised version of filepath.Match which makes it behave more // like gitignore func (p *doubleWildcardPattern) Match(name string) bool { matched, _ := filepath.Match(p.rawPattern, name) // Match the whole of the base name but allow matching in folders if no path return matched || p.wildcardRE.MatchString(name) } func (p *doubleWildcardPattern) HasPrefix(name string) bool { lit, ok := p.wildcardRE.LiteralPrefix() if !ok { return true } return strings.HasPrefix(name, lit) } // String returns a string representation of the underlying pattern for which // this *doubleWildcardPattern is matching. 
func (p *doubleWildcardPattern) String() string { return p.rawPattern } type noOpMatcher struct { } func (n noOpMatcher) Match(name string) bool { return true } func (n noOpMatcher) HasPrefix(name string) bool { return true } func (n noOpMatcher) String() string { return "" } var localDirSet = map[string]struct{}{ ".": struct{}{}, "./": struct{}{}, ".\\": struct{}{}, } git-lfs-2.3.4/filepathfilter/filepathfilter_test.go000066400000000000000000000267041317167762300225050ustar00rootroot00000000000000package filepathfilter import ( "path/filepath" "runtime" "strings" "testing" "github.com/stretchr/testify/assert" ) func TestPatternMatch(t *testing.T) { assertPatternMatch(t, "filename.txt", "filename.txt") assertPatternMatch(t, "*.txt", "filename.txt") refutePatternMatch(t, "*.tx", "filename.txt") assertPatternMatch(t, "f*.txt", "filename.txt") refutePatternMatch(t, "g*.txt", "filename.txt") assertPatternMatch(t, "file*", "filename.txt") refutePatternMatch(t, "file", "filename.txt") // With no path separators, should match in subfolders assertPatternMatch(t, "*.txt", "sub/filename.txt") refutePatternMatch(t, "*.tx", "sub/filename.txt") assertPatternMatch(t, "f*.txt", "sub/filename.txt") refutePatternMatch(t, "g*.txt", "sub/filename.txt") assertPatternMatch(t, "file*", "sub/filename.txt") refutePatternMatch(t, "file", "sub/filename.txt") // matches only in subdir assertPatternMatch(t, "sub/*.txt", "sub/filename.txt") refutePatternMatch(t, "sub/*.txt", "top/sub/filename.txt") refutePatternMatch(t, "sub/*.txt", "sub/filename.dat") refutePatternMatch(t, "sub/*.txt", "other/filename.txt") // Needs wildcard for exact filename assertPatternMatch(t, "**/filename.txt", "sub/sub/sub/filename.txt") // Should not match dots to subparts refutePatternMatch(t, "*.ign", "sub/shouldignoreme.txt") // Path specific assertPatternMatch(t, "sub", "sub/") assertPatternMatch(t, "sub", "sub") assertPatternMatch(t, "sub", "sub/filename.txt") assertPatternMatch(t, "sub/", "sub/filename.txt") assertPatternMatch(t, "sub", "top/sub/filename.txt") assertPatternMatch(t, "sub/", "top/sub/filename.txt") assertPatternMatch(t, "sub", "top/sub/") assertPatternMatch(t, "sub", "top/sub") assertPatternMatch(t, "/sub", "sub/") assertPatternMatch(t, "/sub", "sub") assertPatternMatch(t, "/sub", "sub/filename.txt") assertPatternMatch(t, "/sub/", "sub/filename.txt") refutePatternMatch(t, "/sub", "top/sub/filename.txt") refutePatternMatch(t, "/sub/", "top/sub/filename.txt") refutePatternMatch(t, "/sub", "top/sub/") refutePatternMatch(t, "/sub", "top/sub") refutePatternMatch(t, "sub", "subfilename.txt") refutePatternMatch(t, "sub/", "subfilename.txt") refutePatternMatch(t, "/sub", "subfilename.txt") refutePatternMatch(t, "/sub/", "subfilename.txt") // nested path assertPatternMatch(t, "top/sub", "top/sub/filename.txt") assertPatternMatch(t, "top/sub/", "top/sub/filename.txt") assertPatternMatch(t, "top/sub", "top/sub/") assertPatternMatch(t, "top/sub", "top/sub") assertPatternMatch(t, "top/sub", "root/top/sub/filename.txt") assertPatternMatch(t, "top/sub/", "root/top/sub/filename.txt") assertPatternMatch(t, "top/sub", "root/top/sub/") assertPatternMatch(t, "top/sub", "root/top/sub") assertPatternMatch(t, "/top/sub", "top/sub/filename.txt") assertPatternMatch(t, "/top/sub/", "top/sub/filename.txt") assertPatternMatch(t, "/top/sub", "top/sub/") assertPatternMatch(t, "/top/sub", "top/sub") refutePatternMatch(t, "/top/sub", "root/top/sub/filename.txt") refutePatternMatch(t, "/top/sub/", "root/top/sub/filename.txt") refutePatternMatch(t, 
"/top/sub", "root/top/sub/") refutePatternMatch(t, "/top/sub", "root/top/sub") refutePatternMatch(t, "top/sub", "top/subfilename.txt") refutePatternMatch(t, "top/sub/", "top/subfilename.txt") refutePatternMatch(t, "/top/sub", "top/subfilename.txt") refutePatternMatch(t, "/top/sub/", "top/subfilename.txt") // Absolute assertPatternMatch(t, "*.dat", "/path/to/sub/.git/test.dat") assertPatternMatch(t, "**/.git", "/path/to/sub/.git") // Match anything assertPatternMatch(t, ".", "path.txt") assertPatternMatch(t, "./", "path.txt") assertPatternMatch(t, ".\\", "path.txt") } func assertPatternMatch(t *testing.T, pattern, filename string) { assert.True(t, patternMatch(pattern, filename), "%q should match pattern %q", filename, pattern) } func refutePatternMatch(t *testing.T, pattern, filename string) { assert.False(t, patternMatch(pattern, filename), "%q should not match pattern %q", filename, pattern) } func patternMatch(pattern, filename string) bool { return NewPattern(pattern).Match(filepath.Clean(filename)) } type filterTest struct { expectedResult bool expectedPattern string includes []string excludes []string } type filterPrefixTest struct { expected bool prefixes []string includes []string excludes []string } func (c *filterPrefixTest) Assert(t *testing.T) { f := New(c.platformIncludes(), c.platformExcludes()) prefixes := c.prefixes if runtime.GOOS == "windows" { prefixes = toWindowsPaths(prefixes) } for _, prefix := range prefixes { assert.Equal(t, c.expected, f.HasPrefix(prefix), "expected=%v, prefix=%s", c.expected, prefix) } } func (c *filterPrefixTest) platformIncludes() []string { if runtime.GOOS == "windows" { return toWindowsPaths(c.includes) } return c.includes } func (c *filterPrefixTest) platformExcludes() []string { if runtime.GOOS == "windows" { return toWindowsPaths(c.excludes) } return c.excludes } func toWindowsPaths(paths []string) []string { var out []string for _, path := range paths { out = append(out, strings.Replace(path, "/", "\\", -1)) } return out } func TestFilterHasPrefix(t *testing.T) { prefixes := []string{"foo", "foo/", "foo/bar", "foo/bar/baz", "foo/bar/baz/"} for desc, c := range map[string]*filterPrefixTest{ "empty filter": {true, prefixes, nil, nil}, "path prefix pattern": {true, prefixes, []string{"/foo/bar/baz"}, nil}, "path pattern": {true, prefixes, []string{"foo/bar/baz"}, nil}, "simple ext pattern": {true, prefixes, []string{"*.dat"}, nil}, "pathless wildcard pattern": {true, prefixes, []string{"foo*.dat"}, nil}, "double wildcard pattern": {true, prefixes, []string{"foo/**/baz"}, nil}, "include other dir": {false, prefixes, []string{"other"}, nil}, "exclude pattern": {true, prefixes, nil, []string{"other"}}, "exclude simple ext pattern": {true, prefixes, nil, []string{"*.dat"}}, "exclude pathless wildcard pattern": {true, prefixes, nil, []string{"foo*.dat"}}, } { t.Run(desc, c.Assert) } prefixes = []string{"foo", "foo/", "foo/bar"} for desc, c := range map[string]*filterPrefixTest{ "exclude path prefix pattern": {true, prefixes, nil, []string{"/foo/bar/baz"}}, "exclude path pattern": {true, prefixes, nil, []string{"foo/bar/baz"}}, "exclude double wildcard pattern": {true, prefixes, nil, []string{"foo/**/baz"}}, } { t.Run(desc, c.Assert) } prefixes = []string{"foo/bar/baz", "foo/bar/baz/"} for desc, c := range map[string]*filterPrefixTest{ "exclude path prefix pattern": {false, prefixes, nil, []string{"/foo/bar/baz"}}, "exclude path pattern": {false, prefixes, nil, []string{"foo/bar/baz"}}, } { t.Run(desc, c.Assert) } prefixes = []string{"foo/bar/baz", 
"foo/test/baz"} for desc, c := range map[string]*filterPrefixTest{ "exclude double wildcard pattern": {false, prefixes, nil, []string{"foo/**/baz"}}, } { t.Run(desc, c.Assert) } } func TestFilterAllows(t *testing.T) { cases := []filterTest{ // Null case filterTest{true, "", nil, nil}, // Inclusion filterTest{true, "*.dat", []string{"*.dat"}, nil}, filterTest{true, "file*.dat", []string{"file*.dat"}, nil}, filterTest{true, "file*", []string{"file*"}, nil}, filterTest{true, "*name.dat", []string{"*name.dat"}, nil}, filterTest{false, "", []string{"/*.dat"}, nil}, filterTest{false, "", []string{"otherfolder/*.dat"}, nil}, filterTest{false, "", []string{"*.nam"}, nil}, filterTest{true, "test/filename.dat", []string{"test/filename.dat"}, nil}, filterTest{true, "test/filename.dat", []string{"test/filename.dat"}, nil}, filterTest{false, "", []string{"blank", "something", "foo"}, nil}, filterTest{false, "", []string{"test/notfilename.dat"}, nil}, filterTest{true, "test", []string{"test"}, nil}, filterTest{true, "test/*", []string{"test/*"}, nil}, filterTest{false, "", []string{"nottest"}, nil}, filterTest{false, "", []string{"nottest/*"}, nil}, filterTest{true, "test/fil*", []string{"test/fil*"}, nil}, filterTest{false, "", []string{"test/g*"}, nil}, filterTest{true, "tes*/*", []string{"tes*/*"}, nil}, filterTest{true, "[Tt]est/[Ff]ilename.dat", []string{"[Tt]est/[Ff]ilename.dat"}, nil}, // Exclusion filterTest{false, "*.dat", nil, []string{"*.dat"}}, filterTest{false, "file*.dat", nil, []string{"file*.dat"}}, filterTest{false, "file*", nil, []string{"file*"}}, filterTest{false, "*name.dat", nil, []string{"*name.dat"}}, filterTest{true, "", nil, []string{"/*.dat"}}, filterTest{true, "", nil, []string{"otherfolder/*.dat"}}, filterTest{false, "test/filename.dat", nil, []string{"test/filename.dat"}}, filterTest{false, "test/filename.dat", nil, []string{"blank", "something", "test/filename.dat", "foo"}}, filterTest{true, "", nil, []string{"blank", "something", "foo"}}, filterTest{true, "", nil, []string{"test/notfilename.dat"}}, filterTest{false, "test", nil, []string{"test"}}, filterTest{false, "test/*", nil, []string{"test/*"}}, filterTest{true, "", nil, []string{"nottest"}}, filterTest{true, "", nil, []string{"nottest/*"}}, filterTest{false, "test/fil*", nil, []string{"test/fil*"}}, filterTest{true, "", nil, []string{"test/g*"}}, filterTest{false, "tes*/*", nil, []string{"tes*/*"}}, filterTest{false, "[Tt]est/[Ff]ilename.dat", nil, []string{"[Tt]est/[Ff]ilename.dat"}}, // // Both filterTest{true, "test/filename.dat", []string{"test/filename.dat"}, []string{"test/notfilename.dat"}}, filterTest{false, "test/filename.dat", []string{"test"}, []string{"test/filename.dat"}}, filterTest{true, "test/*", []string{"test/*"}, []string{"test/notfile*"}}, filterTest{false, "test/file*", []string{"test/*"}, []string{"test/file*"}}, filterTest{false, "test/filename.dat", []string{"another/*", "test/*"}, []string{"test/notfilename.dat", "test/filename.dat"}}, } for _, c := range cases { if runtime.GOOS == "windows" { c.expectedPattern = strings.Replace(c.expectedPattern, "/", "\\", -1) } filter := New(c.includes, c.excludes) r1 := filter.Allows("test/filename.dat") pattern, r2 := filter.AllowsPattern("test/filename.dat") assert.Equal(t, r1, r2, "filepathfilter: expected Allows() and AllowsPattern() to return identical result") assert.Equal(t, c.expectedResult, r2, "includes: %v excludes: %v", c.includes, c.excludes) assert.Equal(t, c.expectedPattern, pattern, "filepathfilter: expected pattern match of: %q, got: 
%q", c.expectedPattern, pattern) if runtime.GOOS == "windows" { // also test with \ path separators, tolerate mixed separators for i, inc := range c.includes { c.includes[i] = strings.Replace(inc, "/", "\\", -1) } for i, ex := range c.excludes { c.excludes[i] = strings.Replace(ex, "/", "\\", -1) } filter = New(c.includes, c.excludes) r1 = filter.Allows("test/filename.dat") pattern, r2 = filter.AllowsPattern("test/filename.dat") assert.Equal(t, r1, r2, "filepathfilter: expected Allows() and AllowsPattern() to return identical result") assert.Equal(t, c.expectedResult, r1, c) assert.Equal(t, c.expectedPattern, pattern, "filepathfilter: expected pattern match of: %q, got: %q", c.expectedPattern, pattern) } } } func TestFilterReportsIncludePatterns(t *testing.T) { filter := New([]string{"*.foo", "*.bar"}, nil) assert.Equal(t, []string{"*.foo", "*.bar"}, filter.Include()) } func TestFilterReportsExcludePatterns(t *testing.T) { filter := New(nil, []string{"*.baz", "*.quux"}) assert.Equal(t, []string{"*.baz", "*.quux"}, filter.Exclude()) } git-lfs-2.3.4/git-lfs.go000066400000000000000000000011331317167762300147740ustar00rootroot00000000000000//go:generate goversioninfo -icon=script/windows-installer/git-lfs-logo.ico package main import ( "fmt" "os" "os/signal" "sync" "syscall" "github.com/git-lfs/git-lfs/commands" ) func main() { c := make(chan os.Signal) signal.Notify(c, os.Interrupt, os.Kill) var once sync.Once go func() { for { sig := <-c once.Do(commands.Cleanup) fmt.Fprintf(os.Stderr, "\nExiting because of %q signal.\n", sig) exitCode := 1 if sysSig, ok := sig.(syscall.Signal); ok { exitCode = int(sysSig) } os.Exit(exitCode + 128) } }() commands.Run() once.Do(commands.Cleanup) } git-lfs-2.3.4/git/000077500000000000000000000000001317167762300136655ustar00rootroot00000000000000git-lfs-2.3.4/git/attribs.go000066400000000000000000000076371317167762300157010ustar00rootroot00000000000000package git import ( "bufio" "bytes" "os" "path/filepath" "strings" "github.com/git-lfs/git-lfs/tools" "github.com/rubyist/tracerx" ) const ( LockableAttrib = "lockable" ) // AttributePath is a path entry in a gitattributes file which has the LFS filter type AttributePath struct { // Path entry in the attribute file Path string // The attribute file which was the source of this entry Source *AttributeSource // Path also has the 'lockable' attribute Lockable bool } type AttributeSource struct { Path string LineEnding string } func (s *AttributeSource) String() string { return s.Path } // GetAttributePaths returns a list of entries in .gitattributes which are // configured with the filter=lfs attribute // workingDIr is the root of the working copy // gitDir is the root of the git repo func GetAttributePaths(workingDir, gitDir string) []AttributePath { paths := make([]AttributePath, 0) for _, path := range findAttributeFiles(workingDir, gitDir) { attributes, err := os.Open(path) if err != nil { continue } relfile, _ := filepath.Rel(workingDir, path) reldir := filepath.Dir(relfile) source := &AttributeSource{Path: relfile} le := &lineEndingSplitter{} scanner := bufio.NewScanner(attributes) scanner.Split(le.ScanLines) for scanner.Scan() { line := scanner.Text() // Check for filter=lfs (signifying that LFS is tracking // this file) or "lockable", which indicates that the // file is lockable (and may or may not be tracked by // Git LFS). 
if strings.Contains(line, "filter=lfs") || strings.HasSuffix(line, "lockable") { fields := strings.Fields(line) pattern := fields[0] if len(reldir) > 0 { pattern = filepath.Join(reldir, pattern) } // Find lockable flag in any position after pattern to avoid // edge case of matching "lockable" to a file pattern lockable := false for _, f := range fields[1:] { if f == LockableAttrib { lockable = true break } } paths = append(paths, AttributePath{ Path: pattern, Source: source, Lockable: lockable, }) } } source.LineEnding = le.LineEnding() } return paths } // copies bufio.ScanLines(), counting LF vs CRLF in a file type lineEndingSplitter struct { LFCount int CRLFCount int } func (s *lineEndingSplitter) LineEnding() string { if s.CRLFCount > s.LFCount { return "\r\n" } else if s.LFCount == 0 { return "" } return "\n" } func (s *lineEndingSplitter) ScanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { if atEOF && len(data) == 0 { return 0, nil, nil } if i := bytes.IndexByte(data, '\n'); i >= 0 { // We have a full newline-terminated line. return i + 1, s.dropCR(data[0:i]), nil } // If we're at EOF, we have a final, non-terminated line. Return it. if atEOF { return len(data), data, nil } // Request more data. return 0, nil, nil } // dropCR drops a terminal \r from the data. func (s *lineEndingSplitter) dropCR(data []byte) []byte { if len(data) > 0 && data[len(data)-1] == '\r' { s.CRLFCount++ return data[0 : len(data)-1] } s.LFCount++ return data } func findAttributeFiles(workingDir, gitDir string) []string { var paths []string repoAttributes := filepath.Join(gitDir, "info", "attributes") if info, err := os.Stat(repoAttributes); err == nil && !info.IsDir() { paths = append(paths, repoAttributes) } tools.FastWalkGitRepo(workingDir, func(parentDir string, info os.FileInfo, err error) { if err != nil { tracerx.Printf("Error finding .gitattributes: %v", err) return } if info.IsDir() || info.Name() != ".gitattributes" { return } paths = append(paths, filepath.Join(parentDir, info.Name())) }) // reverse the order of the files so more specific entries are found first // when iterating from the front (respects precedence) for i, j := 0, len(paths)-1; i < j; i, j = i+1, j-1 { paths[i], paths[j] = paths[j], paths[i] } return paths } git-lfs-2.3.4/git/filter_process_scanner.go000066400000000000000000000157361317167762300207620ustar00rootroot00000000000000// Package git contains various commands that shell out to git // NOTE: Subject to change, do not rely on this package from outside git-lfs source package git import ( "fmt" "io" "strings" "github.com/git-lfs/git-lfs/errors" "github.com/rubyist/tracerx" ) // FilterProcessScanner provides a scanner-like interface capable of // initializing the filter process with the Git parent, and scanning for // requests across the protocol. // // Reading a request (and errors) is as follows: // // s := NewFilterProcessScanner(os.Stdin, os.Stdout) // for s.Scan() { // req := s.Request() // // ... // } // // if err := s.Err(); err != nil { // // ... // } type FilterProcessScanner struct { // pl is the *pktline instance used to read and write packets back and // forth between Git. pl *pktline // req is a temporary variable used to hold the value accessible by the // `Request()` function. It is cleared at the beginning of each `Scan()` // invocation, and written to at the end of each `Scan()` invocation. req *Request // err is a temporary variable used to hold the value accessible by the // `Err()` function.
It is cleared at the beginning of each `Scan()` // invocation, and written to at the end of each `Scan()` invocation. err error } // NewFilterProcessScanner constructs a new instance of the // `*FilterProcessScanner` type which reads packets from the `io.Reader` "r", // and writes packets to the `io.Writer`, "w". // // Both reader and writers SHOULD NOT be `*git.PacketReader` or // `*git.PacketWriter`s, they will be transparently treated as such. In other // words, it is safe (and recommended) to pass `os.Stdin` and `os.Stdout` // directly. func NewFilterProcessScanner(r io.Reader, w io.Writer) *FilterProcessScanner { return &FilterProcessScanner{ pl: newPktline(r, w), } } // Init initializes the filter and ACKs back and forth between the Git LFS // subprocess and the Git parent process that each is a git-filter-server and // client respectively. // // If either side wrote an invalid sequence of data, or did not meet // expectations, an error will be returned. If the filter type is not supported, // an error will be returned. If the pkt-line welcome message was invalid, an // error will be returned. // // If there was an error reading or writing any of the packets below, an error // will be returned. func (o *FilterProcessScanner) Init() error { tracerx.Printf("Initialize filter-process") reqVer := "version=2" initMsg, err := o.pl.readPacketText() if err != nil { return errors.Wrap(err, "reading filter-process initialization") } if initMsg != "git-filter-client" { return fmt.Errorf("invalid filter-process pkt-line welcome message: %s", initMsg) } supVers, err := o.pl.readPacketList() if err != nil { return errors.Wrap(err, "reading filter-process versions") } if !isStringInSlice(supVers, reqVer) { return fmt.Errorf("filter '%s' not supported (your Git supports: %s)", reqVer, supVers) } err = o.pl.writePacketList([]string{"git-filter-server", reqVer}) if err != nil { return errors.Wrap(err, "writing filter-process initialization failed") } return nil } // NegotiateCapabilities executes the process of negotiating capabilities // between the filter client and server. If we don't support any of the // capabilities given to LFS by Git, an error will be returned. If there was an // error reading or writing capabilities between the two, an error will be // returned. func (o *FilterProcessScanner) NegotiateCapabilities() ([]string, error) { reqCaps := []string{"capability=clean", "capability=smudge"} supCaps, err := o.pl.readPacketList() if err != nil { return nil, fmt.Errorf("reading filter-process capabilities failed with %s", err) } for _, sup := range supCaps { if sup == "capability=delay" { reqCaps = append(reqCaps, "capability=delay") break } } for _, reqCap := range reqCaps { if !isStringInSlice(supCaps, reqCap) { return nil, fmt.Errorf("filter '%s' not supported (your Git supports: %s)", reqCap, supCaps) } } err = o.pl.writePacketList(reqCaps) if err != nil { return nil, fmt.Errorf("writing filter-process capabilities failed with %s", err) } return supCaps, nil } // Request represents a single command sent to LFS from the parent Git process. type Request struct { // Header maps header strings to values, and is encoded as the first // part of the packet. Header map[string]string // Payload represents the body of the packet, and contains the contents // of the file in the index. Payload io.Reader } // Scan scans for the next request, or error and returns whether or not the scan // was successful, indicating the presence of a valid request. 
If the Scan // failed, there was either an error reading the next request (and the results // of calling `Err()` should be inspected), or the pipe was closed and no more // requests are present. // // Closing the pipe is Git's way to communicate that no more files will be // filtered. Git expects that the LFS process exits after this event. func (o *FilterProcessScanner) Scan() bool { o.req, o.err = nil, nil req, err := o.readRequest() if err != nil { o.err = err return false } o.req = req return true } // Request returns the request read from a call to Scan(). It is available only // after a call to `Scan()` has completed, and is re-initialized to nil at the // beginning of the subsequent `Scan()` call. func (o *FilterProcessScanner) Request() *Request { return o.req } // Err returns any error encountered from the last call to Scan(). It is available only // after a call to `Scan()` has completed, and is re-initialized to nil at the // beginning of the subsequent `Scan()` call. func (o *FilterProcessScanner) Err() error { return o.err } // readRequest reads the headers of a request and yields an `io.Reader` which // will read the body of the request. Since the body is _not_ offset, one // request should be read in its entirety before consuming the next request. func (o *FilterProcessScanner) readRequest() (*Request, error) { tracerx.Printf("Read filter-process request.") requestList, err := o.pl.readPacketList() if err != nil { return nil, err } req := &Request{ Header: make(map[string]string), Payload: &pktlineReader{pl: o.pl}, } for _, pair := range requestList { v := strings.SplitN(pair, "=", 2) req.Header[v[0]] = v[1] } return req, nil } // WriteList writes a list of strings to the underlying pktline data stream in // pktline format. func (o *FilterProcessScanner) WriteList(list []string) error { return o.pl.writePacketList(list) } func (o *FilterProcessScanner) WriteStatus(status FilterProcessStatus) error { return o.pl.writePacketList([]string{"status=" + status.String()}) } // isStringInSlice returns whether a given string "what" is contained in a // slice, "s". // // isStringInSlice is copied from "github.com/xeipuuv/gojsonschema/utils.go" func isStringInSlice(s []string, what string) bool { for i := range s { if s[i] == what { return true } } return false } git-lfs-2.3.4/git/filter_process_scanner_test.go000066400000000000000000000107121317167762300220100ustar00rootroot00000000000000package git import ( "bytes" "io/ioutil" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestFilterProcessScannerInitializesWithCorrectSupportedValues(t *testing.T) { var from, to bytes.Buffer pl := newPktline(nil, &from) if err := pl.writePacketText("git-filter-client"); err != nil { t.Fatalf("expected... 
%v", err.Error()) } require.Nil(t, pl.writePacketText("git-filter-client")) require.Nil(t, pl.writePacketList([]string{"version=2"})) fps := NewFilterProcessScanner(&from, &to) err := fps.Init() assert.Nil(t, err) out, err := newPktline(&to, nil).readPacketList() assert.Nil(t, err) assert.Equal(t, []string{"git-filter-server", "version=2"}, out) } func TestFilterProcessScannerRejectsUnrecognizedInitializationMessages(t *testing.T) { var from, to bytes.Buffer pl := newPktline(nil, &from) require.Nil(t, pl.writePacketText("git-filter-client-unknown")) require.Nil(t, pl.writeFlush()) fps := NewFilterProcessScanner(&from, &to) err := fps.Init() require.NotNil(t, err) assert.Equal(t, "invalid filter-process pkt-line welcome message: git-filter-client-unknown", err.Error()) assert.Empty(t, to.Bytes()) } func TestFilterProcessScannerRejectsUnsupportedFilters(t *testing.T) { var from, to bytes.Buffer pl := newPktline(nil, &from) require.Nil(t, pl.writePacketText("git-filter-client")) // Write an unsupported version require.Nil(t, pl.writePacketList([]string{"version=0"})) fps := NewFilterProcessScanner(&from, &to) err := fps.Init() require.NotNil(t, err) assert.Equal(t, "filter 'version=2' not supported (your Git supports: [version=0])", err.Error()) assert.Empty(t, to.Bytes()) } func TestFilterProcessScannerNegotitatesSupportedCapabilities(t *testing.T) { var from, to bytes.Buffer pl := newPktline(nil, &from) require.Nil(t, pl.writePacketList([]string{ "capability=clean", "capability=smudge", "capability=not-invented-yet", })) fps := NewFilterProcessScanner(&from, &to) caps, err := fps.NegotiateCapabilities() assert.Contains(t, caps, "capability=clean") assert.Contains(t, caps, "capability=smudge") assert.Nil(t, err) out, err := newPktline(&to, nil).readPacketList() assert.Nil(t, err) assert.Equal(t, []string{"capability=clean", "capability=smudge"}, out) } func TestFilterProcessScannerDoesNotNegotitatesUnsupportedCapabilities(t *testing.T) { var from, to bytes.Buffer pl := newPktline(nil, &from) // Write an unsupported capability require.Nil(t, pl.writePacketList([]string{ "capability=unsupported", })) fps := NewFilterProcessScanner(&from, &to) caps, err := fps.NegotiateCapabilities() require.NotNil(t, err) assert.Empty(t, caps) assert.Equal(t, "filter 'capability=clean' not supported (your Git supports: [capability=unsupported])", err.Error()) assert.Empty(t, to.Bytes()) } func TestFilterProcessScannerReadsRequestHeadersAndPayload(t *testing.T) { var from, to bytes.Buffer pl := newPktline(nil, &from) // Headers require.Nil(t, pl.writePacketList([]string{ "foo=bar", "other=woot", "crazy='sq',\\$x=.bin", })) // Multi-line packet require.Nil(t, pl.writePacketText("first")) require.Nil(t, pl.writePacketText("second")) require.Nil(t, pl.writeFlush()) req, err := readRequest(NewFilterProcessScanner(&from, &to)) assert.Nil(t, err) assert.Equal(t, req.Header["foo"], "bar") assert.Equal(t, req.Header["other"], "woot") assert.Equal(t, req.Header["crazy"], "'sq',\\$x=.bin") payload, err := ioutil.ReadAll(req.Payload) assert.Nil(t, err) assert.Equal(t, []byte("first\nsecond\n"), payload) } func TestFilterProcessScannerRejectsInvalidHeaderPackets(t *testing.T) { from := bytes.NewBuffer([]byte{ 0x30, 0x30, 0x30, 0x34, // 0004 (invalid packet length) }) req, err := readRequest(NewFilterProcessScanner(from, nil)) require.NotNil(t, err) assert.Equal(t, "Invalid packet length.", err.Error()) assert.Nil(t, req) } func TestFilterProcessScannerWritesLists(t *testing.T) { var to bytes.Buffer fps := 
NewFilterProcessScanner(nil, &to) err := fps.WriteList([]string{"hello", "goodbye"}) assert.NoError(t, err) assert.Equal(t, "000ahello\n000cgoodbye\n0000", to.String()) } // readRequest performs a single scan operation on the given // `*FilterProcessScanner`, "s", and returns an error if there was one, or a // request if there was one. If neither, it returns (nil, nil). func readRequest(s *FilterProcessScanner) (*Request, error) { s.Scan() if err := s.Err(); err != nil { return nil, err } return s.Request(), nil } git-lfs-2.3.4/git/filter_process_status.go000066400000000000000000000015661317167762300206500ustar00rootroot00000000000000package git import "fmt" // FilterProcessStatus is a constant type representing the various valid // responses for `status=` in the Git filtering process protocol. type FilterProcessStatus uint8 const ( // StatusSuccess is a valid response when a successful event has // occurred. StatusSuccess FilterProcessStatus = iota + 1 // StatusDelay is a valid response when a delay has occurred. StatusDelay // StatusError is a valid response when an error has occurred. StatusError ) // String implements fmt.Stringer by returning a protocol-compliant // representation of the receiving status, or panic()-ing if the Status is // unknown. func (s FilterProcessStatus) String() string { switch s { case StatusSuccess: return "success" case StatusDelay: return "delayed" case StatusError: return "error" } panic(fmt.Sprintf("git: unknown FilterProcessStatus '%d'", s)) } git-lfs-2.3.4/git/git.go000066400000000000000000001127721317167762300150070ustar00rootroot00000000000000// Package git contains various commands that shell out to git // NOTE: Subject to change, do not rely on this package from outside git-lfs source package git import ( "bufio" "bytes" "encoding/hex" "errors" "fmt" "io" "io/ioutil" "net/url" "os" "path/filepath" "regexp" "strconv" "strings" "sync" "time" lfserrors "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/subprocess" "github.com/git-lfs/git-lfs/tools" "github.com/rubyist/tracerx" ) type RefType int const ( RefTypeLocalBranch = RefType(iota) RefTypeRemoteBranch = RefType(iota) RefTypeLocalTag = RefType(iota) RefTypeRemoteTag = RefType(iota) RefTypeHEAD = RefType(iota) // current checkout RefTypeOther = RefType(iota) // stash or unknown // A ref which can be used as a placeholder for before the first commit // Equivalent to git mktree < /dev/null, useful for diffing before first commit RefBeforeFirstCommit = "4b825dc642cb6eb9a060e54bf8d69288fbee4904" )
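// Illustrative sketch (not part of the original source): formatting a
// fully-qualified refspec from a Ref using Prefix(), mirroring what
// UpdateRefIn() does further below.
func exampleRefspec(ref *Ref) string {
	if prefix, ok := ref.Type.Prefix(); ok {
		return fmt.Sprintf("%s/%s", prefix, ref.Name)
	}
	return ref.Name
}

// Prefix returns the given RefType's prefix, "refs/heads", "refs/remotes",
// etc. It returns an additional value of either true/false, whether or not this
// given ref type has a prefix.
//
// If the RefType is unrecognized, Prefix() will panic.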
func (t RefType) Prefix() (string, bool) { switch t { case RefTypeLocalBranch: return "refs/heads", true case RefTypeRemoteBranch: return "refs/remotes", true case RefTypeLocalTag: return "refs/tags", true case RefTypeRemoteTag: return "refs/remotes/tags", true case RefTypeHEAD: return "", false case RefTypeOther: return "", false default: panic(fmt.Sprintf("git: unknown RefType %d", t)) } } // A git reference (branch, tag etc) type Ref struct { Name string Type RefType Sha string } // Some top level information about a commit (only first line of message) type CommitSummary struct { Sha string ShortSha string Parents []string CommitDate time.Time AuthorDate time.Time AuthorName string AuthorEmail string CommitterName string CommitterEmail string Subject string } // Prepend Git config instructions to disable Git LFS filter func gitConfigNoLFS(args ...string) []string { // Before git 2.8, setting filters to blank causes lots of warnings, so use cat instead (slightly slower) // Also pre 2.2 it failed completely. We used to use it anyway in git 2.2-2.7 and // suppress the messages in stderr, but doing that with standard StderrPipe suppresses // the git clone output (git thinks it's not a terminal) and makes it look like it's // not working. You can get around that with https://github.com/kr/pty but that // causes difficult issues with passing through Stdin for login prompts // This way is simpler & more practical. filterOverride := "" if !Config.IsGitVersionAtLeast("2.8.0") { filterOverride = "cat" } return append([]string{ "-c", fmt.Sprintf("filter.lfs.smudge=%v", filterOverride), "-c", fmt.Sprintf("filter.lfs.clean=%v", filterOverride), "-c", "filter.lfs.process=", "-c", "filter.lfs.required=false", }, args...) } // Invoke Git with disabled LFS filters func gitNoLFS(args ...string) *subprocess.Cmd { return subprocess.ExecCommand("git", gitConfigNoLFS(args...)...) } func gitNoLFSSimple(args ...string) (string, error) { return subprocess.SimpleExec("git", gitConfigNoLFS(args...)...) } func gitNoLFSBuffered(args ...string) (*subprocess.BufferedCmd, error) { return subprocess.BufferedExec("git", gitConfigNoLFS(args...)...) } // Invoke Git with enabled LFS filters func git(args ...string) *subprocess.Cmd { return subprocess.ExecCommand("git", args...) } func gitSimple(args ...string) (string, error) { return subprocess.SimpleExec("git", args...) } func gitBuffered(args ...string) (*subprocess.BufferedCmd, error) { return subprocess.BufferedExec("git", args...) } func CatFile() (*subprocess.BufferedCmd, error) { return gitNoLFSBuffered("cat-file", "--batch-check") } func DiffIndex(ref string, cached bool) (*bufio.Scanner, error) { args := []string{"diff-index", "-M"} if cached { args = append(args, "--cached") } args = append(args, ref) cmd, err := gitBuffered(args...) if err != nil { return nil, err } if err = cmd.Stdin.Close(); err != nil { return nil, err } return bufio.NewScanner(cmd.Stdout), nil } func HashObject(r io.Reader) (string, error) { cmd := gitNoLFS("hash-object", "--stdin") cmd.Stdin = r out, err := cmd.Output() if err != nil { return "", fmt.Errorf("Error building Git blob OID: %s", err) } return string(bytes.TrimSpace(out)), nil } func Log(args ...string) (*subprocess.BufferedCmd, error) { logArgs := append([]string{"log"}, args...) return gitNoLFSBuffered(logArgs...) 
} func LsRemote(remote, remoteRef string) (string, error) { if remote == "" { return "", errors.New("remote required") } if remoteRef == "" { return gitNoLFSSimple("ls-remote", remote) } return gitNoLFSSimple("ls-remote", remote, remoteRef) } func LsTree(ref string) (*subprocess.BufferedCmd, error) { return gitNoLFSBuffered( "ls-tree", "-r", // recurse "-l", // report object size (we'll need this) "-z", // null line termination "--full-tree", // start at the root regardless of where we are in it ref, ) } func ResolveRef(ref string) (*Ref, error) { outp, err := gitNoLFSSimple("rev-parse", ref, "--symbolic-full-name", ref) if err != nil { return nil, fmt.Errorf("Git can't resolve ref: %q", ref) } if outp == "" { return nil, fmt.Errorf("Git can't resolve ref: %q", ref) } lines := strings.Split(outp, "\n") fullref := &Ref{Sha: lines[0]} if len(lines) == 1 { // ref is a sha1 and has no symbolic-full-name fullref.Name = lines[0] // fullref.Sha fullref.Type = RefTypeOther return fullref, nil } // parse the symbolic-full-name fullref.Type, fullref.Name = ParseRefToTypeAndName(lines[1]) return fullref, nil } func ResolveRefs(refnames []string) ([]*Ref, error) { refs := make([]*Ref, len(refnames)) for i, name := range refnames { ref, err := ResolveRef(name) if err != nil { return refs, err } refs[i] = ref } return refs, nil } func CurrentRef() (*Ref, error) { return ResolveRef("HEAD") } func CurrentRemoteRef() (*Ref, error) { remoteref, err := RemoteRefNameForCurrentBranch() if err != nil { return nil, err } return ResolveRef(remoteref) } // RemoteForCurrentBranch returns the name of the remote that the current branch is tracking func RemoteForCurrentBranch() (string, error) { ref, err := CurrentRef() if err != nil { return "", err } remote := RemoteForBranch(ref.Name) if remote == "" { return "", fmt.Errorf("remote not found for branch %q", ref.Name) } return remote, nil }
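// Sketch (not part of the original source): the git config keys consulted
// when computing a tracking ref, as RemoteRefNameForCurrentBranch does below.
// The branch and remote names are made-up example values:
//
//	branch.feature.remote = origin          // read by RemoteForBranch
//	branch.feature.merge  = refs/heads/main // read by RemoteBranchForLocalBranch
//
// yielding the tracking ref "refs/remotes/origin/main".

// RemoteRefNameForCurrentBranch returns the full remote ref
// (refs/remotes/{remote}/{remotebranch}) that the current branch is tracking.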
func RemoteRefNameForCurrentBranch() (string, error) { ref, err := CurrentRef() if err != nil { return "", err } if ref.Type == RefTypeHEAD || ref.Type == RefTypeOther { return "", errors.New("not on a branch") } remote := RemoteForBranch(ref.Name) if remote == "" { return "", fmt.Errorf("remote not found for branch %q", ref.Name) } remotebranch := RemoteBranchForLocalBranch(ref.Name) return fmt.Sprintf("refs/remotes/%s/%s", remote, remotebranch), nil } // RemoteForBranch returns the remote name that a given local branch is tracking (blank if none) func RemoteForBranch(localBranch string) string { return Config.Find(fmt.Sprintf("branch.%s.remote", localBranch)) } // RemoteBranchForLocalBranch returns the name (only) of the remote branch that the local branch is tracking // If no specific branch is configured, returns local branch name func RemoteBranchForLocalBranch(localBranch string) string { // get remote ref to track, may not be same name merge := Config.Find(fmt.Sprintf("branch.%s.merge", localBranch)) if strings.HasPrefix(merge, "refs/heads/") { return merge[11:] } else { return localBranch } } func RemoteList() ([]string, error) { cmd := gitNoLFS("remote") outp, err := cmd.StdoutPipe() if err != nil { return nil, fmt.Errorf("Failed to call git remote: %v", err) } cmd.Start() defer cmd.Wait() scanner := bufio.NewScanner(outp) var ret []string for scanner.Scan() { ret = append(ret, strings.TrimSpace(scanner.Text())) } return ret, nil } // LocalRefs returns all of the local branches and tags for the current // repository. Other refs (HEAD, refs/stash, git notes) are ignored. func LocalRefs() ([]*Ref, error) { cmd := gitNoLFS("show-ref", "--heads", "--tags") outp, err := cmd.StdoutPipe() if err != nil { return nil, fmt.Errorf("Failed to call git show-ref: %v", err) } var refs []*Ref if err := cmd.Start(); err != nil { return refs, err } scanner := bufio.NewScanner(outp) for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) parts := strings.SplitN(line, " ", 2) if len(parts) != 2 || len(parts[0]) != 40 || len(parts[1]) < 1 { tracerx.Printf("Invalid line from git show-ref: %q", line) continue } rtype, name := ParseRefToTypeAndName(parts[1]) if rtype != RefTypeLocalBranch && rtype != RefTypeLocalTag { continue } refs = append(refs, &Ref{name, rtype, parts[0]}) } return refs, cmd.Wait() } // UpdateRef moves the given ref to a new sha with a given reason (and creates a // reflog entry, if a "reason" was provided). It returns an error if any were // encountered. func UpdateRef(ref *Ref, to []byte, reason string) error { return UpdateRefIn("", ref, to, reason) } // UpdateRefIn moves the given ref to a new sha with a given reason (and creates a // reflog entry, if a "reason" was provided). It operates within the given // working directory "wd". It returns an error if any were encountered. func UpdateRefIn(wd string, ref *Ref, to []byte, reason string) error { var refspec string if prefix, ok := ref.Type.Prefix(); ok { refspec = fmt.Sprintf("%s/%s", prefix, ref.Name) } else { refspec = ref.Name } args := []string{"update-ref", refspec, hex.EncodeToString(to)} if len(reason) > 0 { args = append(args, "-m", reason) } cmd := gitNoLFS(args...)
cmd.Dir = wd return cmd.Run() } // ValidateRemote checks that a named remote is valid for use // Mainly to check user-supplied remotes & fail more nicely func ValidateRemote(remote string) error { remotes, err := RemoteList() if err != nil { return err } for _, r := range remotes { if r == remote { return nil } } if err = ValidateRemoteURL(remote); err == nil { return nil } return fmt.Errorf("Invalid remote name: %q", remote) } // ValidateRemoteURL checks that a string is a valid Git remote URL func ValidateRemoteURL(remote string) error { u, _ := url.Parse(remote) if u == nil || u.Scheme == "" { // This is either an invalid remote name (maybe the user made a typo // when selecting a named remote) or a bare SSH URL like // "x@y.com:path/to/resource.git". Guess that this is a URL in the latter // form if the string contains a colon ":", and an invalid remote if it // does not. if strings.Contains(remote, ":") { return nil } else { return fmt.Errorf("Invalid remote name: %q", remote) } } switch u.Scheme { case "ssh", "http", "https", "git": return nil default: return fmt.Errorf("Invalid remote url protocol %q in %q", u.Scheme, remote) } } // DefaultRemote returns the default remote based on: // 1. The currently tracked remote branch, if present // 2. "origin", if defined // 3. Any other SINGLE remote defined in .git/config // Returns an error if all of these fail, i.e. no tracked remote branch, no // "origin", and either no remotes defined or 2+ non-"origin" remotes func DefaultRemote() (string, error) { tracked, err := RemoteForCurrentBranch() if err == nil { return tracked, nil } // Otherwise, check what remotes are defined remotes, err := RemoteList() if err != nil { return "", err } switch len(remotes) { case 0: return "", errors.New("No remotes defined") case 1: // always use a single remote whether it's origin or otherwise return remotes[0], nil default: for _, remote := range remotes { // Use origin if present if remote == "origin" { return remote, nil } } } return "", errors.New("Unable to pick default remote, too ambiguous") } func UpdateIndexFromStdin() *subprocess.Cmd { return git("update-index", "-q", "--refresh", "--stdin") } type gitConfig struct { gitVersion string mu sync.Mutex } var Config = &gitConfig{} // Find returns the git config value for the key func (c *gitConfig) Find(val string) string { output, _ := gitSimple("config", val) return output } // FindGlobal returns the git config value in global scope for the key func (c *gitConfig) FindGlobal(val string) string { output, _ := gitSimple("config", "--global", val) return output } // FindSystem returns the git config value in system scope for the key func (c *gitConfig) FindSystem(val string) string { output, _ := gitSimple("config", "--system", val) return output } // FindLocal returns the git config value in local scope for the key func (c *gitConfig) FindLocal(val string) string { output, _ := gitSimple("config", "--local", val) return output } // SetGlobal sets the git config value for the key in the global config func (c *gitConfig) SetGlobal(key, val string) (string, error) { return gitSimple("config", "--global", "--replace-all", key, val) } // SetSystem sets the git config value for the key in the system config func (c *gitConfig) SetSystem(key, val string) (string, error) { return gitSimple("config", "--system", "--replace-all", key, val) } // UnsetGlobal removes the git config value for the key from the global config func (c *gitConfig) UnsetGlobal(key string) (string, error) { return gitSimple("config", "--global", "--unset", key)
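// Note (editorial): `git config --unset` exits with code 5 when the key does
// not exist, so the error returned by these Unset* helpers is expected when
// removing a key that was never set. Illustrative use of the scoped helpers:
//
//	Config.SetGlobal("filter.lfs.required", "true")
//	val := Config.FindGlobal("filter.lfs.required") // "true"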
} // UnsetSystem removes the git config value for the key from the system config func (c *gitConfig) UnsetSystem(key string) (string, error) { return gitSimple("config", "--system", "--unset", key) } // UnsetGlobalSection removes the entire named section from the global config func (c *gitConfig) UnsetGlobalSection(key string) (string, error) { return gitSimple("config", "--global", "--remove-section", key) } // UnsetSystemSection removes the entire named section from the system config func (c *gitConfig) UnsetSystemSection(key string) (string, error) { return gitSimple("config", "--system", "--remove-section", key) } // UnsetLocalSection removes the entire named section from the local config func (c *gitConfig) UnsetLocalSection(key string) (string, error) { return gitSimple("config", "--local", "--remove-section", key) } // SetLocal sets the git config value for the key in the specified config file func (c *gitConfig) SetLocal(file, key, val string) (string, error) { args := make([]string, 1, 6) args[0] = "config" if len(file) > 0 { args = append(args, "--file", file) } args = append(args, "--replace-all", key, val) return gitSimple(args...) } // UnsetLocalKey removes the git config value for the key from the specified config file func (c *gitConfig) UnsetLocalKey(file, key string) (string, error) { args := make([]string, 1, 5) args[0] = "config" if len(file) > 0 { args = append(args, "--file", file) } args = append(args, "--unset", key) return gitSimple(args...) } // List lists all of the git config values func (c *gitConfig) List() (string, error) { return gitSimple("config", "-l") } // ListFromFile lists all of the git config values in the given config file func (c *gitConfig) ListFromFile(f string) (string, error) { return gitSimple("config", "-l", "-f", f) } // Version returns the git version func (c *gitConfig) Version() (string, error) { c.mu.Lock() defer c.mu.Unlock() if len(c.gitVersion) == 0 { v, err := gitSimple("version") if err != nil { return v, err } c.gitVersion = v } return c.gitVersion, nil } // IsGitVersionAtLeast returns whether the git version is the one specified or higher // argument is plain version string separated by '.' e.g.
"2.3.1" but can omit minor/patch func (c *gitConfig) IsGitVersionAtLeast(ver string) bool { gitver, err := c.Version() if err != nil { tracerx.Printf("Error getting git version: %v", err) return false } return IsVersionAtLeast(gitver, ver) } // RecentBranches returns branches with commit dates on or after the given date/time // Return full Ref type for easier detection of duplicate SHAs etc // since: refs with commits on or after this date will be included // includeRemoteBranches: true to include refs on remote branches // onlyRemote: set to non-blank to only include remote branches on a single remote func RecentBranches(since time.Time, includeRemoteBranches bool, onlyRemote string) ([]*Ref, error) { cmd := gitNoLFS("for-each-ref", `--sort=-committerdate`, `--format=%(refname) %(objectname) %(committerdate:iso)`, "refs") outp, err := cmd.StdoutPipe() if err != nil { return nil, fmt.Errorf("Failed to call git for-each-ref: %v", err) } cmd.Start() defer cmd.Wait() scanner := bufio.NewScanner(outp) // Output is like this: // refs/heads/master f03686b324b29ff480591745dbfbbfa5e5ac1bd5 2015-08-19 16:50:37 +0100 // refs/remotes/origin/master ad3b29b773e46ad6870fdf08796c33d97190fe93 2015-08-13 16:50:37 +0100 // Output is ordered by latest commit date first, so we can stop at the threshold regex := regexp.MustCompile(`^(refs/[^/]+/\S+)\s+([0-9A-Za-z]{40})\s+(\d{4}-\d{2}-\d{2}\s+\d{2}\:\d{2}\:\d{2}\s+[\+\-]\d{4})`) tracerx.Printf("RECENT: Getting refs >= %v", since) var ret []*Ref for scanner.Scan() { line := scanner.Text() if match := regex.FindStringSubmatch(line); match != nil { fullref := match[1] sha := match[2] reftype, ref := ParseRefToTypeAndName(fullref) if reftype == RefTypeRemoteBranch || reftype == RefTypeRemoteTag { if !includeRemoteBranches { continue } if onlyRemote != "" && !strings.HasPrefix(ref, onlyRemote+"/") { continue } } // This is a ref we might use // Check the date commitDate, err := ParseGitDate(match[3]) if err != nil { return ret, err } if commitDate.Before(since) { // the end break } tracerx.Printf("RECENT: %v (%v)", ref, commitDate) ret = append(ret, &Ref{ref, reftype, sha}) } } return ret, nil } // Get the type & name of a git reference func ParseRefToTypeAndName(fullref string) (t RefType, name string) { const localPrefix = "refs/heads/" const remotePrefix = "refs/remotes/" const remoteTagPrefix = "refs/remotes/tags/" const localTagPrefix = "refs/tags/" if fullref == "HEAD" { name = fullref t = RefTypeHEAD } else if strings.HasPrefix(fullref, localPrefix) { name = fullref[len(localPrefix):] t = RefTypeLocalBranch } else if strings.HasPrefix(fullref, remotePrefix) { name = fullref[len(remotePrefix):] t = RefTypeRemoteBranch } else if strings.HasPrefix(fullref, remoteTagPrefix) { name = fullref[len(remoteTagPrefix):] t = RefTypeRemoteTag } else if strings.HasPrefix(fullref, localTagPrefix) { name = fullref[len(localTagPrefix):] t = RefTypeLocalTag } else { name = fullref t = RefTypeOther } return } // Parse a Git date formatted in ISO 8601 format (%ci/%ai) func ParseGitDate(str string) (time.Time, error) { // Unfortunately Go and Git don't overlap in their builtin date formats // Go's time.RFC1123Z and Git's %cD are ALMOST the same, except that // when the day is < 10 Git outputs a single digit, but Go expects a leading // zero - this is enough to break the parsing. Sigh. 
// Format is for 2 Jan 2006, 15:04:05 -7 UTC as per Go return time.Parse("2006-01-02 15:04:05 -0700", str) } // FormatGitDate converts a Go date into a git command line format date func FormatGitDate(tm time.Time) string { // Git format is "Fri Jun 21 20:26:41 2013 +0900" but no zero-leading for day return tm.Format("Mon Jan 2 15:04:05 2006 -0700") } // GetCommitSummary gets summary information about a commit func GetCommitSummary(commit string) (*CommitSummary, error) { cmd := gitNoLFS("show", "-s", `--format=%H|%h|%P|%ai|%ci|%ae|%an|%ce|%cn|%s`, commit) out, err := cmd.CombinedOutput() if err != nil { return nil, fmt.Errorf("Failed to call git show: %v %v", err, string(out)) } // At most 10 substrings so subject line is not split on anything fields := strings.SplitN(string(out), "|", 10) // Cope with the case where subject is blank if len(fields) >= 9 { ret := &CommitSummary{} // Get SHAs from output, not commit input, so we can support symbolic refs ret.Sha = fields[0] ret.ShortSha = fields[1] ret.Parents = strings.Split(fields[2], " ") // %ai & %ci above are ISO 8601 dates, which is what ParseGitDate expects ret.AuthorDate, _ = ParseGitDate(fields[3]) ret.CommitDate, _ = ParseGitDate(fields[4]) ret.AuthorEmail = fields[5] ret.AuthorName = fields[6] ret.CommitterEmail = fields[7] ret.CommitterName = fields[8] if len(fields) > 9 { ret.Subject = strings.TrimRight(fields[9], "\n") } return ret, nil } else { msg := fmt.Sprintf("Unexpected output from git show: %v", string(out)) return nil, errors.New(msg) } } func GitAndRootDirs() (string, string, error) { cmd := gitNoLFS("rev-parse", "--git-dir", "--show-toplevel") buf := &bytes.Buffer{} cmd.Stderr = buf out, err := cmd.Output() output := string(out) if err != nil { return "", "", fmt.Errorf("Failed to call git rev-parse --git-dir --show-toplevel: %q", buf.String()) } paths := strings.Split(output, "\n") pathLen := len(paths) for i := 0; i < pathLen; i++ { paths[i], err = tools.TranslateCygwinPath(paths[i]) if err != nil { return "", "", fmt.Errorf("Error translating path %q: %v", paths[i], err) } } if pathLen == 0 { return "", "", fmt.Errorf("Bad git rev-parse output: %q", output) } absGitDir, err := filepath.Abs(paths[0]) if err != nil { return "", "", fmt.Errorf("Error converting %q to absolute: %s", paths[0], err) } if pathLen == 1 || len(paths[1]) == 0 { return absGitDir, "", nil } absRootDir := paths[1] return absGitDir, absRootDir, nil } func RootDir() (string, error) { cmd := gitNoLFS("rev-parse", "--show-toplevel") out, err := cmd.Output() if err != nil { return "", fmt.Errorf("Failed to call git rev-parse --show-toplevel: %v %v", err, string(out)) } path := strings.TrimSpace(string(out)) path, err = tools.TranslateCygwinPath(path) if err != nil { return "", err } if len(path) > 0 { return filepath.Abs(path) } return "", nil } func GitDir() (string, error) { cmd := gitNoLFS("rev-parse", "--git-dir") out, err := cmd.Output() if err != nil { return "", fmt.Errorf("Failed to call git rev-parse --git-dir: %v %v", err, string(out)) } path := strings.TrimSpace(string(out)) if len(path) > 0 { return filepath.Abs(path) } return "", nil } // GetAllWorkTreeHEADs returns the refs that all worktrees are using as HEADs // This returns all worktrees plus the master working copy, and works even if // working dir is actually in a worktree right now // Pass in the git storage dir (parent of 'objects') to work from func GetAllWorkTreeHEADs(storageDir string) ([]*Ref, error) { worktreesdir := filepath.Join(storageDir, "worktrees") dirf, err := os.Open(worktreesdir) if err != nil && !os.IsNotExist(err) { return nil, err } var worktrees []*Ref if err == nil { // There are some worktrees defer dirf.Close()
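// Each subdirectory of storageDir/worktrees/ holds one worktree's metadata;
// its HEAD file is either a bare SHA or a "ref: refs/heads/..." line, both
// of which parseRefFile (below) can resolve.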
direntries, err := dirf.Readdir(0) if err != nil { return nil, err } for _, dirfi := range direntries { if dirfi.IsDir() { // to avoid having to chdir and run git commands to identify the commit // just read the HEAD file & git rev-parse if necessary // Since the git repo is shared the same rev-parse will work from this location headfile := filepath.Join(worktreesdir, dirfi.Name(), "HEAD") ref, err := parseRefFile(headfile) if err != nil { tracerx.Printf("Error reading %v for worktree, skipping: %v", headfile, err) continue } worktrees = append(worktrees, ref) } } } // This has only established the separate worktrees, not the original checkout // If the storageDir contains a HEAD file then there is a main checkout // as well; this must be resolvable whether you're in the main checkout or // a worktree headfile := filepath.Join(storageDir, "HEAD") ref, err := parseRefFile(headfile) if err == nil { worktrees = append(worktrees, ref) } else if !os.IsNotExist(err) { // ok if not exists, probably bare repo tracerx.Printf("Error reading %v for main checkout, skipping: %v", headfile, err) } return worktrees, nil } // parseRefFile manually parses a reference file like HEAD and returns the Ref it resolves to func parseRefFile(filename string) (*Ref, error) { bytes, err := ioutil.ReadFile(filename) if err != nil { return nil, err } contents := strings.TrimSpace(string(bytes)) if strings.HasPrefix(contents, "ref:") { contents = strings.TrimSpace(contents[4:]) } return ResolveRef(contents) } // IsVersionAtLeast compares 2 version strings (either may be prefixed with 'git version', which is ignored) func IsVersionAtLeast(actualVersion, desiredVersion string) bool { // Capture 1-3 version digits, optionally prefixed with 'git version' and possibly // with suffixes which we'll ignore (e.g. unstable builds, MinGW versions) verregex := regexp.MustCompile(`(?:git version\s+)?(\d+)(?:\.(\d+))?(?:\.(\d+))?.*`) var atleast uint64 // Support up to 1000 in major/minor/patch digits const majorscale = 1000 * 1000 const minorscale = 1000 if match := verregex.FindStringSubmatch(desiredVersion); match != nil { // Ignore errors as regex won't match anything other than digits major, _ := strconv.Atoi(match[1]) atleast += uint64(major * majorscale) if len(match) > 2 { minor, _ := strconv.Atoi(match[2]) atleast += uint64(minor * minorscale) } if len(match) > 3 { patch, _ := strconv.Atoi(match[3]) atleast += uint64(patch) } } var actual uint64 if match := verregex.FindStringSubmatch(actualVersion); match != nil { major, _ := strconv.Atoi(match[1]) actual += uint64(major * majorscale) if len(match) > 2 { minor, _ := strconv.Atoi(match[2]) actual += uint64(minor * minorscale) } if len(match) > 3 { patch, _ := strconv.Atoi(match[3]) actual += uint64(patch) } } return actual >= atleast } // IsBare returns whether or not a repository is bare. It requires that the // current working directory is a repository. // // If there was an error determining whether or not the repository is bare, it // will be returned.
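// Editorial note on IsVersionAtLeast (above): each version is packed into a
// single integer as major*1000000 + minor*1000 + patch, so for example
// "2.10.3" becomes 2010003 and omitted components count as zero ("2" is
// 2000000); the comparison is then a plain integer >=.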
func IsBare() (bool, error) { s, err := subprocess.SimpleExec( "git", "rev-parse", "--is-bare-repository") if err != nil { return false, err } return strconv.ParseBool(s) } // For compatibility with git clone we must mirror all flags in CloneWithoutFilters type CloneFlags struct { // --template TemplateDirectory string // -l --local Local bool // -s --shared Shared bool // --no-hardlinks NoHardlinks bool // -q --quiet Quiet bool // -n --no-checkout NoCheckout bool // --progress Progress bool // --bare Bare bool // --mirror Mirror bool // -o --origin Origin string // -b --branch Branch string // -u --upload-pack Upload string // --reference Reference string // --reference-if-able ReferenceIfAble string // --dissociate Dissociate bool // --separate-git-dir SeparateGit string // --depth Depth string // --recursive Recursive bool // --recurse-submodules RecurseSubmodules bool // -c --config Config string // --single-branch SingleBranch bool // --no-single-branch NoSingleBranch bool // --verbose Verbose bool // --ipv4 Ipv4 bool // --ipv6 Ipv6 bool // --shallow-since ShallowSince string // --shallow-exclude ShallowExclude string // --shallow-submodules ShallowSubmodules bool // --no-shallow-submodules NoShallowSubmodules bool // --jobs Jobs int64 } // CloneWithoutFilters clones a git repo but without the smudge filter enabled // so that files in the working copy will be pointers and not real LFS data func CloneWithoutFilters(flags CloneFlags, args []string) error { cmdargs := []string{"clone"} // flags if flags.Bare { cmdargs = append(cmdargs, "--bare") } if len(flags.Branch) > 0 { cmdargs = append(cmdargs, "--branch", flags.Branch) } if len(flags.Config) > 0 { cmdargs = append(cmdargs, "--config", flags.Config) } if len(flags.Depth) > 0 { cmdargs = append(cmdargs, "--depth", flags.Depth) } if flags.Dissociate { cmdargs = append(cmdargs, "--dissociate") } if flags.Ipv4 { cmdargs = append(cmdargs, "--ipv4") } if flags.Ipv6 { cmdargs = append(cmdargs, "--ipv6") } if flags.Local { cmdargs = append(cmdargs, "--local") } if flags.Mirror { cmdargs = append(cmdargs, "--mirror") } if flags.NoCheckout { cmdargs = append(cmdargs, "--no-checkout") } if flags.NoHardlinks { cmdargs = append(cmdargs, "--no-hardlinks") } if flags.NoSingleBranch { cmdargs = append(cmdargs, "--no-single-branch") } if len(flags.Origin) > 0 { cmdargs = append(cmdargs, "--origin", flags.Origin) } if flags.Progress { cmdargs = append(cmdargs, "--progress") } if flags.Quiet { cmdargs = append(cmdargs, "--quiet") } if flags.Recursive { cmdargs = append(cmdargs, "--recursive") } if flags.RecurseSubmodules { cmdargs = append(cmdargs, "--recurse-submodules") } if len(flags.Reference) > 0 { cmdargs = append(cmdargs, "--reference", flags.Reference) } if len(flags.ReferenceIfAble) > 0 { cmdargs = append(cmdargs, "--reference-if-able", flags.ReferenceIfAble) } if len(flags.SeparateGit) > 0 { cmdargs = append(cmdargs, "--separate-git-dir", flags.SeparateGit) } if flags.Shared { cmdargs = append(cmdargs, "--shared") } if flags.SingleBranch { cmdargs = append(cmdargs, "--single-branch") } if len(flags.TemplateDirectory) > 0 { cmdargs = append(cmdargs, "--template", flags.TemplateDirectory) } if len(flags.Upload) > 0 { cmdargs = append(cmdargs, "--upload-pack", flags.Upload) } if flags.Verbose { cmdargs = append(cmdargs, "--verbose") } if len(flags.ShallowSince) > 0 { cmdargs = append(cmdargs, "--shallow-since", flags.ShallowSince) } if len(flags.ShallowExclude) > 0 { cmdargs = append(cmdargs, "--shallow-exclude", flags.ShallowExclude) } if
flags.ShallowSubmodules { cmdargs = append(cmdargs, "--shallow-submodules") } if flags.NoShallowSubmodules { cmdargs = append(cmdargs, "--no-shallow-submodules") } if flags.Jobs > -1 { cmdargs = append(cmdargs, "--jobs", strconv.FormatInt(flags.Jobs, 10)) } // Now args cmdargs = append(cmdargs, args...) cmd := gitNoLFS(cmdargs...) // Assign all streams direct cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr cmd.Stdin = os.Stdin err := cmd.Start() if err != nil { return fmt.Errorf("Failed to start git clone: %v", err) } err = cmd.Wait() if err != nil { return fmt.Errorf("git clone failed: %v", err) } return nil } // Checkout performs an invocation of `git-checkout(1)` applying the given // treeish, paths, and force option, if given. // // If any error was encountered, it will be returned immediately. Otherwise, the // checkout has occurred successfully. func Checkout(treeish string, paths []string, force bool) error { args := []string{"checkout"} if force { args = append(args, "--force") } if len(treeish) > 0 { args = append(args, treeish) } if len(paths) > 0 { args = append(args, append([]string{"--"}, paths...)...) } _, err := gitNoLFSSimple(args...) return err } // CachedRemoteRefs returns the list of branches & tags for a remote which are // currently cached locally. No remote request is made to verify them. func CachedRemoteRefs(remoteName string) ([]*Ref, error) { var ret []*Ref cmd := gitNoLFS("show-ref") outp, err := cmd.StdoutPipe() if err != nil { return nil, fmt.Errorf("Failed to call git show-ref: %v", err) } cmd.Start() scanner := bufio.NewScanner(outp) r := regexp.MustCompile(fmt.Sprintf(`([0-9a-fA-F]{40})\s+refs/remotes/%v/(.*)`, remoteName)) for scanner.Scan() { if match := r.FindStringSubmatch(scanner.Text()); match != nil { name := strings.TrimSpace(match[2]) // Don't match HEAD if name == "HEAD" { continue } sha := match[1] ret = append(ret, &Ref{name, RefTypeRemoteBranch, sha}) } } return ret, cmd.Wait() } // Fetch performs a fetch with no arguments against the given remotes. func Fetch(remotes ...string) error { if len(remotes) == 0 { return nil } _, err := gitNoLFSSimple(append([]string{"fetch"}, remotes...)...) return err } // RemoteRefs returns a list of branches & tags for a remote by actually // accessing the remote via `git ls-remote` func RemoteRefs(remoteName string) ([]*Ref, error) { var ret []*Ref cmd := gitNoLFS("ls-remote", "--heads", "--tags", "-q", remoteName) outp, err := cmd.StdoutPipe() if err != nil { return nil, fmt.Errorf("Failed to call git ls-remote: %v", err) } cmd.Start() scanner := bufio.NewScanner(outp) r := regexp.MustCompile(`([0-9a-fA-F]{40})\s+refs/(heads|tags)/(.*)`) for scanner.Scan() { if match := r.FindStringSubmatch(scanner.Text()); match != nil { name := strings.TrimSpace(match[3]) // Don't match HEAD if name == "HEAD" { continue } sha := match[1] if match[2] == "heads" { ret = append(ret, &Ref{name, RefTypeRemoteBranch, sha}) } else { ret = append(ret, &Ref{name, RefTypeRemoteTag, sha}) } } } return ret, cmd.Wait() } // AllRefs returns a slice of all references in a Git repository in the current // working directory, or an error if those references could not be loaded. func AllRefs() ([]*Ref, error) { return AllRefsIn("") } // AllRefsIn returns a slice of all references in a Git repository located in // the given working directory "wd", or an error if those references could not // be loaded.
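// Usage sketch (editorial illustration): comparing refs cached under
// refs/remotes against what the remote currently advertises. Names are
// assumptions for the example only.
//
//	cached, _ := git.CachedRemoteRefs("origin") // local only, no network
//	actual, _ := git.RemoteRefs("origin")       // talks to the remote
//	fmt.Printf("%d cached vs %d advertised refs\n", len(cached), len(actual))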
func AllRefsIn(wd string) ([]*Ref, error) { cmd := gitNoLFS( "for-each-ref", "--format=%(objectname)%00%(refname)") cmd.Dir = wd outp, err := cmd.StdoutPipe() if err != nil { return nil, lfserrors.Wrap(err, "cannot open pipe") } cmd.Start() refs := make([]*Ref, 0) scanner := bufio.NewScanner(outp) for scanner.Scan() { parts := strings.SplitN(scanner.Text(), "\x00", 2) if len(parts) != 2 { return nil, lfserrors.Errorf( "git: invalid for-each-ref line: %q", scanner.Text()) } sha := parts[0] typ, name := ParseRefToTypeAndName(parts[1]) refs = append(refs, &Ref{ Name: name, Type: typ, Sha: sha, }) } if err := scanner.Err(); err != nil { return nil, err } return refs, nil } // GetTrackedFiles returns a list of files which are tracked in Git which match // the pattern specified (standard wildcard form) // Both pattern and the results are relative to the current working directory, not // the root of the repository func GetTrackedFiles(pattern string) ([]string, error) { safePattern := sanitizePattern(pattern) rootWildcard := len(safePattern) < len(pattern) && strings.ContainsRune(safePattern, '*') var ret []string cmd := gitNoLFS( "-c", "core.quotepath=false", // handle special chars in filenames "ls-files", "--cached", // include things which are staged but not committed right now "--", // no ambiguous patterns safePattern) outp, err := cmd.StdoutPipe() if err != nil { return nil, fmt.Errorf("Failed to call git ls-files: %v", err) } cmd.Start() scanner := bufio.NewScanner(outp) for scanner.Scan() { line := scanner.Text() // If the given pattern is a root wildcard, skip all files which // are not direct descendants of the repository's root. // // This matches the behavior of how .gitattributes performs // filename matches. if rootWildcard && filepath.Dir(line) != "." { continue } ret = append(ret, strings.TrimSpace(line)) } return ret, cmd.Wait() } func sanitizePattern(pattern string) string { if strings.HasPrefix(pattern, "/") { return pattern[1:] } return pattern } // GetFilesChanged returns a list of files which were changed, either between 2 // commits, or at a single commit if you only supply one argument and a blank // string for the other func GetFilesChanged(from, to string) ([]string, error) { var files []string args := []string{ "-c", "core.quotepath=false", // handle special chars in filenames "diff-tree", "--no-commit-id", "--name-only", "-r", } if len(from) > 0 { args = append(args, from) } if len(to) > 0 { args = append(args, to) } args = append(args, "--") // no ambiguous patterns cmd := gitNoLFS(args...) outp, err := cmd.StdoutPipe() if err != nil { return nil, fmt.Errorf("Failed to call git diff: %v", err) } if err := cmd.Start(); err != nil { return nil, fmt.Errorf("Failed to start git diff: %v", err) } scanner := bufio.NewScanner(outp) for scanner.Scan() { files = append(files, strings.TrimSpace(scanner.Text())) } if err := cmd.Wait(); err != nil { return nil, fmt.Errorf("Git diff failed: %v", err) } return files, err } // IsFileModified returns whether the filepath specified is modified according // to `git status`. A file is modified if it has uncommitted changes in the // working copy or the index. This includes being untracked. func IsFileModified(filepath string) (bool, error) { args := []string{ "-c", "core.quotepath=false", // handle special chars in filenames "status", "--porcelain", "--", // separator in case filename ambiguous filepath, } cmd := git(args...) 
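// `git status --porcelain` prints one "XY path" line per changed file, where
// X is the index status and Y the working-tree status; scanning those lines
// below catches staged, unstaged, and untracked changes alike.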
outp, err := cmd.StdoutPipe() if err != nil { return false, lfserrors.Wrap(err, "Failed to call git status") } if err := cmd.Start(); err != nil { return false, lfserrors.Wrap(err, "Failed to start git status") } matched := false for scanner := bufio.NewScanner(outp); scanner.Scan(); { line := scanner.Text() // Porcelain format is "<I><W> <filename>" // Where <I> = index status, <W> = working copy status if len(line) > 3 { // Double-check even though should be only match if strings.TrimSpace(line[3:]) == filepath { matched = true // keep consuming output to exit cleanly // will typically fall straight through anyway due to 1 line output } } } if err := cmd.Wait(); err != nil { return false, lfserrors.Wrap(err, "Git status failed") } return matched, nil } git-lfs-2.3.4/git/git_test.go000066400000000000000000000377521317167762300160520ustar00rootroot00000000000000package git_test // to avoid import cycles import ( "io/ioutil" "os" "path/filepath" "sort" "testing" "time" . "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/test" "github.com/stretchr/testify/assert" ) func TestCurrentRefAndCurrentRemoteRef(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() // test commits; we'll just modify the same file each time since we're // only interested in branches inputs := []*test.CommitInput{ { // 0 Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, }, }, { // 1 NewBranch: "branch2", Files: []*test.FileInput{ {Filename: "file1.txt", Size: 25}, }, }, { // 2 ParentBranches: []string{"master"}, // back on master Files: []*test.FileInput{ {Filename: "file1.txt", Size: 30}, }, }, { // 3 NewBranch: "branch3", Files: []*test.FileInput{ {Filename: "file1.txt", Size: 32}, }, }, } outputs := repo.AddCommits(inputs) // last commit was on branch3 ref, err := CurrentRef() assert.Nil(t, err) assert.Equal(t, &Ref{"branch3", RefTypeLocalBranch, outputs[3].Sha}, ref) test.RunGitCommand(t, true, "checkout", "master") ref, err = CurrentRef() assert.Nil(t, err) assert.Equal(t, &Ref{"master", RefTypeLocalBranch, outputs[2].Sha}, ref) // Check remote repo.AddRemote("origin") test.RunGitCommand(t, true, "push", "-u", "origin", "master:someremotebranch") ref, err = CurrentRemoteRef() assert.Nil(t, err) assert.Equal(t, &Ref{"origin/someremotebranch", RefTypeRemoteBranch, outputs[2].Sha}, ref) refname, err := RemoteRefNameForCurrentBranch() assert.Nil(t, err) assert.Equal(t, "refs/remotes/origin/someremotebranch", refname) remote, err := RemoteForCurrentBranch() assert.Nil(t, err) assert.Equal(t, "origin", remote) ref, err = ResolveRef(outputs[2].Sha) assert.Nil(t, err) assert.Equal(t, &Ref{outputs[2].Sha, RefTypeOther, outputs[2].Sha}, ref) } func TestRecentBranches(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() now := time.Now() // test commits; we'll just modify the same file each time since we're // only interested in branches & dates inputs := []*test.CommitInput{ { // 0 CommitDate: now.AddDate(0, 0, -20), Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, }, }, { // 1 CommitDate: now.AddDate(0, 0, -15), NewBranch: "excluded_branch", // new branch & tag but too old Tags: []string{"excluded_tag"}, Files: []*test.FileInput{ {Filename: "file1.txt", Size: 25}, }, }, { // 2 CommitDate: now.AddDate(0, 0, -12), ParentBranches: []string{"master"}, // back on master Files: []*test.FileInput{ {Filename: "file1.txt", Size: 30}, }, }, { // 3 CommitDate: now.AddDate(0, 0, -6), NewBranch: "included_branch", // new branch within 7
day limit Files: []*test.FileInput{ {Filename: "file1.txt", Size: 32}, }, }, { // 4 CommitDate: now.AddDate(0, 0, -3), NewBranch: "included_branch_2", // new branch within 7 day limit Files: []*test.FileInput{ {Filename: "file1.txt", Size: 36}, }, }, { // 5 // Final commit, current date/time ParentBranches: []string{"master"}, // back on master Files: []*test.FileInput{ {Filename: "file1.txt", Size: 21}, }, }, } outputs := repo.AddCommits(inputs) // Add a couple of remotes and push some branches repo.AddRemote("origin") repo.AddRemote("upstream") test.RunGitCommand(t, true, "push", "origin", "master") test.RunGitCommand(t, true, "push", "origin", "excluded_branch") test.RunGitCommand(t, true, "push", "origin", "included_branch") test.RunGitCommand(t, true, "push", "upstream", "master") test.RunGitCommand(t, true, "push", "upstream", "included_branch_2") // Recent, local only refs, err := RecentBranches(now.AddDate(0, 0, -7), false, "") assert.Equal(t, nil, err) expectedRefs := []*Ref{ &Ref{"master", RefTypeLocalBranch, outputs[5].Sha}, &Ref{"included_branch_2", RefTypeLocalBranch, outputs[4].Sha}, &Ref{"included_branch", RefTypeLocalBranch, outputs[3].Sha}, } assert.Equal(t, expectedRefs, refs, "Refs should be correct") // Recent, remotes too (all of them) refs, err = RecentBranches(now.AddDate(0, 0, -7), true, "") assert.Equal(t, nil, err) expectedRefs = []*Ref{ &Ref{"master", RefTypeLocalBranch, outputs[5].Sha}, &Ref{"included_branch_2", RefTypeLocalBranch, outputs[4].Sha}, &Ref{"included_branch", RefTypeLocalBranch, outputs[3].Sha}, &Ref{"upstream/master", RefTypeRemoteBranch, outputs[5].Sha}, &Ref{"upstream/included_branch_2", RefTypeRemoteBranch, outputs[4].Sha}, &Ref{"origin/master", RefTypeRemoteBranch, outputs[5].Sha}, &Ref{"origin/included_branch", RefTypeRemoteBranch, outputs[3].Sha}, } // Need to sort for consistent comparison sort.Sort(test.RefsByName(expectedRefs)) sort.Sort(test.RefsByName(refs)) assert.Equal(t, expectedRefs, refs, "Refs should be correct") // Recent, only single remote refs, err = RecentBranches(now.AddDate(0, 0, -7), true, "origin") assert.Equal(t, nil, err) expectedRefs = []*Ref{ &Ref{"master", RefTypeLocalBranch, outputs[5].Sha}, &Ref{"origin/master", RefTypeRemoteBranch, outputs[5].Sha}, &Ref{"included_branch_2", RefTypeLocalBranch, outputs[4].Sha}, &Ref{"included_branch", RefTypeLocalBranch, outputs[3].Sha}, &Ref{"origin/included_branch", RefTypeRemoteBranch, outputs[3].Sha}, } // Need to sort for consistent comparison sort.Sort(test.RefsByName(expectedRefs)) sort.Sort(test.RefsByName(refs)) assert.Equal(t, expectedRefs, refs, "Refs should be correct") } func TestResolveEmptyCurrentRef(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() _, err := CurrentRef() assert.NotEqual(t, nil, err) } func TestWorkTrees(t *testing.T) { // Only git 2.5+ if !Config.IsGitVersionAtLeast("2.5.0") { return } repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() // test commits; we'll just modify the same file each time since we're // only interested in branches & dates inputs := []*test.CommitInput{ { // 0 Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, }, }, { // 1 NewBranch: "branch2", Files: []*test.FileInput{ {Filename: "file1.txt", Size: 25}, }, }, { // 2 NewBranch: "branch3", ParentBranches: []string{"master"}, // back on master Files: []*test.FileInput{ {Filename: "file1.txt", Size: 30}, }, }, { // 3 NewBranch: "branch4", ParentBranches: []string{"master"}, // back on master 
Files: []*test.FileInput{ {Filename: "file1.txt", Size: 40}, }, }, } outputs := repo.AddCommits(inputs) // Checkout master again otherwise can't create a worktree from branch4 if we're on it here test.RunGitCommand(t, true, "checkout", "master") // We can create worktrees as subfolders for convenience // Each one is checked out to a different branch // Note that we *won't* create one for branch3 test.RunGitCommand(t, true, "worktree", "add", "branch2_wt", "branch2") test.RunGitCommand(t, true, "worktree", "add", "branch4_wt", "branch4") refs, err := GetAllWorkTreeHEADs(filepath.Join(repo.Path, ".git")) assert.Equal(t, nil, err) expectedRefs := []*Ref{ &Ref{"master", RefTypeLocalBranch, outputs[0].Sha}, &Ref{"branch2", RefTypeLocalBranch, outputs[1].Sha}, &Ref{"branch4", RefTypeLocalBranch, outputs[3].Sha}, } // Need to sort for consistent comparison sort.Sort(test.RefsByName(expectedRefs)) sort.Sort(test.RefsByName(refs)) assert.Equal(t, expectedRefs, refs, "Refs should be correct") } func TestVersionCompare(t *testing.T) { assert.True(t, IsVersionAtLeast("2.6.0", "2.6.0")) assert.True(t, IsVersionAtLeast("2.6.0", "2.6")) assert.True(t, IsVersionAtLeast("2.6.0", "2")) assert.True(t, IsVersionAtLeast("2.6.10", "2.6.5")) assert.True(t, IsVersionAtLeast("2.8.1", "2.7.2")) assert.False(t, IsVersionAtLeast("1.6.0", "2")) assert.False(t, IsVersionAtLeast("2.5.0", "2.6")) assert.False(t, IsVersionAtLeast("2.5.0", "2.5.1")) assert.False(t, IsVersionAtLeast("2.5.2", "2.5.10")) } func TestGitAndRootDirs(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() git, root, err := GitAndRootDirs() if err != nil { t.Fatal(err) } expected, err := os.Stat(git) if err != nil { t.Fatal(err) } actual, err := os.Stat(filepath.Join(root, ".git")) if err != nil { t.Fatal(err) } assert.True(t, os.SameFile(expected, actual)) } func TestGetTrackedFiles(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() // test commits; we'll just modify the same file each time since we're // only interested in branches inputs := []*test.CommitInput{ { // 0 Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, {Filename: "file2.txt", Size: 20}, {Filename: "folder1/file10.txt", Size: 20}, {Filename: "folder1/anotherfile.txt", Size: 20}, }, }, { // 1 Files: []*test.FileInput{ {Filename: "file3.txt", Size: 20}, {Filename: "file4.txt", Size: 20}, {Filename: "folder2/something.txt", Size: 20}, {Filename: "folder2/folder3/deep.txt", Size: 20}, }, }, } repo.AddCommits(inputs) tracked, err := GetTrackedFiles("*.txt") assert.Nil(t, err) sort.Strings(tracked) // for direct comparison fulllist := []string{"file1.txt", "file2.txt", "file3.txt", "file4.txt", "folder1/anotherfile.txt", "folder1/file10.txt", "folder2/folder3/deep.txt", "folder2/something.txt"} assert.Equal(t, fulllist, tracked) tracked, err = GetTrackedFiles("*file*.txt") assert.Nil(t, err) sort.Strings(tracked) sublist := []string{"file1.txt", "file2.txt", "file3.txt", "file4.txt", "folder1/anotherfile.txt", "folder1/file10.txt"} assert.Equal(t, sublist, tracked) tracked, err = GetTrackedFiles("folder1/*") assert.Nil(t, err) sort.Strings(tracked) sublist = []string{"folder1/anotherfile.txt", "folder1/file10.txt"} assert.Equal(t, sublist, tracked) tracked, err = GetTrackedFiles("folder2/*") assert.Nil(t, err) sort.Strings(tracked) sublist = []string{"folder2/folder3/deep.txt", "folder2/something.txt"} assert.Equal(t, sublist, tracked) // relative dir os.Chdir("folder1") tracked, err = 
GetTrackedFiles("*.txt") assert.Nil(t, err) sort.Strings(tracked) sublist = []string{"anotherfile.txt", "file10.txt"} assert.Equal(t, sublist, tracked) os.Chdir("..") // absolute paths only includes matches in repo root tracked, err = GetTrackedFiles("/*.txt") assert.Nil(t, err) sort.Strings(tracked) assert.Equal(t, []string{"file1.txt", "file2.txt", "file3.txt", "file4.txt"}, tracked) // Test includes staged but uncommitted files ioutil.WriteFile("z_newfile.txt", []byte("Hello world"), 0644) test.RunGitCommand(t, true, "add", "z_newfile.txt") tracked, err = GetTrackedFiles("*.txt") assert.Nil(t, err) sort.Strings(tracked) fulllist = append(fulllist, "z_newfile.txt") assert.Equal(t, fulllist, tracked) // Test includes modified files (not staged) ioutil.WriteFile("file1.txt", []byte("Modifications"), 0644) tracked, err = GetTrackedFiles("*.txt") assert.Nil(t, err) sort.Strings(tracked) assert.Equal(t, fulllist, tracked) // Test includes modified files (staged) test.RunGitCommand(t, true, "add", "file1.txt") tracked, err = GetTrackedFiles("*.txt") assert.Nil(t, err) sort.Strings(tracked) assert.Equal(t, fulllist, tracked) // Test excludes deleted files (not committed) test.RunGitCommand(t, true, "rm", "file2.txt") tracked, err = GetTrackedFiles("*.txt") assert.Nil(t, err) sort.Strings(tracked) deletedlist := []string{"file1.txt", "file3.txt", "file4.txt", "folder1/anotherfile.txt", "folder1/file10.txt", "folder2/folder3/deep.txt", "folder2/something.txt", "z_newfile.txt"} assert.Equal(t, deletedlist, tracked) } func TestLocalRefs(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() repo.AddCommits([]*test.CommitInput{ { Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, }, }, { NewBranch: "branch", ParentBranches: []string{"master"}, Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, }, }, }) test.RunGitCommand(t, true, "tag", "v1") refs, err := LocalRefs() if err != nil { t.Fatal(err) } actual := make(map[string]bool) for _, r := range refs { t.Logf("REF: %s", r.Name) switch r.Type { case RefTypeHEAD: t.Errorf("Local HEAD ref: %v", r) case RefTypeOther: t.Errorf("Stash or unknown ref: %v", r) case RefTypeRemoteBranch, RefTypeRemoteTag: t.Errorf("Remote ref: %v", r) default: actual[r.Name] = true } } expected := []string{"master", "branch", "v1"} found := 0 for _, refname := range expected { if actual[refname] { found += 1 } else { t.Errorf("could not find ref %q", refname) } } if found != len(expected) { t.Errorf("Unexpected local refs: %v", actual) } } func TestGetFilesChanges(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() commits := repo.AddCommits([]*test.CommitInput{ { Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, }, }, { Files: []*test.FileInput{ {Filename: "file1.txt", Size: 25}, {Filename: "file2.txt", Size: 20}, {Filename: "folder/file3.txt", Size: 10}, }, Tags: []string{"tag1"}, }, { NewBranch: "abranch", ParentBranches: []string{"master"}, Files: []*test.FileInput{ {Filename: "file1.txt", Size: 30}, {Filename: "file4.txt", Size: 40}, }, }, }) expected0to1 := []string{"file1.txt", "file2.txt", "folder/file3.txt"} expected1to2 := []string{"file1.txt", "file4.txt"} expected0to2 := []string{"file1.txt", "file2.txt", "file4.txt", "folder/file3.txt"} // Test 2 SHAs changes, err := GetFilesChanged(commits[0].Sha, commits[1].Sha) assert.Nil(t, err) assert.Equal(t, expected0to1, changes) // Test SHA & tag changes, err = GetFilesChanged(commits[0].Sha, "tag1") 
assert.Nil(t, err) assert.Equal(t, expected0to1, changes) // Test SHA & branch changes, err = GetFilesChanged(commits[0].Sha, "abranch") assert.Nil(t, err) assert.Equal(t, expected0to2, changes) // Test tag & branch changes, err = GetFilesChanged("tag1", "abranch") assert.Nil(t, err) assert.Equal(t, expected1to2, changes) // Test fail _, err = GetFilesChanged("tag1", "nonexisting") assert.NotNil(t, err) _, err = GetFilesChanged("nonexisting", "tag1") assert.NotNil(t, err) // Test Single arg version changes, err = GetFilesChanged(commits[1].Sha, "") assert.Nil(t, err) assert.Equal(t, expected0to1, changes) changes, err = GetFilesChanged("abranch", "") assert.Nil(t, err) assert.Equal(t, expected1to2, changes) } func TestValidateRemoteURL(t *testing.T) { assert.Nil(t, ValidateRemoteURL("https://github.com/git-lfs/git-lfs")) assert.Nil(t, ValidateRemoteURL("http://github.com/git-lfs/git-lfs")) assert.Nil(t, ValidateRemoteURL("git://github.com/git-lfs/git-lfs")) assert.Nil(t, ValidateRemoteURL("ssh://git@github.com/git-lfs/git-lfs")) assert.Nil(t, ValidateRemoteURL("ssh://git@github.com:22/git-lfs/git-lfs")) assert.Nil(t, ValidateRemoteURL("git@github.com:git-lfs/git-lfs")) assert.Nil(t, ValidateRemoteURL("git@server:/absolute/path.git")) assert.NotNil(t, ValidateRemoteURL("ftp://git@github.com/git-lfs/git-lfs")) } func TestRefTypeKnownPrefixes(t *testing.T) { for typ, expected := range map[RefType]struct { Prefix string Ok bool }{ RefTypeLocalBranch: {"refs/heads", true}, RefTypeRemoteBranch: {"refs/remotes", true}, RefTypeLocalTag: {"refs/tags", true}, RefTypeRemoteTag: {"refs/remotes/tags", true}, RefTypeHEAD: {"", false}, RefTypeOther: {"", false}, } { prefix, ok := typ.Prefix() assert.Equal(t, expected.Prefix, prefix) assert.Equal(t, expected.Ok, ok) } } func TestRefTypeUnknownPrefix(t *testing.T) { defer func() { if err := recover(); err != nil { assert.Equal(t, "git: unknown RefType -1", err) } else { t.Fatal("git: expected panic() from RefType.Prefix()") } }() unknown := RefType(-1) unknown.Prefix() } git-lfs-2.3.4/git/githistory/000077500000000000000000000000001317167762300160725ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/000077500000000000000000000000001317167762300177435ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/000077500000000000000000000000001317167762300235605ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/HEAD000066400000000000000000000000271317167762300242030ustar00rootroot00000000000000ref: refs/heads/master git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/config000066400000000000000000000002111317167762300247420ustar00rootroot00000000000000[core] repositoryformatversion = 0 filemode = true bare = false logallrefupdates = true ignorecase = true precomposeunicode = true git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/index000066400000000000000000000003211317167762300246060ustar00rootroot00000000000000DIRCY1Y13la X@.w7o&ia.txtY1Y13la X@.w7o&ib.txtTREE2 0 +E+Y2gltj~*h.#7uUgit-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/logs/000077500000000000000000000000001317167762300245245ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/logs/HEAD000066400000000000000000000002421317167762300251460ustar00rootroot000000000000000000000000000000000000000000000000000000 42723ad796caa500ddf4e3f6ad37600ed5a65491 Taylor Blau 1496440063 -0600 commit (initial): initial commit 
git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/logs/refs/000077500000000000000000000000001317167762300254635ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/logs/refs/heads/000077500000000000000000000000001317167762300265475ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/logs/refs/heads/master000066400000000000000000000002421317167762300277630ustar00rootroot000000000000000000000000000000000000000000000000000000 42723ad796caa500ddf4e3f6ad37600ed5a65491 Taylor Blau 1496440063 -0600 commit (initial): initial commit git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/objects/000077500000000000000000000000001317167762300252115ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/objects/42/000077500000000000000000000000001317167762300254365ustar00rootroot00000000000000723ad796caa500ddf4e3f6ad37600ed5a65491000066400000000000000000000001771317167762300325720ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/objects/42x] B!F{v¿;"DDkhN$h}hI{0d2CBŦ-懳`L>8 B)vy wz[Ν"_1Oy {Qju'wJh8git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/objects/94/000077500000000000000000000000001317167762300254455ustar00rootroot00000000000000f3610c08588440112ed977376f26a8fba169b0000066400000000000000000000000271317167762300322250ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/objects/94xKOR`/LK(Mgit-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/objects/b6/000077500000000000000000000000001317167762300255205ustar00rootroot000000000000002b45ed2b59cf32dd676ca47497e76a1dab9c7e000066400000000000000000000000671317167762300330350ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/objects/b6x+)JMU03c040031QH+(a9#APfyڊ 37@$TDZgit-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/refs/000077500000000000000000000000001317167762300245175ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/refs/heads/000077500000000000000000000000001317167762300256035ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/identical-blobs.git/refs/heads/master000066400000000000000000000000511317167762300270150ustar00rootroot0000000000000042723ad796caa500ddf4e3f6ad37600ed5a65491 git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/000077500000000000000000000000001317167762300254035ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/HEAD000066400000000000000000000000271317167762300260260ustar00rootroot00000000000000ref: refs/heads/master git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/config000066400000000000000000000002111317167762300265650ustar00rootroot00000000000000[core] repositoryformatversion = 0 filemode = true bare = false logallrefupdates = true ignorecase = true precomposeunicode = true git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/index000066400000000000000000000002111317167762300264270ustar00rootroot00000000000000DIRCY9eY9eg(4+|^՛CGhsome.txtTREE1 0 >ti_ iN.tꐨF#_Kϛ8vgit-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/logs/000077500000000000000000000000001317167762300263475ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/logs/HEAD000066400000000000000000000007111317167762300267720ustar00rootroot000000000000000000000000000000000000000000000000000000 91b85be6928569390e937479509b80a1d0dccb0c 
Taylor Blau 1496954196 -0600 commit (initial): some.txt: a 91b85be6928569390e937479509b80a1d0dccb0c 228afe30855933151f7a88e70d9d88314fd2f191 Taylor Blau 1496954207 -0600 commit: some.txt: b 228afe30855933151f7a88e70d9d88314fd2f191 d941e4756add6b06f5bee766fcf669f55419f13f Taylor Blau 1496954214 -0600 commit: some.txt: c git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/logs/refs/000077500000000000000000000000001317167762300273065ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/logs/refs/heads/000077500000000000000000000000001317167762300303725ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/logs/refs/heads/master000066400000000000000000000007111317167762300316070ustar00rootroot000000000000000000000000000000000000000000000000000000 91b85be6928569390e937479509b80a1d0dccb0c Taylor Blau 1496954196 -0600 commit (initial): some.txt: a 91b85be6928569390e937479509b80a1d0dccb0c 228afe30855933151f7a88e70d9d88314fd2f191 Taylor Blau 1496954207 -0600 commit: some.txt: b 228afe30855933151f7a88e70d9d88314fd2f191 d941e4756add6b06f5bee766fcf669f55419f13f Taylor Blau 1496954214 -0600 commit: some.txt: c git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/000077500000000000000000000000001317167762300270345ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/20/000077500000000000000000000000001317167762300272555ustar00rootroot00000000000000ecedad3e74a113695fe5f00ab003694e2e1e9c000066400000000000000000000000651317167762300345410ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/20x+)JMU06c040031Q(M+(a0`^Vwuvs=|'g*git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/22/000077500000000000000000000000001317167762300272575ustar00rootroot000000000000008afe30855933151f7a88e70d9d88314fd2f191000066400000000000000000000002341317167762300341400ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/22x B1NicE{A7؁_9vcU N\lΖP]!d3hG@V<ӠKT9Lut  6PKPDZڇ<}tO7:2XJo,5!-x1w(^o,Cjgit-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/2e/000077500000000000000000000000001317167762300273425ustar00rootroot0000000000000065efe2a145dda7ee51d1741299f848e5bf752e000066400000000000000000000000201317167762300345110ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/2exKOR0dH Rgit-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/34/000077500000000000000000000000001317167762300272625ustar00rootroot0000000000000010062ba67c5ed59b854387a8bc0ec012479368000066400000000000000000000000201317167762300341100ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/34xKOR0dH 
Tgit-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/3c/000077500000000000000000000000001317167762300273415ustar00rootroot00000000000000b3201d7942353fff5f45e03d114e8e7a061f87000066400000000000000000000000651317167762300342600ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/3cx+)JMU06c040031Q(M+(aK}h/tVOgit-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/52/000077500000000000000000000000001317167762300272625ustar00rootroot00000000000000a8963f48d54c7d352695a278ca4b025e130cb4000066400000000000000000000000641317167762300342000ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/52x+)JMU06c040031Q(M+(aHq r^ [r.}=5git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/63/000077500000000000000000000000001317167762300272645ustar00rootroot00000000000000d8dbd40c23542e740659a7168a0ce3138ea748000066400000000000000000000000201317167762300341700ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/63xKOR0dH Sgit-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/91/000077500000000000000000000000001317167762300272655ustar00rootroot00000000000000b85be6928569390e937479509b80a1d0dccb0c000066400000000000000000000001771317167762300342250ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/91x[ 0))@L7(4 aR-%@OҘAV̨9l,^ O#:ʓWqgmp."?цTd PppF;C9dt&6}git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/d9/000077500000000000000000000000001317167762300273505ustar00rootroot0000000000000041e4756add6b06f5bee766fcf669f55419f13f000066400000000000000000000002341317167762300345350ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/objects/d9x 1}Nic7?w5lP0Fh;i>fR&`Hg8qlyqh @FI=c&Pa h-z, CJ6#*Zں>>tU+E~OՃFǠ3f5'^$o餾jDgit-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/refs/000077500000000000000000000000001317167762300263425ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/refs/heads/000077500000000000000000000000001317167762300274265ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/refs/heads/master000066400000000000000000000000511317167762300306400ustar00rootroot00000000000000d941e4756add6b06f5bee766fcf669f55419f13f git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/refs/tags/000077500000000000000000000000001317167762300273005ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history-with-tags.git/refs/tags/middle000066400000000000000000000000511317167762300304550ustar00rootroot00000000000000228afe30855933151f7a88e70d9d88314fd2f191 git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/000077500000000000000000000000001317167762300234765ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/HEAD000066400000000000000000000000271317167762300241210ustar00rootroot00000000000000ref: refs/heads/master git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/config000066400000000000000000000002111317167762300246600ustar00rootroot00000000000000[core] repositoryformatversion = 0 filemode = true bare = false logallrefupdates = true ignorecase = true precomposeunicode = true 
git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/index000066400000000000000000000002111317167762300245220ustar00rootroot00000000000000DIRCY0sY0s2 D@BXiew.ha+S hello.txtTREE1 0 n1pĭsH85il~iNT7[cgit-lfs-2.3.4/git/githistory/fixtures/linear-history.git/logs/000077500000000000000000000000001317167762300244425ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/logs/HEAD000066400000000000000000000007141317167762300250700ustar00rootroot000000000000000000000000000000000000000000000000000000 62811b8f930323895033b3b338c35f51c0b7268b Taylor Blau 1496347620 -0600 commit (initial): hello.txt: 1 62811b8f930323895033b3b338c35f51c0b7268b efeab7a9b61312fa56fc74eee1e0f5a714abfb70 Taylor Blau 1496347630 -0600 commit: hello.txt: 2 efeab7a9b61312fa56fc74eee1e0f5a714abfb70 e669b63f829bfb0b91fc52a5bcea53dd7977a0ee Taylor Blau 1496347641 -0600 commit: hello.txt: 3 git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/logs/refs/000077500000000000000000000000001317167762300254015ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/logs/refs/heads/000077500000000000000000000000001317167762300264655ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/logs/refs/heads/master000066400000000000000000000007141317167762300277050ustar00rootroot000000000000000000000000000000000000000000000000000000 62811b8f930323895033b3b338c35f51c0b7268b Taylor Blau 1496347620 -0600 commit (initial): hello.txt: 1 62811b8f930323895033b3b338c35f51c0b7268b efeab7a9b61312fa56fc74eee1e0f5a714abfb70 Taylor Blau 1496347630 -0600 commit: hello.txt: 2 efeab7a9b61312fa56fc74eee1e0f5a714abfb70 e669b63f829bfb0b91fc52a5bcea53dd7977a0ee Taylor Blau 1496347641 -0600 commit: hello.txt: 3 git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/000077500000000000000000000000001317167762300251275ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/56/000077500000000000000000000000001317167762300253615ustar00rootroot00000000000000a6051ca2b02b04ef92d5150c9ef600403cb1de000066400000000000000000000000201317167762300324410ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/56xKOR0d0 "git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/62/000077500000000000000000000000001317167762300253565ustar00rootroot00000000000000811b8f930323895033b3b338c35f51c0b7268b000066400000000000000000000001751317167762300320500ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/62x 1}NۀǮ#"b 6]X8!!pvo)D9ئJQS"j0YX}'>uQӵ'v#9} 
'#f7ؙUk퓽B^7git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/6e/000077500000000000000000000000001317167762300254415ustar00rootroot0000000000000007bd31cb70c4add2c973481ad4fa38b235ca69000066400000000000000000000000661317167762300326420ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/6ex+)JMU06g040031QH+(axSDfnHԖbgit-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/71/000077500000000000000000000000001317167762300253565ustar00rootroot00000000000000a488ec1804ee97ea651b094aa9181ca85aab0a000066400000000000000000000000661317167762300325630ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/71x+)JMU06g040031QH+(a[*h6IWEy}cpx?Igit-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/c5/000077500000000000000000000000001317167762300254365ustar00rootroot00000000000000decfe1fcf39b8c489f4a0bf3b3823676339f80000066400000000000000000000000661317167762300327000ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/c5x+)JMU06g040031QH+(afuʥl޿Ń-O(git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/d8/000077500000000000000000000000001317167762300254425ustar00rootroot00000000000000263ee9860594d2806b0dfd1bfd17528b0ba2a4000066400000000000000000000000201317167762300324770ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/d8xKOR0d0 #git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/e4/000077500000000000000000000000001317167762300254375ustar00rootroot0000000000000040e5c842586965a7fb77deda2eca68612b1f53000066400000000000000000000000201317167762300325170ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/e4xKOR0d0 $git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/e6/000077500000000000000000000000001317167762300254415ustar00rootroot0000000000000069b63f829bfb0b91fc52a5bcea53dd7977a0ee000066400000000000000000000002361317167762300330300ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/e6xQ !Evn <N}:p\nj`!IGl EŜu 0ۂf ]/dI0z с]pv%yKD@b,cKk:pFWSjE X,ȣrJa=CRm7_ Fgit-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/ef/000077500000000000000000000000001317167762300255215ustar00rootroot00000000000000eab7a9b61312fa56fc74eee1e0f5a714abfb70000066400000000000000000000002361317167762300331420ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/objects/efxQ B!Evnoh m@M) upڃV~#g-UI}d H}!0ӤTP#y 8bUjGQ}k6ylcI2ޡ ĤEqϵ|Z|Cgit-lfs-2.3.4/git/githistory/fixtures/linear-history.git/refs/000077500000000000000000000000001317167762300244355ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/refs/heads/000077500000000000000000000000001317167762300255215ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/linear-history.git/refs/heads/master000066400000000000000000000000511317167762300267330ustar00rootroot00000000000000e669b63f829bfb0b91fc52a5bcea53dd7977a0ee git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/000077500000000000000000000000001317167762300247405ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/HEAD000066400000000000000000000000271317167762300253630ustar00rootroot00000000000000ref: refs/heads/master git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/config000066400000000000000000000002111317167762300261220ustar00rootroot00000000000000[core] 
repositoryformatversion = 0 filemode = true bare = false logallrefupdates = true ignorecase = true precomposeunicode = true git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/index000066400000000000000000000003701317167762300257720ustar00rootroot00000000000000DIRCY5Y54փh4#a.txtY5Y54݁ q[‚^N` subdir/b.txtTREE82 1 =.>Wzu{vsubdir1 0 ~uHb1 }Nz9%BHH2git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/logs/000077500000000000000000000000001317167762300257045ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/logs/HEAD000066400000000000000000000005171317167762300263330ustar00rootroot000000000000000000000000000000000000000000000000000000 37f99c7f2706d317b3bf7ff13d574eef33d8788a Taylor Blau 1496686519 -0600 commit (initial): a.txt: initial commit 37f99c7f2706d317b3bf7ff13d574eef33d8788a bc63077ac5e575ccc9dbbd93dc882f1e10600ea7 Taylor Blau 1496686541 -0600 commit: subdir/b.txt: initial commit git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/logs/refs/000077500000000000000000000000001317167762300266435ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/logs/refs/heads/000077500000000000000000000000001317167762300277275ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/logs/refs/heads/master000066400000000000000000000005171317167762300311500ustar00rootroot000000000000000000000000000000000000000000000000000000 37f99c7f2706d317b3bf7ff13d574eef33d8788a Taylor Blau 1496686519 -0600 commit (initial): a.txt: initial commit 37f99c7f2706d317b3bf7ff13d574eef33d8788a bc63077ac5e575ccc9dbbd93dc882f1e10600ea7 Taylor Blau 1496686541 -0600 commit: subdir/b.txt: initial commit git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/objects/000077500000000000000000000000001317167762300263715ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/objects/07/000077500000000000000000000000001317167762300266175ustar00rootroot00000000000000bd7fbfc41b7d36135bcffe7c465490f4aca32d000066400000000000000000000000621317167762300342460ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/objects/07x+)JMU06f040031QH+(a9yk2S$f0Pgit-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/objects/12/000077500000000000000000000000001317167762300266135ustar00rootroot000000000000007ececad475cde6da0048051d62121cabd23194000066400000000000000000000000621317167762300337760ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/objects/12x+)JMU06f040031QH+(a\sq5?jKhqgit-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/objects/19/000077500000000000000000000000001317167762300266225ustar00rootroot00000000000000acdd81ab0abc15c771fe005bf1c2825e4e6080000066400000000000000000000000241317167762300340600ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/objects/19xKOR0eH+(git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/objects/37/000077500000000000000000000000001317167762300266225ustar00rootroot00000000000000f99c7f2706d317b3bf7ff13d574eef33d8788a000066400000000000000000000002051317167762300340120ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/objects/37x 1Nļ6""`7/ $wA7h~ 9V4Rn}砄Q!9'0=A,Ma@.X\b,mGb)R|Rw+ iTRQ"H1Moko~;fUWS^ q 
!?:8`~OߡylNOhg3o.Kgit-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/refs/000077500000000000000000000000001317167762300256775ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/refs/heads/000077500000000000000000000000001317167762300267635ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/non-repeated-subtrees.git/refs/heads/master000066400000000000000000000000511317167762300301750ustar00rootroot00000000000000bc63077ac5e575ccc9dbbd93dc882f1e10600ea7 git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/000077500000000000000000000000001317167762300233165ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/HEAD000066400000000000000000000000271317167762300237410ustar00rootroot00000000000000ref: refs/heads/master git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/ORIG_HEAD000066400000000000000000000000511317167762300245560ustar00rootroot000000000000008be6d64cddab01f53381e9feafe50d95ca5e6629 git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/config000066400000000000000000000002111317167762300245000ustar00rootroot00000000000000[core] repositoryformatversion = 0 filemode = true bare = false logallrefupdates = true ignorecase = true precomposeunicode = true git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/index000066400000000000000000000004311317167762300243460ustar00rootroot00000000000000DIRCY1Y12.eEݧQtHu.a.txtY1Y12c #T.tY Hb.txtY1nY1n2Lb g_:\0] hello.txtTREE3 0 /:ɝ[EAg^Ru9|^1q3~u@!8[git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/logs/000077500000000000000000000000001317167762300242625ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/logs/HEAD000066400000000000000000000024721317167762300247130ustar00rootroot000000000000000000000000000000000000000000000000000000 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496421999 -0600 commit (initial): initial commit 8be6d64cddab01f53381e9feafe50d95ca5e6629 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496422012 -0600 checkout: moving from master to branch-a 8be6d64cddab01f53381e9feafe50d95ca5e6629 251e6b3461a3b5adc6bab694d5ae1abc878edf85 Taylor Blau 1496422020 -0600 commit: a.txt: initial 251e6b3461a3b5adc6bab694d5ae1abc878edf85 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496422026 -0600 checkout: moving from branch-a to master 8be6d64cddab01f53381e9feafe50d95ca5e6629 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496422029 -0600 checkout: moving from master to branch-b 8be6d64cddab01f53381e9feafe50d95ca5e6629 15805fe2044dc1a0508853e93d1a230bd94636be Taylor Blau 1496422035 -0600 commit: b.txt: initial 15805fe2044dc1a0508853e93d1a230bd94636be 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496422041 -0600 checkout: moving from branch-b to master 8be6d64cddab01f53381e9feafe50d95ca5e6629 6c9ccaeb45446e3fa88cd5848a940fd34c18192b Taylor Blau 1496422044 -0600 merge branch-a branch-b: Merge made by the 'octopus' strategy. 
git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/logs/refs/000077500000000000000000000000001317167762300252215ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/logs/refs/heads/000077500000000000000000000000001317167762300263055ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/logs/refs/heads/branch-a000066400000000000000000000004631317167762300277060ustar00rootroot000000000000000000000000000000000000000000000000000000 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496422012 -0600 branch: Created from HEAD 8be6d64cddab01f53381e9feafe50d95ca5e6629 251e6b3461a3b5adc6bab694d5ae1abc878edf85 Taylor Blau 1496422020 -0600 commit: a.txt: initial git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/logs/refs/heads/branch-b000066400000000000000000000004631317167762300277070ustar00rootroot000000000000000000000000000000000000000000000000000000 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496422029 -0600 branch: Created from HEAD 8be6d64cddab01f53381e9feafe50d95ca5e6629 15805fe2044dc1a0508853e93d1a230bd94636be Taylor Blau 1496422035 -0600 commit: b.txt: initial git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/logs/refs/heads/master000066400000000000000000000005421317167762300275240ustar00rootroot000000000000000000000000000000000000000000000000000000 8be6d64cddab01f53381e9feafe50d95ca5e6629 Taylor Blau 1496421999 -0600 commit (initial): initial commit 8be6d64cddab01f53381e9feafe50d95ca5e6629 6c9ccaeb45446e3fa88cd5848a940fd34c18192b Taylor Blau 1496422044 -0600 merge branch-a branch-b: Merge made by the 'octopus' strategy. git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/objects/000077500000000000000000000000001317167762300247475ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/objects/04/000077500000000000000000000000001317167762300251725ustar00rootroot00000000000000df07b08ca746b3167d0f1d1514e2f39a52c16c000066400000000000000000000000661317167762300322320ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/objects/04x+)JMU06g040031QH+(a';fT!Uc.Kwgit-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/objects/15/000077500000000000000000000000001317167762300251745ustar00rootroot00000000000000805fe2044dc1a0508853e93d1a230bd94636be000066400000000000000000000002371317167762300320760ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/objects/15xA 0=li""v@Jق< /UEr1ҎX3i(w%2zΙF v]Pt9!J m\Vϴ>f/Mn?xiWzס=whƓl\dFgit-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/objects/25/000077500000000000000000000000001317167762300251755ustar00rootroot000000000000001e6b3461a3b5adc6bab694d5ae1abc878edf85000066400000000000000000000002401317167762300326220ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/objects/25x 1ENieMƍ5$``fA7؁_M۲T֚4fq.Kb X 9%`(ZAgRsrgx[ao}135OBp{{b^ll~a'lO>[1git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/objects/8b/000077500000000000000000000000001317167762300252605ustar00rootroot00000000000000e6d64cddab01f53381e9feafe50d95ca5e6629000066400000000000000000000002001317167762300326360ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/objects/8bxK 1]};$D3x|08zޠ'pUPPŭ*@d hSA9n5|BYxё\Tsw7M+)n 
dBpDwU}U෠>1I84git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/objects/b6/000077500000000000000000000000001317167762300252565ustar00rootroot00000000000000fc4c620b67d95f953a5c1c1230aaab5db5a1b0000066400000000000000000000000241317167762300325110ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/objects/b6xKOR0eH git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/objects/e9/000077500000000000000000000000001317167762300252645ustar00rootroot000000000000004edfabfb7605f7cb959b4ce8fb6652b509fe03000066400000000000000000000001201317167762300326500ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/objects/e9x+)JMU07`040031QH+(aK}h/tTAFjNN>XѶ?>I7ZZu6git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/refs/000077500000000000000000000000001317167762300242555ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/refs/heads/000077500000000000000000000000001317167762300253415ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/refs/heads/branch-a000066400000000000000000000000511317167762300267330ustar00rootroot00000000000000251e6b3461a3b5adc6bab694d5ae1abc878edf85 git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/refs/heads/branch-b000066400000000000000000000000511317167762300267340ustar00rootroot0000000000000015805fe2044dc1a0508853e93d1a230bd94636be git-lfs-2.3.4/git/githistory/fixtures/octopus-merge.git/refs/heads/master000066400000000000000000000000511317167762300265530ustar00rootroot000000000000006c9ccaeb45446e3fa88cd5848a940fd34c18192b git-lfs-2.3.4/git/githistory/fixtures/packed-objects.git/000077500000000000000000000000001317167762300234035ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/packed-objects.git/HEAD000066400000000000000000000000271317167762300240260ustar00rootroot00000000000000ref: refs/heads/master git-lfs-2.3.4/git/githistory/fixtures/packed-objects.git/config000066400000000000000000000002101317167762300245640ustar00rootroot00000000000000[core] repositoryformatversion = 0 filemode = true bare = true logallrefupdates = true ignorecase = true precomposeunicode = true git-lfs-2.3.4/git/githistory/fixtures/packed-objects.git/index000066400000000000000000000002111317167762300244270ustar00rootroot00000000000000DIRCY Y 'yV&˂=, 1504643527 -0400 commit (initial): *: initial commit git-lfs-2.3.4/git/githistory/fixtures/packed-objects.git/logs/refs/000077500000000000000000000000001317167762300253065ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/packed-objects.git/logs/refs/heads/000077500000000000000000000000001317167762300263725ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/packed-objects.git/logs/refs/heads/master000066400000000000000000000002451317167762300276110ustar00rootroot000000000000000000000000000000000000000000000000000000 749f1b43e00eeb98194fedb7827b3cfb43b42b0e Taylor Blau 1504643527 -0400 commit (initial): *: initial commit git-lfs-2.3.4/git/githistory/fixtures/packed-objects.git/objects/000077500000000000000000000000001317167762300250345ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/packed-objects.git/objects/info/000077500000000000000000000000001317167762300257675ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/packed-objects.git/objects/info/packs000066400000000000000000000000661317167762300270150ustar00rootroot00000000000000P pack-ac516ce2d006668dc5e001e8dda0aa1c7198500f.pack 
git-lfs-2.3.4/git/githistory/fixtures/packed-objects.git/objects/pack/000077500000000000000000000000001317167762300257525ustar00rootroot00000000000000pack-ac516ce2d006668dc5e001e8dda0aa1c7198500f.idx000066400000000000000000000022041317167762300350040ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/packed-objects.git/objects/packtOcGk 1ԅtCO{xHQ(/IQ$x340031QH+(aXeMlбq{* jQlfݠqPgit-lfs-2.3.4/git/githistory/fixtures/packed-objects.git/refs/000077500000000000000000000000001317167762300243425ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/packed-objects.git/refs/heads/000077500000000000000000000000001317167762300254265ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/packed-objects.git/refs/heads/master000066400000000000000000000000511317167762300266400ustar00rootroot00000000000000749f1b43e00eeb98194fedb7827b3cfb43b42b0e git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/000077500000000000000000000000001317167762300241505ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/HEAD000066400000000000000000000000271317167762300245730ustar00rootroot00000000000000ref: refs/heads/master git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/config000066400000000000000000000002111317167762300253320ustar00rootroot00000000000000[core] repositoryformatversion = 0 filemode = true bare = false logallrefupdates = true ignorecase = true precomposeunicode = true git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/index000066400000000000000000000003701317167762300252020ustar00rootroot00000000000000DIRCY0/Y0/2l#=!6e3=G'Ca.txtY0Y02uc #T.tY H subdir/b.txtTREE82 1 G"j% #IS^(subdir1 0 M6s МϫpǏ)؉9;¹ggit-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/logs/000077500000000000000000000000001317167762300251145ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/logs/HEAD000066400000000000000000000004721317167762300255430ustar00rootroot000000000000000000000000000000000000000000000000000000 0b4747509ab885114690ff291f8f108045b1d749 Taylor Blau 1496362788 -0600 commit (initial): initial commit 0b4747509ab885114690ff291f8f108045b1d749 b9621d5d84b3174de020ad2c869f43b2f61f337f Taylor Blau 1496362801 -0600 commit: a.txt: changes git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/logs/refs/000077500000000000000000000000001317167762300260535ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/logs/refs/heads/000077500000000000000000000000001317167762300271375ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/logs/refs/heads/master000066400000000000000000000004721317167762300303600ustar00rootroot000000000000000000000000000000000000000000000000000000 0b4747509ab885114690ff291f8f108045b1d749 Taylor Blau 1496362788 -0600 commit (initial): initial commit 0b4747509ab885114690ff291f8f108045b1d749 b9621d5d84b3174de020ad2c869f43b2f61f337f Taylor Blau 1496362801 -0600 commit: a.txt: changes git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/objects/000077500000000000000000000000001317167762300256015ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/objects/0b/000077500000000000000000000000001317167762300261025ustar00rootroot000000000000004747509ab885114690ff291f8f108045b1d749000066400000000000000000000002001317167762300325270ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/objects/0bxA EsF z^`Ld+ zO,`=IgqYr6!hx(jOj);,WC 
zfo5gVÎ;CY2-/9igit-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/objects/12/000077500000000000000000000000001317167762300260235ustar00rootroot00000000000000b98c239e8f933d213617a1b965333d478b2743000066400000000000000000000000211317167762300325270ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/objects/12xKOR0bH4git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/objects/2e/000077500000000000000000000000001317167762300261075ustar00rootroot0000000000000065efe2a145dda7ee51d1741299f848e5bf752e000066400000000000000000000000201317167762300332560ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/objects/2exKOR0dH Rgit-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/objects/47/000077500000000000000000000000001317167762300260335ustar00rootroot00000000000000d4d71022adc7ec6a14250d23491e535ec228f4000066400000000000000000000001231317167762300330030ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/objects/47x+)JMU03c040031QH+(a٣<ƶ&@P\YİWqf׏^9yau=ygit-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/objects/5e/000077500000000000000000000000001317167762300261125ustar00rootroot00000000000000497ceceb14ad3c43bac781ed5c804bc67e8f3b000066400000000000000000000001231317167762300335430ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/objects/5ex+)JMU03c040031QH+(aK}h/t(&d1l\tYWq^) git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/objects/63/000077500000000000000000000000001317167762300260315ustar00rootroot00000000000000d8dbd40c23542e740659a7168a0ce3138ea748000066400000000000000000000000201317167762300327350ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/objects/63xKOR0dH Sgit-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/objects/b9/000077500000000000000000000000001317167762300261135ustar00rootroot00000000000000621d5d84b3174de020ad2c869f43b2f61f337f000066400000000000000000000002421317167762300331020ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/fixtures/repeated-subtrees.git/objects/b9x[j@ HJ9C. jBG1Am?lS@$&Q)@ jVȦw0qȔs@Xw*sGa–8yڼn* // []string{"a", "b", "c.txt"}). parts := strings.Split(path, string(os.PathSeparator)) for i := 0; i < len(parts)-1; i++ { part := parts[i] // Load the subtree given by that name. var subtree *odb.Tree for _, entry := range root.Entries { if entry.Name != part { continue } subtree, err = db.Tree(entry.Oid) if err != nil { t.Fatalf("git/odb: cannot load subtree %s: %s", filepath.Join(parts[:i]...), err) } break } if subtree == nil { t.Fatalf("git/odb: subtree %s does not exist", path) } // And re-assign it to root, creating a sort of pseudo-recursion. root = subtree } filename := parts[len(parts)-1] // Find the blob given by the last entry in parts (the filename). var blob *odb.Blob for _, entry := range root.Entries { if entry.Name == filename { blob, err = db.Blob(entry.Oid) if err != nil { t.Fatalf("git/odb: cannot load blob %x: %s", entry.Oid, err) } } } // If we couldn't find the blob, fail immediately. if blob == nil { t.Fatalf("git/odb: blob at %s in %s does not exist", path, tree) } // Perform an assertion on the blob's contents. got, err := ioutil.ReadAll(blob.Contents) if err != nil { t.Fatalf("git/odb: cannot read contents from blob %s: %s", path, err) } assert.Equal(t, contents, string(got)) } // AssertCommitParent asserts that the given commit has a parent equivalent to // the one provided. 
func AssertCommitParent(t *testing.T, db *odb.ObjectDatabase, sha, parent string) { commit, err := db.Commit(HexDecode(t, sha)) if err != nil { t.Fatalf("git/odb: expected to read commit: %s, couldn't: %v", sha, err) } decoded, err := hex.DecodeString(parent) if err != nil { t.Fatalf("git/odb: expected to decode parent SHA: %s, couldn't: %v", parent, err) } assert.Contains(t, commit.ParentIDs, decoded, "git/odb: expected parents of commit: %s to contain: %s", sha, parent) } // AssertCommitTree asserts that the given commit has a tree equivalent to the // one provided. func AssertCommitTree(t *testing.T, db *odb.ObjectDatabase, sha, tree string) { commit, err := db.Commit(HexDecode(t, sha)) if err != nil { t.Fatalf("git/odb: expected to read commit: %s, couldn't: %v", sha, err) } decoded, err := hex.DecodeString(tree) if err != nil { t.Fatalf("git/odb: expected to decode tree SHA: %s, couldn't: %v", tree, err) } assert.Equal(t, decoded, commit.TreeID, "git/odb: expected tree ID: %s (got: %x)", tree, commit.TreeID) } // AssertRef asserts that a given refname points at the expected commit. func AssertRef(t *testing.T, db *odb.ObjectDatabase, ref string, expected []byte) { root, ok := db.Root() assert.True(t, ok, "git/odb: expected *odb.ObjectDatabase to have Root()") cmd := exec.Command("git", "rev-parse", ref) cmd.Dir = root out, err := cmd.Output() assert.Nil(t, err) assert.Equal(t, hex.EncodeToString(expected), strings.TrimSpace(string(out))) } // HexDecode decodes the given ASCII hex-encoded string into []byte's, or fails // the test immediately if the given "sha" wasn't a valid hex-encoded sequence. func HexDecode(t *testing.T, sha string) []byte { b, err := hex.DecodeString(sha) if err != nil { t.Fatalf("git/odb: could not decode string: %q, %v", sha, err) } return b } // copyToTmp copies the given fixture to a folder in /tmp. func copyToTmp(fixture string) (string, error) { p, err := ioutil.TempDir("", fmt.Sprintf("git-lfs-fixture-%s", filepath.Dir(fixture))) if err != nil { return "", err } if err = copyDir(fixture, p); err != nil { return "", err } return p, nil } // copyDir copies a directory (and recursively all files and subdirectories) // from "from" to "to" preserving permissions and ownership. func copyDir(from, to string) error { stat, err := os.Stat(from) if err != nil { return err } if err := os.MkdirAll(to, stat.Mode()); err != nil { return err } entries, err := ioutil.ReadDir(from) if err != nil { return err } for _, entry := range entries { sp := filepath.Join(from, entry.Name()) dp := filepath.Join(to, entry.Name()) if entry.IsDir() { err = copyDir(sp, dp) } else { err = copyFile(sp, dp) } if err != nil { return err } } return nil } // copyFile copies a file from "from" to "to" preserving permissions and // ownership. func copyFile(from, to string) error { src, err := os.Open(from) if err != nil { return err } defer src.Close() dst, err := os.Create(to) if err != nil { return err } defer dst.Close() if _, err = io.Copy(dst, src); err != nil { return err } stat, err := os.Stat(from) if err != nil { return err } return os.Chmod(to, stat.Mode()) } git-lfs-2.3.4/git/githistory/log/000077500000000000000000000000001317167762300166535ustar00rootroot00000000000000git-lfs-2.3.4/git/githistory/log/list_task.go000066400000000000000000000020611317167762300211760ustar00rootroot00000000000000package log import ( "fmt" "time" ) // ListTask is a Task implementation that logs all updates in a list where each // entry is line-delimited.
// // For example: // entry #1 // entry #2 // msg: ..., done type ListTask struct { msg string ch chan *Update } // NewListTask instantiates a new *ListTask instance with the given message. func NewListTask(msg string) *ListTask { return &ListTask{ msg: msg, ch: make(chan *Update, 1), } } // Entry logs a line-delimited task entry. func (l *ListTask) Entry(update string) { l.ch <- &Update{ S: fmt.Sprintf("%s\n", update), At: time.Now(), } } func (l *ListTask) Complete() { l.ch <- &Update{ S: fmt.Sprintf("%s: ...", l.msg), At: time.Now(), } close(l.ch) } // Throttled implements the Task.Throttled function and ensures that all log // updates are printed to the sink. func (l *ListTask) Throttled() bool { return false } // Updates implements the Task.Updates function and returns a channel of updates // to log to the sink. func (l *ListTask) Updates() <-chan *Update { return l.ch } git-lfs-2.3.4/git/githistory/log/list_task_test.go000066400000000000000000000023131317167762300222350ustar00rootroot00000000000000package log import ( "testing" "github.com/stretchr/testify/assert" ) func TestListTaskCallsDoneWhenComplete(t *testing.T) { task := NewListTask("example") task.Complete() select { case update, ok := <-task.Updates(): assert.Equal(t, "example: ...", update.S) assert.True(t, ok, "git/githistory/log: expected Updates() to remain open") default: t.Fatal("git/githistory/log: expected update from *ListTask") } select { case update, ok := <-task.Updates(): assert.False(t, ok, "git/githistory/log: unexpected *ListTask.Update(): %s", update) default: t.Fatal("git/githistory/log: expected *ListTask.Updates() to be closed") } } func TestListTaskWritesEntries(t *testing.T) { task := NewListTask("example") task.Entry("1") select { case update, ok := <-task.Updates(): assert.True(t, ok, "git/githistory/log: expected ListTask.Updates() to remain open") assert.Equal(t, "1\n", update.S) default: t.Fatal("git/githistory/log: expected task.Updates() to have an update") } } func TestListTaskIsNotThrottled(t *testing.T) { task := NewListTask("example") throttled := task.Throttled() assert.False(t, throttled, "git/githistory/log: expected *ListTask to not be Throttle()-d") } git-lfs-2.3.4/git/githistory/log/log.go000066400000000000000000000122411317167762300177630ustar00rootroot00000000000000package log import ( "fmt" "io" "io/ioutil" "strings" "sync" "time" "github.com/git-lfs/git-lfs/tools" "github.com/olekukonko/ts" ) const ( DefaultLoggingThrottle = 200 * time.Millisecond ) // Logger logs a series of tasks to an io.Writer, processing each task in order // until completion. type Logger struct { // sink is the writer to write to. sink io.Writer // widthFn is a function that returns the width of the terminal that // this logger is running within. widthFn func() int // throttle is the minimum amount of time that must pass between each // instant data is logged. throttle time.Duration // queue is the incoming, unbuffered queue of tasks to enqueue. queue chan Task // tasks is the set of tasks to process. tasks chan Task // wg is a WaitGroup that is incremented when new tasks are enqueued, // and decremented when tasks finish. wg *sync.WaitGroup } // NewLogger returns a new *Logger instance that logs to "sink" and uses the // current terminal width as the width of the line.
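// As an illustrative sketch (not taken from the original docs; it assumes a
// package "os" import and uses only the API defined in this package), a
// caller normally drives tasks through a *Logger rather than reading a
// task's Updates() channel directly:
//
//     l := NewLogger(os.Stderr)
//     task := l.List("migrate: Updating refs")
//     task.Entry("  refs/heads/master")
//     task.Complete() // flushes "migrate: Updating refs: ..., done"
//     l.Close()       // waits for all enqueued tasks to drain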
func NewLogger(sink io.Writer) *Logger { if sink == nil { sink = ioutil.Discard } l := &Logger{ sink: sink, throttle: DefaultLoggingThrottle, widthFn: func() int { size, err := ts.GetSize() if err != nil { return 80 } return size.Col() }, queue: make(chan Task), tasks: make(chan Task), wg: new(sync.WaitGroup), } go l.consume() return l } // Close closes the queue and does not allow new Tasks to be `enqueue()`'d. It // waits until the currently running Task has completed. func (l *Logger) Close() { if l == nil { return } close(l.queue) l.wg.Wait() } // Waiter creates and enqueues a new *WaitingTask. func (l *Logger) Waiter(msg string) *WaitingTask { t := NewWaitingTask(msg) l.enqueue(t) return t } // Percentage creates and enqueues a new *PercentageTask. func (l *Logger) Percentage(msg string, total uint64) *PercentageTask { t := NewPercentageTask(msg, total) l.enqueue(t) return t } // List creates and enqueues a new *ListTask. func (l *Logger) List(msg string) *ListTask { t := NewListTask(msg) l.enqueue(t) return t } // enqueue enqueues the given Tasks "ts". func (l *Logger) enqueue(ts ...Task) { if l == nil { for _, t := range ts { go func(t Task) { for range t.Updates() { // Discard all updates. } }(t) } return } l.wg.Add(len(ts)) for _, t := range ts { l.queue <- t } } // consume creates a pseudo-infinite buffer between the incoming set of tasks and // the queue of tasks to work on. func (l *Logger) consume() { go func() { // Process the single next task in sequence until completion, // then consume the next task. for task := range l.tasks { l.logTask(task) } }() defer close(l.tasks) pending := make([]Task, 0) for { // If there is a pending task, "peek" it off of the set of // pending tasks. var next Task if len(pending) > 0 { next = pending[0] } if next == nil { // If there was no pending task, wait for either a) // l.queue to close, or b) a new task to be submitted. task, ok := <-l.queue if !ok { // If the queue is closed, no more new tasks may // be added. return } // Otherwise, add a new task to the set of tasks to // process immediately, since there is no current // buffer. l.tasks <- task } else { // If there is a pending task, wait for either a) a // write to process the task to become non-blocking, or // b) a new task to enter the queue. select { case task, ok := <-l.queue: if !ok { // If the queue is closed, no more tasks // may be added. return } // Otherwise, add the next task to the set of // pending, active tasks. pending = append(pending, task) case l.tasks <- next: // Or "pop" the peeked task off of the pending // set. pending = pending[1:] } } } } // logTask logs the set of updates from a given task to the sink, then logs a // "done" message, and then marks the task as done. // // By default, the *Logger throttles log entry updates to once per the duration // of time specified by `l.throttle time.Duration`. // // If the duration is 0, or the task is unthrottled (its Throttled() method // returns false), then all entries will be logged. func (l *Logger) logTask(task Task) { defer l.wg.Done() logAll := !task.Throttled() var last time.Time var update *Update for update = range task.Updates() { if logAll || l.throttle == 0 || !update.Throttled(last.Add(l.throttle)) { l.logLine(update.S) last = update.At } } l.log(fmt.Sprintf("%s, done\n", update.S)) } // logLine writes a complete line and moves the cursor to the beginning of the // line.
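// Editor's sketch (not part of the original source): consume() above is an
// instance of the common "unbounded buffer between two channels" pattern. A
// minimal standalone version, with Task replaced by int and using the
// nil-channel trick to disable the send case while nothing is pending:
//
//     func buffer(in <-chan int, out chan<- int) {
//         defer close(out)
//         var pending []int
//         for {
//             var next int
//             var dst chan<- int // nil channel: send case blocks forever
//             if len(pending) > 0 {
//                 next, dst = pending[0], out
//             }
//             select {
//             case v, ok := <-in:
//                 if !ok { return } // like consume(), pending is dropped
//                 pending = append(pending, v)
//             case dst <- next:
//                 pending = pending[1:]
//             }
//         }
//     }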
// // It returns the number of bytes "n" written to the sink and the error "err", // if one was encountered. func (l *Logger) logLine(str string) (n int, err error) { padding := strings.Repeat(" ", tools.MaxInt(0, l.widthFn()-len(str))) return l.log(str + padding + "\r") } // log writes a string verbatim to the sink. // // It returns the number of bytes "n" written to the sink and the error "err", // if one was encountered. func (l *Logger) log(str string) (n int, err error) { return fmt.Fprint(l.sink, str) } git-lfs-2.3.4/git/githistory/log/log_test.go000066400000000000000000000101411317167762300210170ustar00rootroot00000000000000package log import ( "bytes" "strings" "testing" "time" "github.com/stretchr/testify/assert" ) type ChanTask chan *Update func (e ChanTask) Updates() <-chan *Update { return e } func (e ChanTask) Throttled() bool { return true } type UnthrottledChanTask chan *Update func (e UnthrottledChanTask) Updates() <-chan *Update { return e } func (e UnthrottledChanTask) Throttled() bool { return false } func TestLoggerLogsTasks(t *testing.T) { var buf bytes.Buffer task := make(chan *Update) go func() { task <- &Update{"first", time.Now(), false} task <- &Update{"second", time.Now(), false} close(task) }() l := NewLogger(&buf) l.throttle = 0 l.widthFn = func() int { return 0 } l.enqueue(ChanTask(task)) l.Close() assert.Equal(t, "first\rsecond\rsecond, done\n", buf.String()) } func TestLoggerLogsMultipleTasksInOrder(t *testing.T) { var buf bytes.Buffer t1 := make(chan *Update) go func() { t1 <- &Update{"first", time.Now(), false} t1 <- &Update{"second", time.Now(), false} close(t1) }() t2 := make(chan *Update) go func() { t2 <- &Update{"third", time.Now(), false} t2 <- &Update{"fourth", time.Now(), false} close(t2) }() l := NewLogger(&buf) l.throttle = 0 l.widthFn = func() int { return 0 } l.enqueue(ChanTask(t1), ChanTask(t2)) l.Close() assert.Equal(t, strings.Join([]string{ "first\r", "second\r", "second, done\n", "third\r", "fourth\r", "fourth, done\n", }, ""), buf.String()) } func TestLoggerLogsMultipleTasksWithoutBlocking(t *testing.T) { var buf bytes.Buffer l := NewLogger(&buf) l.throttle = 0 t1, t2 := make(chan *Update), make(chan *Update) l.widthFn = func() int { return 0 } l.enqueue(ChanTask(t1)) t1 <- &Update{"first", time.Now(), false} l.enqueue(ChanTask(t2)) close(t1) t2 <- &Update{"second", time.Now(), false} close(t2) l.Close() assert.Equal(t, strings.Join([]string{ "first\r", "first, done\n", "second\r", "second, done\n", }, ""), buf.String()) } func TestLoggerThrottlesWrites(t *testing.T) { var buf bytes.Buffer t1 := make(chan *Update) go func() { start := time.Now() t1 <- &Update{"first", start, false} // t = 0 ms, throttle was open t1 <- &Update{"forced", start.Add(10 * time.Millisecond), true} // t = 10+ε ms, throttle is closed t1 <- &Update{"second", start.Add(10 * time.Millisecond), false} // t = 10+ε ms, throttle is closed t1 <- &Update{"third", start.Add(26 * time.Millisecond), false} // t = 20+ε ms, throttle was open close(t1) // t = 20+2ε ms, throttle is closed }() l := NewLogger(&buf) l.widthFn = func() int { return 0 } l.throttle = 15 * time.Millisecond l.enqueue(ChanTask(t1)) l.Close() assert.Equal(t, strings.Join([]string{ "first\r", "forced\r", "third\r", "third, done\n", }, ""), buf.String()) } func TestLoggerThrottlesLastWrite(t *testing.T) { var buf bytes.Buffer t1 := make(chan *Update) go func() { start := time.Now() t1 <- &Update{"first", start, false} // t = 0 ms, throttle was open t1 <- &Update{"second", start.Add(10 * time.Millisecond), 
false} // t = 10+ε ms, throttle is closed close(t1) // t = 10+2ε ms, throttle is closed }() l := NewLogger(&buf) l.widthFn = func() int { return 0 } l.throttle = 15 * time.Millisecond l.enqueue(ChanTask(t1)) l.Close() assert.Equal(t, strings.Join([]string{ "first\r", "second, done\n", }, ""), buf.String()) } func TestLoggerLogsAllDurableUpdates(t *testing.T) { var buf bytes.Buffer l := NewLogger(&buf) l.widthFn = func() int { return 0 } l.throttle = 15 * time.Minute t1 := make(chan *Update) go func() { t1 <- &Update{"first", time.Now(), false} // t = 0+ε ms, throttle is open t1 <- &Update{"second", time.Now(), false} // t = 0+2ε ms, throttle is closed close(t1) // t = 0+3ε ms, throttle is closed }() l.enqueue(UnthrottledChanTask(t1)) l.Close() assert.Equal(t, strings.Join([]string{ "first\r", "second\r", "second, done\n", }, ""), buf.String()) } git-lfs-2.3.4/git/githistory/log/percentage_task.go000066400000000000000000000042441317167762300223450ustar00rootroot00000000000000package log import ( "fmt" "math" "sync/atomic" "time" ) // PercentageTask is a task that is performed against a known number of // elements. type PercentageTask struct { // msg is the task message. msg string // n is the number of elements whose work has been completed. It is // managed by sync/atomic. n uint64 // total is the total number of elements to execute work upon. total uint64 // ch is a channel which is written to when the task state changes and // is closed when the task is completed. ch chan *Update } func NewPercentageTask(msg string, total uint64) *PercentageTask { p := &PercentageTask{ msg: msg, total: total, ch: make(chan *Update, 1), } p.Count(0) return p } // Count indicates that work has been completed against "n" number of elements, // marking the task as complete if the total "n" given to all invocations of // this method is equal to total. // // Count returns the new total number of (atomically managed) elements that have // been completed. func (c *PercentageTask) Count(n uint64) (new uint64) { if new = atomic.AddUint64(&c.n, n); new > c.total { panic("git/githistory/log: counted too many items") } var percentage float64 if c.total == 0 { percentage = 100 } else { percentage = 100 * float64(new) / float64(c.total) } u := &Update{ S: fmt.Sprintf("%s: %3.f%% (%d/%d)", c.msg, math.Floor(percentage), new, c.total), At: time.Now(), } select { case c.ch <- u: default: // Use a non-blocking write, since it's unimportant that callers // receive all updates. } if new >= c.total { close(c.ch) } return new } // Entry logs a line-delimited task entry. func (t *PercentageTask) Entry(update string) { t.ch <- &Update{ S: fmt.Sprintf("%s\n", update), At: time.Now(), Force: true, } } // Updates implements Task.Updates and returns a channel which is written to // when the state of this task changes, and closed when the task is completed. func (c *PercentageTask) Updates() <-chan *Update { return c.ch } // Throttled implements Task.Throttled and returns true, indicating that this // task is throttled.
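// As an illustrative sketch (not from the original source; "logger" is an
// assumed *Logger in scope and "rewrite" is a hypothetical helper), a caller
// typically counts completed work against a *PercentageTask as it goes:
//
//     task := logger.Percentage("migrate: Rewriting commits", uint64(len(commits)))
//     for _, commit := range commits {
//         rewrite(commit)
//         task.Count(1) // the channel closes once the count reaches the total
//     }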
func (c *PercentageTask) Throttled() bool { return true } git-lfs-2.3.4/git/githistory/log/percentage_task_test.go000066400000000000000000000030231317167762300233760ustar00rootroot00000000000000package log import ( "testing" "github.com/stretchr/testify/assert" ) func TestPercentageTaskCalculuatesPercentages(t *testing.T) { task := NewPercentageTask("example", 10) assert.Equal(t, "example: 0% (0/10)", (<-task.Updates()).S) n := task.Count(3) assert.EqualValues(t, 3, n) assert.Equal(t, "example: 30% (3/10)", (<-task.Updates()).S) } func TestPercentageTaskCalculatesPercentWithoutTotal(t *testing.T) { task := NewPercentageTask("example", 0) select { case v, ok := <-task.Updates(): if ok { assert.Equal(t, "example: 100% (0/0)", v.S) } else { t.Fatal("expected channel to be open") } default: } } func TestPercentageTaskCallsDoneWhenComplete(t *testing.T) { task := NewPercentageTask("example", 10) select { case v, ok := <-task.Updates(): if ok { assert.Equal(t, "example: 0% (0/10)", v.S) } else { t.Fatal("expected channel to be open") } default: } assert.EqualValues(t, 10, task.Count(10)) assert.Equal(t, "example: 100% (10/10)", (<-task.Updates()).S) if _, ok := <-task.Updates(); ok { t.Fatalf("expected channel to be closed") } } func TestPercentageTaskIsThrottled(t *testing.T) { task := NewPercentageTask("example", 10) throttled := task.Throttled() assert.True(t, throttled, "git/githistory/log: expected *PercentageTask to be Throttle()-d") } func TestPercentageTaskPanicsWhenOvercounted(t *testing.T) { task := NewPercentageTask("example", 0) defer func() { assert.Equal(t, "git/githistory/log: counted too many items", recover()) }() task.Count(1) } git-lfs-2.3.4/git/githistory/log/task.go000066400000000000000000000021741317167762300201500ustar00rootroot00000000000000package log import "time" // Task is an interface which encapsulates an activity which can be logged. type Task interface { // Updates returns a channel which is written to with the current state // of the Task when an update is present. It is closed when the task is // complete. Updates() <-chan *Update // Throttled returns whether or not updates from this task should be // limited when being printed to a sink via *log.Logger. // // It is expected to return the same value for a given Task instance. Throttled() bool } // Update is a single message sent (S) from a Task at a given time (At). type Update struct { // S is the message sent in this update. S string // At is the time that this update was sent. At time.Time // Force determines if this update should not be throttled. Force bool } // Throttled determines whether this update should be throttled, based on the // given earliest time of the next update. The caller should determine how often // updates should be throttled. An Update with Force=true is never throttled. func (u *Update) Throttled(next time.Time) bool { return !(u.Force || u.At.After(next)) } git-lfs-2.3.4/git/githistory/log/waiting_task.go000066400000000000000000000015401317167762300216660ustar00rootroot00000000000000package log import ( "fmt" "time" ) // WaitingTask represents a task for which the total number of items to do work // on is unknown. type WaitingTask struct { // ch is used to transmit task updates. ch chan *Update } // NewWaitingTask returns a new *WaitingTask. func NewWaitingTask(msg string) *WaitingTask { ch := make(chan *Update, 1) ch <- &Update{ S: fmt.Sprintf("%s: ...", msg), At: time.Now(), } return &WaitingTask{ch: ch} } // Complete marks the task as completed.
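// As a minimal sketch of the Task interface above (mirroring the ChanTask and
// UnthrottledChanTask helpers defined in log_test.go earlier in this package),
// any type with the two methods can be enqueued on a *Logger:
//
//     type chanTask chan *Update
//
//     func (c chanTask) Updates() <-chan *Update { return c }
//     func (c chanTask) Throttled() bool         { return true }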
func (w *WaitingTask) Complete() { close(w.ch) } // Updates implements Task.Updates and returns a channel which is closed when // Complete() is called. func (w *WaitingTask) Updates() <-chan *Update { return w.ch } // Throttled implements Task.Throttled and returns true, indicating that this // task is Throttled. func (w *WaitingTask) Throttled() bool { return true } git-lfs-2.3.4/git/githistory/log/waiting_task_test.go000066400000000000000000000023211317167762300227230ustar00rootroot00000000000000package log import ( "testing" "github.com/stretchr/testify/assert" ) func TestWaitingTaskDisplaysWaitingStatus(t *testing.T) { task := NewWaitingTask("example") assert.Equal(t, "example: ...", (<-task.Updates()).S) } func TestWaitingTaskCallsDoneWhenComplete(t *testing.T) { task := NewWaitingTask("example") select { case v, ok := <-task.Updates(): if ok { assert.Equal(t, "example: ...", v.S) } else { t.Fatal("expected channel to be open") } default: } task.Complete() if _, ok := <-task.Updates(); ok { t.Fatalf("expected channel to be closed") } } func TestWaitingTaskPanicsWithMultipleDoneCalls(t *testing.T) { task := NewWaitingTask("example") task.Complete() defer func() { if err := recover(); err == nil { t.Fatal("githistory/log: expected panic()") } else { if s, ok := err.(error); ok { assert.Equal(t, "close of closed channel", s.Error()) } else { t.Fatal("githistory/log: expected panic() to implement error") } } }() task.Complete() } func TestWaitingTaskIsThrottled(t *testing.T) { task := NewWaitingTask("example") throttled := task.Throttled() assert.True(t, throttled, "git/githistory/log: expected *WaitingTask to be Throttle()-d") } git-lfs-2.3.4/git/githistory/ref_updater.go000066400000000000000000000035411317167762300207240ustar00rootroot00000000000000package githistory import ( "encoding/hex" "fmt" "strings" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/git/githistory/log" "github.com/git-lfs/git-lfs/tools" ) // refUpdater is a type responsible for moving references from one point in the // Git object graph to another. type refUpdater struct { // CacheFn is a function that returns the SHA1 transformation from an // original hash to a new one. It specifies a "bool" return value // signaling whether or not the given "old" SHA1 was migrated. CacheFn func(old []byte) ([]byte, bool) // Logger logs the progress of reference updating. Logger *log.Logger // Refs is a set of *git.Ref's to migrate. Refs []*git.Ref // Root is the given directory on disk in which the repository is // located. Root string } // UpdateRefs performs the reference update(s) from existing locations (see: // Refs) to their respective new locations in the graph (see CacheFn). // // It creates reflog entries as well as stderr log entries as it progresses // through the reference updates. // // It returns any error encountered, or nil if the reference update(s) was/were // successful.
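// As an illustrative sketch (not from the original source; "logger", "refs",
// and "root" are assumed to be in scope), a refUpdater is typically built with
// a CacheFn backed by an old-SHA to new-SHA mapping:
//
//     migrated := map[string][]byte{ /* hex(old SHA1) -> new SHA1 */ }
//     updater := &refUpdater{
//         CacheFn: func(old []byte) ([]byte, bool) {
//             sha, ok := migrated[hex.EncodeToString(old)]
//             return sha, ok
//         },
//         Logger: logger,
//         Refs:   refs,
//         Root:   root,
//     }
//     err := updater.UpdateRefs()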
func (r *refUpdater) UpdateRefs() error { list := r.Logger.List("migrate: Updating refs") defer list.Complete() var maxNameLen int for _, ref := range r.Refs { maxNameLen = tools.MaxInt(maxNameLen, len(ref.Name)) } for _, ref := range r.Refs { sha1, err := hex.DecodeString(ref.Sha) if err != nil { return errors.Wrapf(err, "could not decode: %q", ref.Sha) } to, ok := r.CacheFn(sha1) if !ok { continue } if err := git.UpdateRefIn(r.Root, ref, to, ""); err != nil { return err } namePadding := tools.MaxInt(maxNameLen-len(ref.Name), 0) list.Entry(fmt.Sprintf(" %s%s\t%s -> %x", ref.Name, strings.Repeat(" ", namePadding), ref.Sha, to)) } return nil } git-lfs-2.3.4/git/githistory/ref_updater_test.go000066400000000000000000000026571317167762300217700ustar00rootroot00000000000000package githistory import ( "testing" "github.com/git-lfs/git-lfs/git" "github.com/stretchr/testify/assert" ) func TestRefUpdaterMovesRefs(t *testing.T) { db := DatabaseFromFixture(t, "linear-history-with-tags.git") root, _ := db.Root() AssertRef(t, db, "refs/tags/middle", HexDecode(t, "228afe30855933151f7a88e70d9d88314fd2f191")) updater := &refUpdater{ CacheFn: func(old []byte) ([]byte, bool) { return HexDecode(t, "d941e4756add6b06f5bee766fcf669f55419f13f"), true }, Refs: []*git.Ref{ { Name: "middle", Sha: "228afe30855933151f7a88e70d9d88314fd2f191", Type: git.RefTypeLocalTag, }, }, Root: root, } err := updater.UpdateRefs() assert.NoError(t, err) AssertRef(t, db, "refs/tags/middle", HexDecode(t, "d941e4756add6b06f5bee766fcf669f55419f13f")) } func TestRefUpdaterIgnoresUnovedRefs(t *testing.T) { db := DatabaseFromFixture(t, "linear-history-with-tags.git") root, _ := db.Root() AssertRef(t, db, "refs/tags/middle", HexDecode(t, "228afe30855933151f7a88e70d9d88314fd2f191")) updater := &refUpdater{ CacheFn: func(old []byte) ([]byte, bool) { return nil, false }, Refs: []*git.Ref{ { Name: "middle", Sha: "228afe30855933151f7a88e70d9d88314fd2f191", Type: git.RefTypeLocalTag, }, }, Root: root, } err := updater.UpdateRefs() assert.NoError(t, err) AssertRef(t, db, "refs/tags/middle", HexDecode(t, "228afe30855933151f7a88e70d9d88314fd2f191")) } git-lfs-2.3.4/git/githistory/rewriter.go000066400000000000000000000401071317167762300202660ustar00rootroot00000000000000package githistory import ( "encoding/hex" "fmt" "io" "os" "path/filepath" "sync" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/git/githistory/log" "github.com/git-lfs/git-lfs/git/odb" ) // Rewriter allows rewriting topologically equivalent Git histories // between two revisions. type Rewriter struct { // mu guards entries and commits (see below) mu *sync.Mutex // entries is a mapping of old tree entries to new (rewritten) ones. // Since TreeEntry contains a []byte (and is therefore not a key-able // type), a unique TreeEntry -> string function is used for map keys. entries map[string]*odb.TreeEntry // commits is a mapping of old commit SHAs to new ones, where the ASCII // hex encoding of the SHA1 values are used as map keys. commits map[string][]byte // filter is an optional value used to specify which tree entries // (blobs, subtrees) are modifiable given a BlobFn. If non-nil, this // filter will cull out any unmodifiable subtrees and blobs. filter *filepathfilter.Filter // db is the *ObjectDatabase from which blobs, commits, and trees are // loaded. db *odb.ObjectDatabase // l is the *log.Logger to which updates are written.
l *log.Logger } // RewriteOptions is an options type given to the Rewrite() function. type RewriteOptions struct { // Include is the list of refs; commits reachable from those refs // will be included. Include []string // Exclude is the list of refs; commits reachable from those refs // will be excluded. Exclude []string // UpdateRefs specifies whether the Rewriter should move refs from the // original graph onto the migrated one. If true, the refs will be // moved, and a reflog entry will be created. UpdateRefs bool // Verbose mode prints migrated objects. Verbose bool // BlobFn specifies a function to rewrite blobs. // // It is called once per unique, unchanged path. That is to say, if // /a/foo and /a/bar contain identical contents, the BlobFn will be // called twice: once for /a/foo and once for /a/bar, but no more on // each blob for subsequent revisions, so long as each entry remains // unchanged. BlobFn BlobRewriteFn // TreeCallbackFn specifies a function to rewrite trees after they have // been reassembled by calling the above BlobFn on all existing tree // entries. TreeCallbackFn TreeCallbackFn } // blobFn returns a usable BlobRewriteFn, either the one that was given in the // *RewriteOptions, or a noopBlobFn. func (r *RewriteOptions) blobFn() BlobRewriteFn { if r.BlobFn == nil { return noopBlobFn } return r.BlobFn } // treeFn returns a usable TreeCallbackFn, either the one that was given in the // *RewriteOptions, or a noopTreeFn. func (r *RewriteOptions) treeFn() TreeCallbackFn { if r.TreeCallbackFn == nil { return noopTreeFn } return r.TreeCallbackFn } // BlobRewriteFn is a mapping function that takes a given blob and returns a // new, modified blob. If it returns an error, the new blob will not be written // and instead the error will be returned from the Rewrite() function. // // Invocations of an instance of BlobRewriteFn are not expected to store the // returned blobs in the *git/odb.ObjectDatabase. // // The path argument is given to be an absolute path to the tree entry being // rewritten, where the repository root is the root of the path given. For // instance, a file "b.txt" in directory "dir" would be given as "/dir/b.txt", // whereas a file "a.txt" in the root would be given as "/a.txt". // // As above, the path separators are OS specific, and equivalent to the result // of filepath.Join(...) or os.PathSeparator. type BlobRewriteFn func(path string, b *odb.Blob) (*odb.Blob, error) // TreeCallbackFn specifies a function to call before writing a re-written tree // to the object database. The TreeCallbackFn can return a modified tree to be // written to the object database instead of one generated from calling BlobFn // on all of the tree entries. // // Trees returned from a TreeCallbackFn MUST have all objects referenced in the // entryset already written to the object database. // // TreeCallbackFn can be nil, and will therefore exhibit behavior equivalent to // only calling the BlobFn on existing tree entries. // // If the TreeCallbackFn returns an error, it will be returned from the // Rewrite() invocation. type TreeCallbackFn func(path string, t *odb.Tree) (*odb.Tree, error) type rewriterOption func(*Rewriter) var ( // WithFilter is an optional argument given to the NewRewriter // constructor function to limit invocations of the BlobRewriteFn to // only pathspecs that match the given *filepathfilter.Filter.
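// As an illustrative sketch (not from the original docs; it assumes "io" and
// "strings" imports, as the rewriter tests later in this package do), a
// BlobRewriteFn that appends a trailing newline to every blob could be
// written as:
//
//     fn := func(path string, b *odb.Blob) (*odb.Blob, error) {
//         return &odb.Blob{
//             Contents: io.MultiReader(b.Contents, strings.NewReader("\n")),
//             Size:     b.Size + 1,
//         }, nil
//     }
//
// (The rewriter tests below use this same io.MultiReader pattern.)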
WithFilter = func(filter *filepathfilter.Filter) rewriterOption { return func(r *Rewriter) { r.filter = filter } } // WithLoggerTo logs updates caused by the *git/githistory.Rewriter to // the given io.Writer "sink". WithLoggerTo = func(sink io.Writer) rewriterOption { return WithLogger(log.NewLogger(sink)) } // WithLogger causes updates from the *git/githistory.Rewriter to // be given to the provided logger, "l". WithLogger = func(l *log.Logger) rewriterOption { return func(r *Rewriter) { r.l = l } } // noopBlobFn is a no-op implementation of the BlobRewriteFn. It returns // the blob that it was given, and returns no error. noopBlobFn = func(path string, b *odb.Blob) (*odb.Blob, error) { return b, nil } // noopTreeFn is a no-op implementation of the TreeCallbackFn. It returns // the tree that it was given, and returns no error. noopTreeFn = func(path string, t *odb.Tree) (*odb.Tree, error) { return t, nil } ) // NewRewriter constructs a *Rewriter from the given *ObjectDatabase instance. func NewRewriter(db *odb.ObjectDatabase, opts ...rewriterOption) *Rewriter { rewriter := &Rewriter{ mu: new(sync.Mutex), entries: make(map[string]*odb.TreeEntry), commits: make(map[string][]byte), db: db, } for _, opt := range opts { opt(rewriter) } return rewriter } // Rewrite rewrites the range of commits given by *RewriteOptions.{Include,Exclude} // using the BlobRewriteFn to rewrite the individual blobs. func (r *Rewriter) Rewrite(opt *RewriteOptions) ([]byte, error) { // First, obtain a list of commits to rewrite. commits, err := r.commitsToMigrate(opt) if err != nil { return nil, err } var perc *log.PercentageTask if opt.UpdateRefs { perc = r.l.Percentage("migrate: Rewriting commits", uint64(len(commits))) } else { perc = r.l.Percentage("migrate: Examining commits", uint64(len(commits))) } var vPerc *log.PercentageTask if opt.Verbose { vPerc = perc } // Keep track of the last commit that we rewrote. Callers often want // this so that they can perform a git-update-ref(1). var tip []byte for _, oid := range commits { // Load the original commit to access the data necessary in // order to rewrite it. original, err := r.db.Commit(oid) if err != nil { return nil, err } // Rewrite the tree given at that commit. rewrittenTree, err := r.rewriteTree(oid, original.TreeID, "", opt.blobFn(), opt.treeFn(), vPerc) if err != nil { return nil, err } // Create a new list of parents from the original commit to // point at the rewritten parents in order to create a // topologically equivalent DAG. // // This operation is safe since we are visiting the commits in // reverse topological order and therefore have seen all parents // before children (in other words, r.uncacheCommit(...) will // always return a value, if the prospective parent is a part of // the migration). rewrittenParents := make([][]byte, 0, len(original.ParentIDs)) for _, originalParent := range original.ParentIDs { rewrittenParent, ok := r.uncacheCommit(originalParent) if !ok { // If we haven't seen the parent before, this // means that we're doing a partial migration // and the parent that we're looking for isn't // included. // // Use the original parent to properly link // history across the migration boundary. rewrittenParent = originalParent } rewrittenParents = append(rewrittenParents, rewrittenParent) } // Construct a new commit using the original header information, // but the rewritten set of parents as well as root tree.
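// Editor's worked example (not in the original source): for a partial
// migration of history A <- B that includes only B, the cache lookup for A
// misses, so the rewritten commit B' keeps the unmigrated A as its parent,
// stitching old and new history together across the migration boundary:
//
//     original:  A <- B
//     rewritten: A <- B'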
rewrittenCommit := &odb.Commit{ Author: original.Author, Committer: original.Committer, ExtraHeaders: original.ExtraHeaders, Message: original.Message, ParentIDs: rewrittenParents, TreeID: rewrittenTree, } var newSha []byte if original.Equal(rewrittenCommit) { newSha = make([]byte, len(oid)) copy(newSha, oid) } else { newSha, err = r.db.WriteCommit(rewrittenCommit) if err != nil { return nil, err } } // Cache that commit so that we can reassign children of this // commit. r.cacheCommit(oid, newSha) // Increment the percentage displayed in the terminal. perc.Count(1) // Move the tip forward. tip = newSha } if opt.UpdateRefs { refs, err := r.refsToMigrate() if err != nil { return nil, errors.Wrap(err, "could not find refs to update") } root, _ := r.db.Root() updater := &refUpdater{ CacheFn: r.uncacheCommit, Logger: r.l, Refs: refs, Root: root, } if err := updater.UpdateRefs(); err != nil { return nil, errors.Wrap(err, "could not update refs") } } return tip, err } // rewriteTree is a recursive function which rewrites a tree given by the ID // "treeOID" and path "path". It uses the given BlobRewriteFn to rewrite all blobs // within the tree, either calling that function or recursing down into subtrees // by re-assigning the SHA. // // Once it is done assembling the entries in a given subtree, it then calls the // TreeCallbackFn, "tfn" to perform a final traversal of the subtree before // saving it to the object database. // // It returns the new SHA of the rewritten tree, or an error if the tree was // unable to be rewritten. func (r *Rewriter) rewriteTree(commitOID []byte, treeOID []byte, path string, fn BlobRewriteFn, tfn TreeCallbackFn, perc *log.PercentageTask) ([]byte, error) { tree, err := r.db.Tree(treeOID) if err != nil { return nil, err } entries := make([]*odb.TreeEntry, 0, len(tree.Entries)) for _, entry := range tree.Entries { path := filepath.Join(path, entry.Name) if !r.allows(entry.Type(), path) { entries = append(entries, entry) continue } if cached := r.uncacheEntry(entry); cached != nil { entries = append(entries, cached) continue } var oid []byte switch entry.Type() { case odb.BlobObjectType: oid, err = r.rewriteBlob(commitOID, entry.Oid, path, fn, perc) case odb.TreeObjectType: oid, err = r.rewriteTree(commitOID, entry.Oid, path, fn, tfn, perc) default: oid = entry.Oid } if err != nil { return nil, err } entries = append(entries, r.cacheEntry(entry, &odb.TreeEntry{ Filemode: entry.Filemode, Name: entry.Name, Oid: oid, })) } rewritten, err := tfn(string(os.PathSeparator)+path, &odb.Tree{Entries: entries}) if err != nil { return nil, err } if tree.Equal(rewritten) { return treeOID, nil } return r.db.WriteTree(rewritten) } func (r *Rewriter) allows(typ odb.ObjectType, abs string) bool { switch typ { case odb.BlobObjectType: return r.Filter().Allows(abs) case odb.TreeObjectType: return r.Filter().HasPrefix(abs) case odb.CommitObjectType: return true default: panic(fmt.Sprintf("git/githistory: unknown entry type: %s", typ)) } } // rewriteBlob calls the given BlobRewriteFn "fn" on a blob given in the object // database by the SHA1 "from" []byte. It writes and returns the new blob SHA, // or an error if either the BlobRewriteFn returned one, or if the object could // not be loaded/saved.
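// Editor's sketch (not from the original source): when "fn" returns a blob
// equal to its input, as the package's noopBlobFn does, the function below
// returns the original SHA and writes nothing, so a no-op migration leaves
// the object database untouched:
//
//     sha, err := r.rewriteBlob(commitOID, from, "/a.txt", noopBlobFn, nil)
//     // on success, sha is the original "from" SHA; no new object is written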
func (r *Rewriter) rewriteBlob(commitOID, from []byte, path string, fn BlobRewriteFn, perc *log.PercentageTask) ([]byte, error) { blob, err := r.db.Blob(from) if err != nil { return nil, err } b, err := fn(path, blob) if err != nil { return nil, err } if !blob.Equal(b) { sha, err := r.db.WriteBlob(b) if err != nil { return nil, err } // Close the source blob, so long as it is not equal to the // rewritten blob. If the two are equal, as in the check above // this comment, calling r.db.WriteBlob(b) will have already // closed both "b" and "blob" since they are the same. // // Closing an *os.File twice causes an `os.ErrInvalid` to be // returned. if err = blob.Close(); err != nil { return nil, err } if perc != nil { perc.Entry(fmt.Sprintf("migrate: commit %s: %s", hex.EncodeToString(commitOID), path)) } return sha, nil } // Close the source blob, since it is identical to the rewritten blob, // but neither were written. if err := blob.Close(); err != nil { return nil, err } return from, nil } // commitsToMigrate returns an in-memory copy of a list of commits according to // the output of git-rev-list(1) (given the *RewriteOptions), where each // outputted commit is 20 bytes of raw SHA1. // // If any error was encountered, it will be returned. func (r *Rewriter) commitsToMigrate(opt *RewriteOptions) ([][]byte, error) { waiter := r.l.Waiter("migrate: Sorting commits") defer waiter.Complete() scanner, err := git.NewRevListScanner( opt.Include, opt.Exclude, r.scannerOpts()) if err != nil { return nil, err } var commits [][]byte for scanner.Scan() { commits = append(commits, scanner.OID()) } if err = scanner.Err(); err != nil { return nil, err } if err = scanner.Close(); err != nil { return nil, err } return commits, nil } // refsToMigrate returns a list of references to migrate, or an error if loading // those references failed. func (r *Rewriter) refsToMigrate() ([]*git.Ref, error) { var refs []*git.Ref var err error if root, ok := r.db.Root(); ok { refs, err = git.AllRefsIn(root) } else { refs, err = git.AllRefs() } if err != nil { return nil, err } var local []*git.Ref for _, ref := range refs { if ref.Type == git.RefTypeRemoteBranch || ref.Type == git.RefTypeRemoteTag { continue } local = append(local, ref) } return local, nil } // scannerOpts returns a *git.ScanRefsOptions instance to be given to the // *git.RevListScanner. // // If the database this *Rewriter is operating on is backed by a root directory // (i.e., it is not in memory), it re-assigns the scanner's working directory to // that root. func (r *Rewriter) scannerOpts() *git.ScanRefsOptions { opts := &git.ScanRefsOptions{ Mode: git.ScanRefsMode, Order: git.TopoRevListOrder, Reverse: true, CommitsOnly: true, SkippedRefs: make([]string, 0), Mutex: new(sync.Mutex), Names: make(map[string]string), } if root, ok := r.db.Root(); ok { opts.WorkingDir = root } return opts } // Filter returns the filter used by this *Rewriter to filter subtrees and blobs // (see above). func (r *Rewriter) Filter() *filepathfilter.Filter { return r.filter } // cacheEntry caches the given "from" entry so that it is always rewritten as // a *TreeEntry equivalent to "to". func (r *Rewriter) cacheEntry(from, to *odb.TreeEntry) *odb.TreeEntry { r.mu.Lock() defer r.mu.Unlock() r.entries[r.entryKey(from)] = to return to } // uncacheEntry returns a *TreeEntry that is cached from the given *TreeEntry // "from". That is to say, it returns the *TreeEntry that "from" should be // rewritten to, or nil if none could be found.
func (r *Rewriter) uncacheEntry(from *odb.TreeEntry) *odb.TreeEntry { r.mu.Lock() defer r.mu.Unlock() return r.entries[r.entryKey(from)] } // entryKey returns a unique key for a given *TreeEntry "e". func (r *Rewriter) entryKey(e *odb.TreeEntry) string { return fmt.Sprintf("%s:%x", e.Name, e.Oid) } // cacheCommit caches the given "from" commit so that it is always rewritten as // a *git/odb.Commit equivalent to "to". func (r *Rewriter) cacheCommit(from, to []byte) { r.mu.Lock() defer r.mu.Unlock() r.commits[hex.EncodeToString(from)] = to } // uncacheCommit returns the SHA of the rewritten commit that is cached from the // given commit "from". That is to say, it returns the SHA that "from" should be // rewritten to and true, or nil and false if none could be found. func (r *Rewriter) uncacheCommit(from []byte) ([]byte, bool) { r.mu.Lock() defer r.mu.Unlock() c, ok := r.commits[hex.EncodeToString(from)] return c, ok } git-lfs-2.3.4/git/githistory/rewriter_test.go000066400000000000000000000255571317167762300213340ustar00rootroot00000000000000package githistory import ( "bytes" "encoding/hex" "io" "io/ioutil" "path/filepath" "reflect" "strconv" "strings" "testing" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git/odb" "github.com/stretchr/testify/assert" ) func TestRewriterRewritesHistory(t *testing.T) { db := DatabaseFromFixture(t, "linear-history.git") r := NewRewriter(db) tip, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) { contents, err := ioutil.ReadAll(b.Contents) if err != nil { return nil, err } n, err := strconv.Atoi(string(contents)) if err != nil { return nil, err } rewritten := strconv.Itoa(n + 1) return &odb.Blob{ Contents: strings.NewReader(rewritten), Size: int64(len(rewritten)), }, nil }, }) assert.Nil(t, err) tree1 := "ad0aebd16e34cf047820994ea7538a6d4a111082" tree2 := "6e07bd31cb70c4add2c973481ad4fa38b235ca69" tree3 := "c5decfe1fcf39b8c489f4a0bf3b3823676339f80" // After rewriting, the HEAD state of the repository should contain a // tree identical to: // // 100644 blob bf0d87ab1b2b0ec1a11a3973d2845b42413d9767 hello.txt AssertCommitTree(t, db, hex.EncodeToString(tip), tree1) AssertBlobContents(t, db, tree1, "hello.txt", "4") // After rewriting, the HEAD~1 state of the repository should contain a // tree identical to: // // 100644 blob e440e5c842586965a7fb77deda2eca68612b1f53 hello.txt AssertCommitParent(t, db, hex.EncodeToString(tip), "4aaa3f49ffeabbb874250fe13ffeb8c683aba650") AssertCommitTree(t, db, "4aaa3f49ffeabbb874250fe13ffeb8c683aba650", tree2) AssertBlobContents(t, db, tree2, "hello.txt", "3") // After rewriting, the HEAD~2 state of the repository should contain a // tree identical to: // // 100644 blob d8263ee9860594d2806b0dfd1bfd17528b0ba2a4 hello.txt AssertCommitParent(t, db, "4aaa3f49ffeabbb874250fe13ffeb8c683aba650", "24a341e1ff75addc22e336a8d87f82ba56b86fcf") AssertCommitTree(t, db, "24a341e1ff75addc22e336a8d87f82ba56b86fcf", tree3) AssertBlobContents(t, db, tree3, "hello.txt", "2") } func TestRewriterRewritesOctopusMerges(t *testing.T) { db := DatabaseFromFixture(t, "octopus-merge.git") r := NewRewriter(db) tip, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) { return &odb.Blob{ Contents: io.MultiReader(b.Contents, strings.NewReader("_new")), Size: b.Size + int64(len("_new")), }, nil }, }) assert.Nil(t, err) tree := "8a56716daa78325c3d0433cc163890969810b0da" //
After rewriting, the HEAD state of the repository should contain a // tree identical to: // // 100644 blob 309f7fc2bfd9ae77b4131cf9cbcc3b548c42ca57 a.txt // 100644 blob 70470dc26cb3eef54fe3dcba53066f7ca7c495c0 b.txt // 100644 blob f2557f74fd5b60f959baf77091782089761e2dc3 hello.txt AssertCommitTree(t, db, hex.EncodeToString(tip), tree) AssertBlobContents(t, db, tree, "a.txt", "a_new") AssertBlobContents(t, db, tree, "b.txt", "b_new") AssertBlobContents(t, db, tree, "hello.txt", "hello_new") // And should contain the following parents: // // parent 1fe2b9577d5610e8d8fb2c3030534036fb648393 // parent ca447959bdcd20253d69b227bcc7c2e1d3126d5c AssertCommitParent(t, db, hex.EncodeToString(tip), "1fe2b9577d5610e8d8fb2c3030534036fb648393") AssertCommitParent(t, db, hex.EncodeToString(tip), "ca447959bdcd20253d69b227bcc7c2e1d3126d5c") // And each of those parents should contain the root commit as their own // parent: AssertCommitParent(t, db, "1fe2b9577d5610e8d8fb2c3030534036fb648393", "9237567f379b3c83ddf53ad9a2ae3755afb62a09") AssertCommitParent(t, db, "ca447959bdcd20253d69b227bcc7c2e1d3126d5c", "9237567f379b3c83ddf53ad9a2ae3755afb62a09") } func TestRewriterVisitsPackedObjects(t *testing.T) { db := DatabaseFromFixture(t, "packed-objects.git") r := NewRewriter(db) var contents []byte _, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) { var err error contents, err = ioutil.ReadAll(b.Contents) if err != nil { return nil, err } return &odb.Blob{ Contents: bytes.NewReader(contents), Size: int64(len(contents)), }, nil }, }) assert.NoError(t, err) assert.Equal(t, string(contents), "Hello, world!\n") } func TestRewriterDoesntVisitUnchangedSubtrees(t *testing.T) { db := DatabaseFromFixture(t, "repeated-subtrees.git") r := NewRewriter(db) seen := make(map[string]int) _, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) { seen[path] = seen[path] + 1 return b, nil }, }) assert.Nil(t, err) assert.Equal(t, 2, seen["a.txt"]) assert.Equal(t, 1, seen[filepath.Join("subdir", "b.txt")]) } func TestRewriterVisitsUniqueEntriesWithIdenticalContents(t *testing.T) { db := DatabaseFromFixture(t, "identical-blobs.git") r := NewRewriter(db) tip, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) { if path == "b.txt" { return b, nil } return &odb.Blob{ Contents: strings.NewReader("changed"), Size: int64(len("changed")), }, nil }, }) assert.Nil(t, err) tree := "bbbe0a7676523ae02234bfe874784ca2380c2d4b" AssertCommitTree(t, db, hex.EncodeToString(tip), tree) // After rewriting, the HEAD state of the repository should contain a // tree identical to: // // 100644 blob 21fb1eca31e64cd3914025058b21992ab76edcf9 a.txt // 100644 blob 94f3610c08588440112ed977376f26a8fba169b0 b.txt AssertBlobContents(t, db, tree, "a.txt", "changed") AssertBlobContents(t, db, tree, "b.txt", "original") } func TestRewriterIgnoresPathsThatDontMatchFilter(t *testing.T) { include := []string{"*.txt"} exclude := []string{"subdir/*.txt"} filter := filepathfilter.New(include, exclude) db := DatabaseFromFixture(t, "non-repeated-subtrees.git") r := NewRewriter(db, WithFilter(filter)) seen := make(map[string]int) _, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) { seen[path] = seen[path] + 1 return b, nil }, }) assert.Nil(t, err) 
assert.Equal(t, 1, seen["a.txt"]) assert.Equal(t, 0, seen[filepath.Join("subdir", "b.txt")]) } func TestRewriterAllowsAdditionalTreeEntries(t *testing.T) { db := DatabaseFromFixture(t, "linear-history.git") r := NewRewriter(db) extra, err := db.WriteBlob(&odb.Blob{ Contents: strings.NewReader("extra\n"), Size: int64(len("extra\n")), }) assert.Nil(t, err) tip, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"}, BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) { return b, nil }, TreeCallbackFn: func(path string, tr *odb.Tree) (*odb.Tree, error) { return &odb.Tree{ Entries: append(tr.Entries, &odb.TreeEntry{ Name: "extra.txt", Filemode: 0100644, Oid: extra, }), }, nil }, }) assert.Nil(t, err) tree1 := "40c2eb627a3b8e84b82a47a973d32960f3898b6a" tree2 := "d7a5bcb69f2cd2652a014663a948952ea603c2c0" tree3 := "45b752554d128f85bf23d7c3ddf48c47cbc345c8" // After rewriting, the HEAD state of the repository should contain a // tree identical to: // // 100644 blob e440e5c842586965a7fb77deda2eca68612b1f53 hello.txt // 100644 blob 0f2287157f7cb0dd40498c7a92f74b6975fa2d57 extra.txt AssertCommitTree(t, db, hex.EncodeToString(tip), tree1) AssertBlobContents(t, db, tree1, "hello.txt", "3") AssertBlobContents(t, db, tree1, "extra.txt", "extra\n") // After rewriting, the HEAD~1 state of the repository should contain a // tree identical to: // // 100644 blob d8263ee9860594d2806b0dfd1bfd17528b0ba2a4 hello.txt // 100644 blob 0f2287157f7cb0dd40498c7a92f74b6975fa2d57 extra.txt AssertCommitParent(t, db, hex.EncodeToString(tip), "45af5deb9a25bc4069b15c1f5bdccb0340978707") AssertCommitTree(t, db, "45af5deb9a25bc4069b15c1f5bdccb0340978707", tree2) AssertBlobContents(t, db, tree2, "hello.txt", "2") AssertBlobContents(t, db, tree2, "extra.txt", "extra\n") // After rewriting, the HEAD~2 state of the repository should contain a // tree identical to: // // 100644 blob 56a6051ca2b02b04ef92d5150c9ef600403cb1de hello.txt // 100644 blob 0f2287157f7cb0dd40498c7a92f74b6975fa2d57 extra.txt AssertCommitParent(t, db, "45af5deb9a25bc4069b15c1f5bdccb0340978707", "99f6bd7cd69b45494afed95b026f3e450de8304f") AssertCommitTree(t, db, "99f6bd7cd69b45494afed95b026f3e450de8304f", tree3) AssertBlobContents(t, db, tree3, "hello.txt", "1") AssertBlobContents(t, db, tree3, "extra.txt", "extra\n") } func TestHistoryRewriterUseOriginalParentsForPartialMigration(t *testing.T) { db := DatabaseFromFixture(t, "linear-history-with-tags.git") r := NewRewriter(db) tip, err := r.Rewrite(&RewriteOptions{ Include: []string{"refs/heads/master"}, Exclude: []string{"refs/tags/middle"}, BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) { return b, nil }, }) // After rewriting, the rewriter should have only modified the latest // commit (HEAD), and excluded the first two, both reachable by // refs/tags/middle. 
// // This should modify one commit, and appropriately link the parent as // follows: // // tree 20ecedad3e74a113695fe5f00ab003694e2e1e9c // parent 228afe30855933151f7a88e70d9d88314fd2f191 // author Taylor Blau 1496954214 -0600 // committer Taylor Blau 1496954214 -0600 // // some.txt: c expectedParent := "228afe30855933151f7a88e70d9d88314fd2f191" assert.NoError(t, err) AssertCommitParent(t, db, hex.EncodeToString(tip), expectedParent) } func TestHistoryRewriterUpdatesRefs(t *testing.T) { db := DatabaseFromFixture(t, "linear-history.git") r := NewRewriter(db) AssertRef(t, db, "refs/heads/master", HexDecode(t, "e669b63f829bfb0b91fc52a5bcea53dd7977a0ee")) tip, err := r.Rewrite(&RewriteOptions{ Include: []string{"refs/heads/master"}, UpdateRefs: true, BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) { suffix := strings.NewReader("_suffix") return &odb.Blob{ Contents: io.MultiReader(b.Contents, suffix), Size: b.Size + int64(suffix.Len()), }, nil }, }) assert.Nil(t, err) c1 := hex.EncodeToString(tip) c2 := "66561fe3ae68651658e18e48053dcfe66a2e9da1" c3 := "8268d8486c48024a871fa42fc487dbeabd6e3d86" AssertRef(t, db, "refs/heads/master", tip) AssertCommitParent(t, db, c1, c2) AssertCommitParent(t, db, c2, c3) } func TestHistoryRewriterReturnsFilter(t *testing.T) { f := filepathfilter.New([]string{"a"}, []string{"b"}) r := NewRewriter(nil, WithFilter(f)) expected := reflect.ValueOf(f).Elem().Addr().Pointer() got := reflect.ValueOf(r.Filter()).Elem().Addr().Pointer() assert.Equal(t, expected, got, "git/githistory: expected Rewriter.Filter() to return same *filepathfilter.Filter instance") } git-lfs-2.3.4/git/object_scanner.go000066400000000000000000000137131317167762300172000ustar00rootroot00000000000000package git import ( "bufio" "bytes" "fmt" "io" "io/ioutil" "strconv" "github.com/git-lfs/git-lfs/errors" "github.com/rubyist/tracerx" ) // object represents a generic Git object of any type. type object struct { // Contents reads Git's internal object representation. Contents *io.LimitedReader // Oid is the ID of the object. Oid string // Size is the size in bytes of the object. Size int64 // Type is the type of the object being held. Type string } // ObjectScanner is a scanner type that scans for Git objects reference-able in // Git's object database by their unique OID. type ObjectScanner struct { // object is the object that the ObjectScanner last scanned, or nil. object *object // err is the error (if any) that the ObjectScanner encountered during // its last scan, or nil. err error // from is the buffered source of input to the *ObjectScanner. It // expects input in the form described by // https://git-scm.com/docs/git-cat-file. from *bufio.Reader // to is a writer which accepts the object's OID to be scanned. to io.Writer // closeFn is an optional function that is run before the ObjectScanner // is closed. It is designated to clean up and close any resources held // by the ObjectScanner during runtime. closeFn func() error } // NewObjectScanner constructs a new instance of the `*ObjectScanner` type and // returns it. It backs the ObjectScanner with an invocation of the `git // cat-file --batch` command. If any errors were encountered while starting that // command, they will be returned immediately. // // Otherwise, an `*ObjectScanner` is returned with no error. 
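// A typical (hypothetical) use scans an OID and then reads the scanned
// object's contents, closing the scanner once finished:
//
//	scanner, err := NewObjectScanner()
//	if err != nil {
//		return err
//	}
//	defer scanner.Close()
//	if scanner.Scan("e69de29bb2d1d6434b8b29ae775ad8c2e48c5391") {
//		contents, _ := ioutil.ReadAll(scanner.Contents())
//		fmt.Printf("%s (%d bytes): %q\n", scanner.Type(), scanner.Size(), contents)
//	}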
func NewObjectScanner() (*ObjectScanner, error) { cmd := gitNoLFS("cat-file", "--batch") stdout, err := cmd.StdoutPipe() if err != nil { return nil, errors.Wrap(err, "open stdout") } stdin, err := cmd.StdinPipe() if err != nil { return nil, errors.Wrap(err, "open stdin") } stderr, err := cmd.StderrPipe() if err != nil { return nil, errors.Wrap(err, "open stderr") } closeFn := func() error { if err := stdin.Close(); err != nil { return err } msg, _ := ioutil.ReadAll(stderr) if err = cmd.Wait(); err != nil { return errors.Errorf("Error in git cat-file --batch: %v %v", err, string(msg)) } return nil } tracerx.Printf("run_command: git cat-file --batch") if err := cmd.Start(); err != nil { return nil, err } return &ObjectScanner{ from: bufio.NewReaderSize(stdout, 16384), to: stdin, closeFn: closeFn, }, nil } // NewObjectScannerFrom returns a new `*ObjectScanner` populated with data from // the given `io.Reader`, "r". It supplies no close function, and discards any // input given to the Scan() function. func NewObjectScannerFrom(r io.Reader) *ObjectScanner { return &ObjectScanner{ from: bufio.NewReader(r), to: ioutil.Discard, } } // Scan scans for a particular object given by the "oid" parameter. Once the // scan is complete, the Contents(), Sha1(), Size() and Type() functions may be // called and will return data corresponding to the given OID. // // Scan() returns whether the scan was successful, or in other words, whether or // not the scanner can continue to progress. func (s *ObjectScanner) Scan(oid string) bool { if err := s.reset(); err != nil { s.err = err return false } obj, err := s.scan(oid) s.object = obj if err != nil { if err != io.EOF { s.err = err } return false } return true } // Close closes and frees any resources owned by the *ObjectScanner that it is // called upon. If there were any errors in freeing that (those) resource(s), it // will be returned, otherwise nil. func (s *ObjectScanner) Close() error { if s == nil { return nil } if s.closeFn != nil { return s.closeFn() } return nil } // Contents returns an io.Reader which reads Git's representation of the object // that was last scanned for. func (s *ObjectScanner) Contents() io.Reader { return s.object.Contents } // Sha1 returns the SHA1 object ID of the object that was last scanned for. func (s *ObjectScanner) Sha1() string { return s.object.Oid } // Size returns the size in bytes of the object that was last scanned for. func (s *ObjectScanner) Size() int64 { return s.object.Size } // Type returns the type of the object that was last scanned for. func (s *ObjectScanner) Type() string { return s.object.Type } // Err returns the error (if any) that was encountered during the last Scan() // operation. func (s *ObjectScanner) Err() error { return s.err } // reset resets the `*ObjectScanner` to scan again by advancing the reader (if // necessary) and clearing both the object and error fields on the // `*ObjectScanner` instance.
func (s *ObjectScanner) reset() error { if s.object != nil { if s.object.Contents != nil { remaining := s.object.Contents.N if _, err := io.CopyN(ioutil.Discard, s.object.Contents, remaining); err != nil { return errors.Wrap(err, "unwind contents") } } // Consume extra LF inserted by cat-file if _, err := s.from.ReadByte(); err != nil { return err } } s.object, s.err = nil, nil return nil } type missingErr struct { oid string } func (m *missingErr) Error() string { return fmt.Sprintf("missing object: %s", m.oid) } func IsMissingObject(err error) bool { _, ok := err.(*missingErr) return ok } // scan scans for and populates a new Git object given an OID. func (s *ObjectScanner) scan(oid string) (*object, error) { if _, err := fmt.Fprintln(s.to, oid); err != nil { return nil, err } l, err := s.from.ReadBytes('\n') if err != nil { return nil, err } fields := bytes.Fields(l) switch len(fields) { case 2: if string(fields[1]) == "missing" { return nil, &missingErr{oid: oid} } break case 3: oid = string(fields[0]) typ := string(fields[1]) size, _ := strconv.Atoi(string(fields[2])) contents := io.LimitReader(s.from, int64(size)) return &object{ Contents: contents.(*io.LimitedReader), Oid: oid, Size: int64(size), Type: typ, }, nil } return nil, errors.Errorf("invalid line: %q", l) } git-lfs-2.3.4/git/odb/000077500000000000000000000000001317167762300144315ustar00rootroot00000000000000git-lfs-2.3.4/git/odb/blob.go000066400000000000000000000044311317167762300157000ustar00rootroot00000000000000package odb import ( "bytes" "io" ) // Blob represents a Git object of type "blob". type Blob struct { // Size is the total uncompressed size of the blob's contents. Size int64 // Contents is a reader that yields the uncompressed blob contents. It // may only be read once. It may or may not implement io.ReadSeeker. Contents io.Reader // closeFn is a function that is called to free any resources held by // the Blob. In particular, this will close a file, if the Blob is // being read from a file on disk. closeFn func() error } // NewBlobFromBytes returns a new *Blob that yields the data given. func NewBlobFromBytes(contents []byte) *Blob { return &Blob{ Contents: bytes.NewReader(contents), Size: int64(len(contents)), } } // Type implements Object.ObjectType by returning the correct object type for // Blobs, BlobObjectType. func (b *Blob) Type() ObjectType { return BlobObjectType } // Decode implements Object.Decode and decodes the uncompressed blob contents // being read. It returns the number of bytes that it consumed off of the // stream, which is always zero. // // If any errors are encountered while reading the blob, they will be returned. func (b *Blob) Decode(r io.Reader, size int64) (n int, err error) { b.Size = size b.Contents = io.LimitReader(r, size) b.closeFn = func() error { if closer, ok := r.(io.Closer); ok { return closer.Close() } return nil } return 0, nil } // Encode encodes the blob's contents to the given io.Writer, "w". If there was // any error copying the blob's contents, that error will be returned. // // Otherwise, the number of bytes written will be returned. func (b *Blob) Encode(to io.Writer) (n int, err error) { nn, err := io.Copy(to, b.Contents) return int(nn), err } // Close closes any resources held by the open Blob, or returns nil if there // were no errors.
func (b *Blob) Close() error { if b.closeFn == nil { return nil } return b.closeFn() } // Equal returns whether the receiving and given blobs are equal, or in other // words, whether they are represented by the same SHA-1 when saved to the // object database. func (b *Blob) Equal(other *Blob) bool { if (b == nil) != (other == nil) { return false } if b != nil { return b.Contents == other.Contents && b.Size == other.Size } return true } git-lfs-2.3.4/git/odb/blob_test.go000066400000000000000000000046341317167762300167440ustar00rootroot00000000000000package odb import ( "bytes" "errors" "io/ioutil" "strings" "sync/atomic" "testing" "github.com/stretchr/testify/assert" ) func TestBlobReturnsCorrectObjectType(t *testing.T) { assert.Equal(t, BlobObjectType, new(Blob).Type()) } func TestBlobFromString(t *testing.T) { given := []byte("example") glen := len(given) b := NewBlobFromBytes(given) assert.EqualValues(t, glen, b.Size) contents, err := ioutil.ReadAll(b.Contents) assert.NoError(t, err) assert.Equal(t, given, contents) } func TestBlobEncoding(t *testing.T) { const contents = "Hello, world!\n" b := &Blob{ Size: int64(len(contents)), Contents: strings.NewReader(contents), } var buf bytes.Buffer if _, err := b.Encode(&buf); err != nil { t.Fatal(err.Error()) } assert.Equal(t, contents, (&buf).String()) } func TestBlobDecoding(t *testing.T) { const contents = "Hello, world!\n" from := strings.NewReader(contents) b := new(Blob) n, err := b.Decode(from, int64(len(contents))) assert.Equal(t, 0, n) assert.Nil(t, err) assert.EqualValues(t, len(contents), b.Size) got, err := ioutil.ReadAll(b.Contents) assert.Nil(t, err) assert.Equal(t, []byte(contents), got) } func TestBlobCallCloseFn(t *testing.T) { var calls uint32 expected := errors.New("some close error") b := &Blob{ closeFn: func() error { atomic.AddUint32(&calls, 1) return expected }, } got := b.Close() assert.Equal(t, expected, got) assert.EqualValues(t, 1, calls) } func TestBlobCanCloseWithoutCloseFn(t *testing.T) { b := &Blob{ closeFn: nil, } assert.Nil(t, b.Close()) } func TestBlobEqualReturnsTrueWithUnchangedContents(t *testing.T) { c := strings.NewReader("Hello, world!") b1 := &Blob{Size: int64(c.Len()), Contents: c} b2 := &Blob{Size: int64(c.Len()), Contents: c} assert.True(t, b1.Equal(b2)) } func TestBlobEqualReturnsFalseWithChangedContents(t *testing.T) { c1 := strings.NewReader("Hello, world!") c2 := strings.NewReader("Goodbye, world!") b1 := &Blob{Size: int64(c1.Len()), Contents: c1} b2 := &Blob{Size: int64(c2.Len()), Contents: c2} assert.False(t, b1.Equal(b2)) } func TestBlobEqualReturnsTrueWhenOneBlobIsNil(t *testing.T) { b1 := &Blob{Size: 1, Contents: bytes.NewReader([]byte{0xa})} b2 := (*Blob)(nil) assert.False(t, b1.Equal(b2)) assert.False(t, b2.Equal(b1)) } func TestBlobEqualReturnsTrueWhenBothBlobsAreNil(t *testing.T) { b1 := (*Blob)(nil) b2 := (*Blob)(nil) assert.True(t, b1.Equal(b2)) } git-lfs-2.3.4/git/odb/commit.go000066400000000000000000000137001317167762300162510ustar00rootroot00000000000000package odb import ( "bufio" "bytes" "encoding/hex" "fmt" "io" "strings" "time" ) // Signature represents a commit signature, which can represent either // committership or authorship of the commit that this signature belongs to. It // specifies a name, email, and time that the signature was created. // // NOTE: this type is _not_ used by the `*Commit` instance, as it does not // preserve cruft bytes. It is kept as a convenience type to test with. 
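// For example (a sketch; the zone suffix in the output depends on the
// Signature's time.Time location):
//
//	sig := &Signature{
//		Name:  "Jane Doe",
//		Email: "jane@example.com",
//		When:  time.Unix(1494258422, 0),
//	}
//	_ = sig.String() // "Jane Doe <jane@example.com> 1494258422 <zone>"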
type Signature struct { // Name is the first and last name of the individual holding this // signature. Name string // Email is the email address of the individual holding this signature. Email string // When is the instant in time when the signature was created. When time.Time } const ( formatTimeZoneOnly = "-0700" ) // String implements the fmt.Stringer interface and formats a Signature as // expected in the Git commit internal object format. For instance: // // Taylor Blau <ttaylorr@github.com> 1494258422 -0600 func (s *Signature) String() string { at := s.When.Unix() zone := s.When.Format(formatTimeZoneOnly) return fmt.Sprintf("%s <%s> %d %s", s.Name, s.Email, at, zone) } // ExtraHeader encapsulates a key-value pairing of header key to header value. // It is stored as a struct{string, string} in memory as opposed to a // map[string]string to maintain ordering in a byte-for-byte encode/decode round // trip. type ExtraHeader struct { // K is the header key, or the first run of bytes up until a ' ' (\x20) // character. K string // V is the header value, or the remaining run of bytes in the line, // stripping off the above "K" field as a prefix. V string } // Commit encapsulates a Git commit entry. type Commit struct { // Author is the author of this commit, or the original writer of the // contents. // // NOTE: this field is stored as a string to ensure any extra "cruft" // bytes are preserved through migration. Author string // Committer is the individual or entity that added this commit to the // history. // // NOTE: this field is stored as a string to ensure any extra "cruft" // bytes are preserved through migration. Committer string // ParentIDs are the IDs of all parents for which this commit is a // linear child. ParentIDs [][]byte // TreeID is the root Tree associated with this commit. TreeID []byte // ExtraHeaders stores headers not listed above, for instance // "encoding", "gpgsig", or "mergetag" (among others). ExtraHeaders []*ExtraHeader // Message is the commit message, including any signing information // associated with this commit. Message string } // Type implements Object.ObjectType by returning the correct object type for // Commits, CommitObjectType. func (c *Commit) Type() ObjectType { return CommitObjectType } // Decode implements Object.Decode and decodes the uncompressed commit being // read. It returns the number of uncompressed bytes being consumed off of the // stream, which should be strictly equal to the size given. // // If any error was encountered along the way, that will be returned, along with // the number of bytes read up to that point.
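// For instance (a hypothetical sketch mirroring commit_test.go below),
// decoding a raw commit body populates the fields above:
//
//	buf := new(bytes.Buffer)
//	fmt.Fprintf(buf, "tree fcb545d5746547a597811b7441ed8eba307be1ff\n")
//	fmt.Fprintf(buf, "author Jane Doe <jane@example.com> 1503956287 -0400\n")
//	fmt.Fprintf(buf, "committer Jane Doe <jane@example.com> 1503956287 -0400\n")
//	fmt.Fprintf(buf, "\ninitial commit\n")
//
//	c := new(Commit)
//	n, err := c.Decode(buf, int64(buf.Len()))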
func (c *Commit) Decode(from io.Reader, size int64) (n int, err error) { var finishedHeaders bool var messageParts []string s := bufio.NewScanner(from) for s.Scan() { text := s.Text() n = n + len(text+"\n") if len(s.Text()) == 0 { finishedHeaders = true continue } if fields := strings.Fields(text); len(fields) > 0 && !finishedHeaders { switch fields[0] { case "tree": id, err := hex.DecodeString(fields[1]) if err != nil { return n, err } c.TreeID = id case "parent": id, err := hex.DecodeString(fields[1]) if err != nil { return n, err } c.ParentIDs = append(c.ParentIDs, id) case "author": c.Author = strings.Join(fields[1:], " ") case "committer": c.Committer = strings.Join(fields[1:], " ") default: c.ExtraHeaders = append(c.ExtraHeaders, &ExtraHeader{ K: fields[0], V: strings.Join(fields[1:], " "), }) } } else { messageParts = append(messageParts, s.Text()) } } c.Message = strings.Join(messageParts, "\n") if err = s.Err(); err != nil { return n, err } return n, err } // Encode encodes the commit's contents to the given io.Writer, "w". If there was // any error copying the commit's contents, that error will be returned. // // Otherwise, the number of bytes written will be returned. func (c *Commit) Encode(to io.Writer) (n int, err error) { n, err = fmt.Fprintf(to, "tree %s\n", hex.EncodeToString(c.TreeID)) if err != nil { return n, err } for _, pid := range c.ParentIDs { n1, err := fmt.Fprintf(to, "parent %s\n", hex.EncodeToString(pid)) if err != nil { return n, err } n = n + n1 } n2, err := fmt.Fprintf(to, "author %s\ncommitter %s\n", c.Author, c.Committer) if err != nil { return n, err } n = n + n2 for _, hdr := range c.ExtraHeaders { n3, err := fmt.Fprintf(to, "%s %s\n", hdr.K, hdr.V) if err != nil { return n, err } n = n + n3 } n4, err := fmt.Fprintf(to, "\n%s\n", c.Message) if err != nil { return n, err } return n + n4, err } // Equal returns whether the receiving and given commits are equal, or in other // words, whether they are represented by the same SHA-1 when saved to the // object database. 
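// For example (sketch): two commits whose fields match compare as equal,
// and changing any single field breaks the equality:
//
//	a := &Commit{Message: "initial commit", TreeID: make([]byte, 20)}
//	b := &Commit{Message: "initial commit", TreeID: make([]byte, 20)}
//	_ = a.Equal(b) // true
//	b.Message = "second commit"
//	_ = a.Equal(b) // false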
func (c *Commit) Equal(other *Commit) bool { if (c == nil) != (other == nil) { return false } if c != nil { if len(c.ParentIDs) != len(other.ParentIDs) { return false } for i := 0; i < len(c.ParentIDs); i++ { p1 := c.ParentIDs[i] p2 := other.ParentIDs[i] if !bytes.Equal(p1, p2) { return false } } if len(c.ExtraHeaders) != len(other.ExtraHeaders) { return false } for i := 0; i < len(c.ExtraHeaders); i++ { e1 := c.ExtraHeaders[i] e2 := other.ExtraHeaders[i] if e1.K != e2.K || e1.V != e2.V { return false } } return c.Author == other.Author && c.Committer == other.Committer && c.Message == other.Message && bytes.Equal(c.TreeID, other.TreeID) } return true } git-lfs-2.3.4/git/odb/commit_test.go000066400000000000000000000155751317167762300173240ustar00rootroot00000000000000package odb import ( "bytes" "encoding/hex" "fmt" "strings" "testing" "time" "github.com/stretchr/testify/assert" ) func TestCommitReturnsCorrectObjectType(t *testing.T) { assert.Equal(t, CommitObjectType, new(Commit).Type()) } func TestCommitEncoding(t *testing.T) { author := &Signature{Name: "John Doe", Email: "john@example.com", When: time.Now()} committer := &Signature{Name: "Jane Doe", Email: "jane@example.com", When: time.Now()} c := &Commit{ Author: author.String(), Committer: committer.String(), ParentIDs: [][]byte{ []byte("aaaaaaaaaaaaaaaaaaaa"), []byte("bbbbbbbbbbbbbbbbbbbb"), }, TreeID: []byte("cccccccccccccccccccc"), ExtraHeaders: []*ExtraHeader{ {"foo", "bar"}, }, Message: "initial commit", } buf := new(bytes.Buffer) _, err := c.Encode(buf) assert.Nil(t, err) assertLine(t, buf, "tree 6363636363636363636363636363636363636363") assertLine(t, buf, "parent 6161616161616161616161616161616161616161") assertLine(t, buf, "parent 6262626262626262626262626262626262626262") assertLine(t, buf, "author %s", author.String()) assertLine(t, buf, "committer %s", committer.String()) assertLine(t, buf, "foo bar") assertLine(t, buf, "") assertLine(t, buf, "initial commit") assert.Equal(t, 0, buf.Len()) } func TestCommitDecoding(t *testing.T) { author := &Signature{Name: "John Doe", Email: "john@example.com", When: time.Now()} committer := &Signature{Name: "Jane Doe", Email: "jane@example.com", When: time.Now()} p1 := []byte("aaaaaaaaaaaaaaaaaaaa") p2 := []byte("bbbbbbbbbbbbbbbbbbbb") treeId := []byte("cccccccccccccccccccc") from := new(bytes.Buffer) fmt.Fprintf(from, "author %s\n", author) fmt.Fprintf(from, "committer %s\n", committer) fmt.Fprintf(from, "parent %s\n", hex.EncodeToString(p1)) fmt.Fprintf(from, "parent %s\n", hex.EncodeToString(p2)) fmt.Fprintf(from, "foo bar\n") fmt.Fprintf(from, "tree %s\n", hex.EncodeToString(treeId)) fmt.Fprintf(from, "\ninitial commit\n") flen := from.Len() commit := new(Commit) n, err := commit.Decode(from, int64(flen)) assert.Nil(t, err) assert.Equal(t, flen, n) assert.Equal(t, author.String(), commit.Author) assert.Equal(t, committer.String(), commit.Committer) assert.Equal(t, [][]byte{p1, p2}, commit.ParentIDs) assert.Equal(t, 1, len(commit.ExtraHeaders)) assert.Equal(t, "foo", commit.ExtraHeaders[0].K) assert.Equal(t, "bar", commit.ExtraHeaders[0].V) assert.Equal(t, "initial commit", commit.Message) } func TestCommitDecodingWithMessageKeywordPrefix(t *testing.T) { author := &Signature{Name: "John Doe", Email: "john@example.com", When: time.Now()} committer := &Signature{Name: "Jane Doe", Email: "jane@example.com", When: time.Now()} treeId := []byte("aaaaaaaaaaaaaaaaaaaa") treeIdAscii := hex.EncodeToString(treeId) from := new(bytes.Buffer) fmt.Fprintf(from, "author %s\n", author) 
fmt.Fprintf(from, "committer %s\n", committer) fmt.Fprintf(from, "tree %s\n", hex.EncodeToString(treeId)) fmt.Fprintf(from, "\ntree <- initial commit\n") flen := from.Len() commit := new(Commit) n, err := commit.Decode(from, int64(flen)) assert.NoError(t, err) assert.Equal(t, flen, n) assert.Equal(t, author.String(), commit.Author) assert.Equal(t, committer.String(), commit.Committer) assert.Equal(t, treeIdAscii, hex.EncodeToString(commit.TreeID)) assert.Equal(t, "tree <- initial commit", commit.Message) } func assertLine(t *testing.T, buf *bytes.Buffer, wanted string, args ...interface{}) { got, err := buf.ReadString('\n') assert.Nil(t, err) assert.Equal(t, fmt.Sprintf(wanted, args...), strings.TrimSuffix(got, "\n")) } func TestCommitEqualReturnsTrueWithIdenticalCommits(t *testing.T) { c1 := &Commit{ Author: "Jane Doe 1503956287 -0400", Committer: "Jane Doe 1503956287 -0400", ParentIDs: [][]byte{make([]byte, 20)}, TreeID: make([]byte, 20), ExtraHeaders: []*ExtraHeader{ {K: "Signed-off-by", V: "Joe Smith"}, }, Message: "initial commit", } c2 := &Commit{ Author: "Jane Doe 1503956287 -0400", Committer: "Jane Doe 1503956287 -0400", ParentIDs: [][]byte{make([]byte, 20)}, TreeID: make([]byte, 20), ExtraHeaders: []*ExtraHeader{ {K: "Signed-off-by", V: "Joe Smith"}, }, Message: "initial commit", } assert.True(t, c1.Equal(c2)) } func TestCommitEqualReturnsFalseWithDifferentParentCounts(t *testing.T) { c1 := &Commit{ ParentIDs: [][]byte{make([]byte, 20), make([]byte, 20)}, } c2 := &Commit{ ParentIDs: [][]byte{make([]byte, 20)}, } assert.False(t, c1.Equal(c2)) } func TestCommitEqualReturnsFalseWithDifferentParentsIds(t *testing.T) { c1 := &Commit{ ParentIDs: [][]byte{make([]byte, 20)}, } c2 := &Commit{ ParentIDs: [][]byte{make([]byte, 20)}, } c1.ParentIDs[0][1] = 0x1 assert.False(t, c1.Equal(c2)) } func TestCommitEqualReturnsFalseWithDifferentHeaderCounts(t *testing.T) { c1 := &Commit{ ExtraHeaders: []*ExtraHeader{ {K: "Signed-off-by", V: "Joe Smith"}, {K: "GPG-Signature", V: "..."}, }, } c2 := &Commit{ ExtraHeaders: []*ExtraHeader{ {K: "Signed-off-by", V: "Joe Smith"}, }, } assert.False(t, c1.Equal(c2)) } func TestCommitEqualReturnsFalseWithDifferentHeaders(t *testing.T) { c1 := &Commit{ ExtraHeaders: []*ExtraHeader{ {K: "Signed-off-by", V: "Joe Smith"}, }, } c2 := &Commit{ ExtraHeaders: []*ExtraHeader{ {K: "Signed-off-by", V: "Jane Smith"}, }, } assert.False(t, c1.Equal(c2)) } func TestCommitEqualReturnsFalseWithDifferentAuthors(t *testing.T) { c1 := &Commit{ Author: "Jane Doe 1503956287 -0400", } c2 := &Commit{ Author: "John Doe 1503956287 -0400", } assert.False(t, c1.Equal(c2)) } func TestCommitEqualReturnsFalseWithDifferentCommitters(t *testing.T) { c1 := &Commit{ Committer: "Jane Doe 1503956287 -0400", } c2 := &Commit{ Committer: "John Doe 1503956287 -0400", } assert.False(t, c1.Equal(c2)) } func TestCommitEqualReturnsFalseWithDifferentMessages(t *testing.T) { c1 := &Commit{ Message: "initial commit", } c2 := &Commit{ Message: "not the initial commit", } assert.False(t, c1.Equal(c2)) } func TestCommitEqualReturnsFalseWithDifferentTreeIDs(t *testing.T) { c1 := &Commit{ TreeID: make([]byte, 20), } c2 := &Commit{ TreeID: make([]byte, 20), } c1.TreeID[0] = 0x1 assert.False(t, c1.Equal(c2)) } func TestCommitEqualReturnsFalseWhenOneCommitIsNil(t *testing.T) { c1 := &Commit{ Author: "Jane Doe 1503956287 -0400", Committer: "Jane Doe 1503956287 -0400", ParentIDs: [][]byte{make([]byte, 20)}, TreeID: make([]byte, 20), ExtraHeaders: []*ExtraHeader{ {K: "Signed-off-by", V: "Joe Smith"}, }, Message: 
"initial commit", } c2 := (*Commit)(nil) assert.False(t, c1.Equal(c2)) } func TestCommitEqualReturnsTrueWhenBothCommitsAreNil(t *testing.T) { c1 := (*Commit)(nil) c2 := (*Commit)(nil) assert.True(t, c1.Equal(c2)) } git-lfs-2.3.4/git/odb/errors.go000066400000000000000000000010511317167762300162710ustar00rootroot00000000000000package odb import "fmt" // UnexpectedObjectType is an error type that represents a scenario where an // object was requested of a given type "Wanted", and received as a different // _other_ type, "Wanted". type UnexpectedObjectType struct { // Got was the object type requested. Got ObjectType // Wanted was the object type received. Wanted ObjectType } // Error implements the error.Error() function. func (e *UnexpectedObjectType) Error() string { return fmt.Sprintf("git/odb: unexpected object type, got: %q, wanted: %q", e.Got, e.Wanted) } git-lfs-2.3.4/git/odb/errors_test.go000066400000000000000000000004741317167762300173400ustar00rootroot00000000000000package odb import ( "testing" "github.com/stretchr/testify/assert" ) func TestUnexpectedObjectTypeErrFormatting(t *testing.T) { err := &UnexpectedObjectType{ Got: TreeObjectType, Wanted: BlobObjectType, } assert.Equal(t, "git/odb: unexpected object type, got: \"tree\", wanted: \"blob\"", err.Error()) } git-lfs-2.3.4/git/odb/file_storer.go000066400000000000000000000052741317167762300173050ustar00rootroot00000000000000package odb import ( "encoding/hex" "io" "io/ioutil" "os" "path/filepath" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/lfs" ) // fileStorer implements the storer interface by writing to the .git/objects // directory on disc. type fileStorer struct { // root is the top level /objects directory's path on disc. root string } // NewFileStorer returns a new fileStorer instance with the given root. func newFileStorer(root string) *fileStorer { return &fileStorer{ root: root, } } // Open implements the storer.Open function, and returns a io.ReadWriteCloser // for the given SHA. If the file does not exist, or if there was any other // error in opening the file, an error will be returned. // // It is the caller's responsibility to close the given file "f" after its use // is complete. func (fs *fileStorer) Open(sha []byte) (f io.ReadWriteCloser, err error) { return fs.open(fs.path(sha), os.O_RDONLY) } // Store implements the storer.Store function and returns the number of bytes // written, along with any error encountered in copying the given io.Reader, "r" // into the object database on disk at a path given by "sha". // // If the file could not be created, or opened, an error will be returned. func (fs *fileStorer) Store(sha []byte, r io.Reader) (n int64, err error) { path := fs.path(sha) dir := filepath.Dir(path) if stat, err := os.Stat(path); stat != nil || os.IsExist(err) { // If the file already exists, there is no work left for us to // do, since the object already exists (or there is a SHA1 // collision). _, err = io.Copy(ioutil.Discard, r) if err != nil { return 0, errors.Wrap(err, "discard pre-existing object data") } return 0, nil } tmp, err := lfs.TempFile("") if err != nil { return 0, err } n, err = io.Copy(tmp, r) if err = tmp.Close(); err != nil { return n, err } if err != nil { return n, err } // Since .git/objects partitions objects based on the first two // characters of their ASCII-encoded SHA1 object ID, ensure that // the directory exists before copying a file into it. 
if err = os.MkdirAll(dir, 0755); err != nil { return n, err } if err = os.Rename(tmp.Name(), path); err != nil { return n, err } return n, nil } // Root gives the absolute (fully-qualified) path to the file storer on disk. func (fs *fileStorer) Root() string { return fs.root } // open opens a given file. func (fs *fileStorer) open(path string, flag int) (*os.File, error) { return os.OpenFile(path, flag, 0) } // path returns an absolute path on disk to the object given by the OID "sha". func (fs *fileStorer) path(sha []byte) string { encoded := hex.EncodeToString(sha) return filepath.Join(fs.root, encoded[:2], encoded[2:]) } git-lfs-2.3.4/git/odb/memory_storer.go000066400000000000000000000037061317167762300176740ustar00rootroot00000000000000package odb import ( "bytes" "fmt" "io" "os" "sync" ) // memoryStorer is an implementation of the storer interface that holds data for // the object database in memory. type memoryStorer struct { // mu guards reads and writes to the map "fs" below. mu *sync.Mutex // fs maps a hex-encoded SHA to a bytes.Buffer wrapped in a no-op closer // type. fs map[string]*bufCloser } // newMemoryStorer initializes a new memoryStorer instance with the given // initial set. // // A value of "nil" is acceptable and indicates that no entries shall be added // to the memory storer at/during construction time. func newMemoryStorer(m map[string]io.ReadWriter) *memoryStorer { fs := make(map[string]*bufCloser, len(m)) for n, rw := range m { fs[n] = &bufCloser{rw} } return &memoryStorer{ mu: new(sync.Mutex), fs: fs, } } // Store implements the storer.Store function and copies the data given in "r" // into an object entry in memory. If an object given by that SHA "sha" is // already indexed in the database, Store will overwrite it. func (ms *memoryStorer) Store(sha []byte, r io.Reader) (n int64, err error) { ms.mu.Lock() defer ms.mu.Unlock() key := fmt.Sprintf("%x", sha) ms.fs[key] = &bufCloser{new(bytes.Buffer)} return io.Copy(ms.fs[key], r) } // Open implements the storer.Open function, and returns an io.ReadWriteCloser // for the given SHA. If a reader for the given SHA does not exist an error will // be returned. func (ms *memoryStorer) Open(sha []byte) (f io.ReadWriteCloser, err error) { ms.mu.Lock() defer ms.mu.Unlock() key := fmt.Sprintf("%x", sha) if _, ok := ms.fs[key]; !ok { return nil, os.ErrNotExist } return ms.fs[key], nil } // bufCloser wraps a type satisfying the io.ReadWriter interface with a no-op // Close() function, thus implementing the io.ReadWriteCloser composite // interface. type bufCloser struct { io.ReadWriter } // Close implements io.Closer, and always returns nil.
func (b *bufCloser) Close() error { return nil } git-lfs-2.3.4/git/odb/memory_storer_test.go000066400000000000000000000034571317167762300207320ustar00rootroot00000000000000package odb import ( "bytes" "encoding/hex" "io" "io/ioutil" "os" "strings" "testing" "github.com/stretchr/testify/assert" ) func TestMemoryStorerIncludesGivenEntries(t *testing.T) { sha := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" hex, err := hex.DecodeString(sha) assert.Nil(t, err) ms := newMemoryStorer(map[string]io.ReadWriter{ sha: bytes.NewBuffer([]byte{0x1}), }) buf, err := ms.Open(hex) assert.Nil(t, err) contents, err := ioutil.ReadAll(buf) assert.Nil(t, err) assert.Equal(t, []byte{0x1}, contents) } func TestMemoryStorerAcceptsNilEntries(t *testing.T) { ms := newMemoryStorer(nil) assert.NotNil(t, ms) assert.Equal(t, 0, len(ms.fs)) } func TestMemoryStorerDoesntOpenMissingEntries(t *testing.T) { sha := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" hex, err := hex.DecodeString(sha) assert.Nil(t, err) ms := newMemoryStorer(nil) f, err := ms.Open(hex) assert.Equal(t, os.ErrNotExist, err) assert.Nil(t, f) } func TestMemoryStorerStoresNewEntries(t *testing.T) { hex, err := hex.DecodeString("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") assert.Nil(t, err) ms := newMemoryStorer(nil) assert.Equal(t, 0, len(ms.fs)) _, err = ms.Store(hex, strings.NewReader("hello")) assert.Nil(t, err) assert.Equal(t, 1, len(ms.fs)) got, err := ms.Open(hex) assert.Nil(t, err) contents, err := ioutil.ReadAll(got) assert.Nil(t, err) assert.Equal(t, "hello", string(contents)) } func TestMemoryStorerStoresExistingEntries(t *testing.T) { hex, err := hex.DecodeString("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") assert.Nil(t, err) ms := newMemoryStorer(nil) assert.Equal(t, 0, len(ms.fs)) _, err = ms.Store(hex, new(bytes.Buffer)) assert.Nil(t, err) assert.Equal(t, 1, len(ms.fs)) n, err := ms.Store(hex, new(bytes.Buffer)) assert.Nil(t, err) assert.EqualValues(t, 0, n) } git-lfs-2.3.4/git/odb/object.go000066400000000000000000000026511317167762300162320ustar00rootroot00000000000000package odb import "io" // Object is an interface satisfied by any concrete type that represents a loose // Git object. type Object interface { // Encode takes an io.Writer, "to", and encodes an uncompressed // Git-compatible representation of itself to that stream. // // It must return "n", the number of uncompressed bytes written to that // stream, along with "err", any error that was encountered during the // write. // // Any error that was encountered should be treated as "fatal-local", // meaning that a particular invocation of Encode() cannot progress, and // an accurate number "n" of bytes written up to that point should be // returned. Encode(to io.Writer) (n int, err error) // Decode takes an io.Reader, "from" as well as a size "size" (the // number of uncompressed bytes on the stream that represent the object // trying to be decoded) and decodes the encoded object onto itself, // as a mutative transaction. // // It returns the number of uncompressed bytes "n" that an invocation // of this function has advanced the io.Reader, "from", as well as any // error that was encountered along the way. // // If an(y) error was encountered, it should be returned immediately, // along with the number of bytes read up to that point. Decode(from io.Reader, size int64) (n int, err error) // Type returns the ObjectType constant that represents an instance of // the implementing type.
Type() ObjectType } git-lfs-2.3.4/git/odb/object_db.go000066400000000000000000000162121317167762300166750ustar00rootroot00000000000000package odb import ( "bytes" "fmt" "io" "os" "strings" "sync/atomic" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/git/odb/pack" "github.com/git-lfs/git-lfs/lfs" ) // ObjectDatabase enables the reading and writing of objects against a storage // backend. type ObjectDatabase struct { // s is the storage backend which opens/creates/reads/writes. s storer // packs are the set of packfiles which contain all packed objects // within this repository. packs *pack.Set // closed is a uint32 managed by sync/atomic's Uint32 methods. It // yields a value of 0 if the *ObjectDatabase it is stored upon is open, // and a value of 1 if it is closed. closed uint32 } // FromFilesystem constructs an *ObjectDatabase instance that is backed by a // directory on the filesystem. Specifically, this should point to: // // /absolute/repo/path/.git/objects func FromFilesystem(root string) (*ObjectDatabase, error) { packs, err := pack.NewSet(root) if err != nil { return nil, err } return &ObjectDatabase{ s: newFileStorer(root), packs: packs, }, nil } // Close closes the *ObjectDatabase, freeing any open resources (namely: the // `*git.ObjectScanner instance), and returning any errors encountered in // closing them. // // If Close() has already been called, this function will return an error. func (o *ObjectDatabase) Close() error { if !atomic.CompareAndSwapUint32(&o.closed, 0, 1) { return errors.New("git/odb: *ObjectDatabase already closed") } if err := o.packs.Close(); err != nil { return err } return nil } // Blob returns a *Blob as identified by the SHA given, or an error if one was // encountered. func (o *ObjectDatabase) Blob(sha []byte) (*Blob, error) { var b Blob if err := o.decode(sha, &b); err != nil { return nil, err } return &b, nil } // Tree returns a *Tree as identified by the SHA given, or an error if one was // encountered. func (o *ObjectDatabase) Tree(sha []byte) (*Tree, error) { var t Tree if err := o.decode(sha, &t); err != nil { return nil, err } return &t, nil } // Commit returns a *Commit as identified by the SHA given, or an error if one // was encountered. func (o *ObjectDatabase) Commit(sha []byte) (*Commit, error) { var c Commit if err := o.decode(sha, &c); err != nil { return nil, err } return &c, nil } // WriteBlob stores a *Blob on disk and returns the SHA it is uniquely // identified by, or an error if one was encountered. func (o *ObjectDatabase) WriteBlob(b *Blob) ([]byte, error) { buf, err := lfs.TempFile("") if err != nil { return nil, err } defer os.Remove(buf.Name()) sha, _, err := o.encodeBuffer(b, buf) if err != nil { return nil, err } if err = b.Close(); err != nil { return nil, err } return sha, nil } // WriteTree stores a *Tree on disk and returns the SHA it is uniquely // identified by, or an error if one was encountered. func (o *ObjectDatabase) WriteTree(t *Tree) ([]byte, error) { sha, _, err := o.encode(t) if err != nil { return nil, err } return sha, nil } // WriteCommit stores a *Commit on disk and returns the SHA it is uniquely // identified by, or an error if one was encountered. func (o *ObjectDatabase) WriteCommit(c *Commit) ([]byte, error) { sha, _, err := o.encode(c) if err != nil { return nil, err } return sha, nil } // Root returns the filesystem root that this *ObjectDatabase works within, if // backed by a fileStorer (constructed by FromFilesystem). 
If so, it returns // the fully-qualified path on disk and a value of true. // // Otherwise, it returns the empty string and a value of false. func (o *ObjectDatabase) Root() (string, bool) { type rooter interface { Root() string } if root, ok := o.s.(rooter); ok { return root.Root(), true } return "", false } // encode encodes and saves an object to the storage backend and uses an // in-memory buffer to calculate the object's encoded body. func (d *ObjectDatabase) encode(object Object) (sha []byte, n int64, err error) { return d.encodeBuffer(object, bytes.NewBuffer(nil)) } // encodeBuffer encodes and saves an object to the storage backend by using the // given buffer to calculate and store the object's encoded body. func (d *ObjectDatabase) encodeBuffer(object Object, buf io.ReadWriter) (sha []byte, n int64, err error) { cn, err := object.Encode(buf) if err != nil { return nil, 0, err } tmp, err := lfs.TempFile("") if err != nil { return nil, 0, err } defer os.Remove(tmp.Name()) to := NewObjectWriter(tmp) if _, err = to.WriteHeader(object.Type(), int64(cn)); err != nil { return nil, 0, err } if seek, ok := buf.(io.Seeker); ok { if _, err = seek.Seek(0, io.SeekStart); err != nil { return nil, 0, err } } if _, err = io.Copy(to, buf); err != nil { return nil, 0, err } if err = to.Close(); err != nil { return nil, 0, err } if _, err := tmp.Seek(0, io.SeekStart); err != nil { return nil, 0, err } return d.save(to.Sha(), tmp) } // save writes the given buffer to the location given by the storer "o.s" as // identified by the sha []byte. func (o *ObjectDatabase) save(sha []byte, buf io.Reader) ([]byte, int64, error) { n, err := o.s.Store(sha, buf) return sha, n, err } // open gives an `*ObjectReader` for the given loose object keyed by the given // "sha" []byte, or an error. func (o *ObjectDatabase) open(sha []byte) (*ObjectReader, error) { f, err := o.s.Open(sha) if err != nil { if !os.IsNotExist(err) { // If there was some other issue beyond not being able // to find the object, return that immediately and don't // try to fall back to the packfile set. return nil, err } // Otherwise, if the file simply couldn't be found, attempt to // load its contents from the set of packfiles held by this // *ObjectDatabase. if atomic.LoadUint32(&o.closed) == 1 { return nil, errors.New("git/odb: cannot use closed *pack.Set") } packed, err := o.packs.Object(sha) if err != nil { return nil, err } unpacked, err := packed.Unpack() if err != nil { return nil, err } return NewUncompressedObjectReader(io.MultiReader( // Git object header: strings.NewReader(fmt.Sprintf("%s %d\x00", packed.Type(), len(unpacked), )), // Git object (uncompressed) contents: bytes.NewReader(unpacked), )) } return NewObjectReadCloser(f) } // decode decodes an object given by the sha "sha []byte" into the given object // "into", or returns an error if one was encountered. // // Ordinarily, it closes the object's underlying io.ReadCloser (if it implements // the `io.Closer` interface), but skips this if the "into" Object is of type // BlobObjectType. Blobs don't exhaust the buffer completely (they instead // maintain a handle on the blob's contents via an io.LimitedReader) and // therefore cannot be closed until signaled explicitly by git/odb.Blob.Close().
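// For example (a hypothetical sketch of the caller-facing consequence,
// where "sha" is a 20-byte object ID):
//
//	db, _ := FromFilesystem("/path/to/.git/objects")
//	blob, err := db.Blob(sha) // decode() leaves the blob's reader open
//	if err == nil {
//		_, _ = io.Copy(ioutil.Discard, blob.Contents)
//		_ = blob.Close() // the caller closes the blob explicitly
//	}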
func (o *ObjectDatabase) decode(sha []byte, into Object) error { r, err := o.open(sha) if err != nil { return err } typ, size, err := r.Header() if err != nil { return err } else if typ != into.Type() { return &UnexpectedObjectType{Got: typ, Wanted: into.Type()} } if _, err = into.Decode(r, size); err != nil { return err } if into.Type() == BlobObjectType { return nil } return r.Close() } git-lfs-2.3.4/git/odb/object_db_test.go000066400000000000000000000120461317167762300177350ustar00rootroot00000000000000package odb import ( "bytes" "compress/zlib" "encoding/hex" "fmt" "io" "io/ioutil" "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestDecodeBlob(t *testing.T) { sha := "af5626b4a114abcb82d63db7c8082c3c4756e51b" contents := "Hello, world!\n" var buf bytes.Buffer zw := zlib.NewWriter(&buf) fmt.Fprintf(zw, "blob 14\x00%s", contents) zw.Close() odb := &ObjectDatabase{s: newMemoryStorer(map[string]io.ReadWriter{ sha: &buf, })} shaHex, _ := hex.DecodeString(sha) blob, err := odb.Blob(shaHex) assert.Nil(t, err) assert.EqualValues(t, 14, blob.Size) got, err := ioutil.ReadAll(blob.Contents) assert.Nil(t, err) assert.Equal(t, contents, string(got)) } func TestDecodeTree(t *testing.T) { sha := "fcb545d5746547a597811b7441ed8eba307be1ff" hexSha, err := hex.DecodeString(sha) require.Nil(t, err) blobSha := "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391" hexBlobSha, err := hex.DecodeString(blobSha) require.Nil(t, err) var buf bytes.Buffer zw := zlib.NewWriter(&buf) fmt.Fprintf(zw, "tree 37\x00") fmt.Fprintf(zw, "100644 hello.txt\x00") zw.Write(hexBlobSha) zw.Close() odb := &ObjectDatabase{s: newMemoryStorer(map[string]io.ReadWriter{ sha: &buf, })} tree, err := odb.Tree(hexSha) assert.Nil(t, err) require.Equal(t, 1, len(tree.Entries)) assert.Equal(t, &TreeEntry{ Name: "hello.txt", Oid: hexBlobSha, Filemode: 0100644, }, tree.Entries[0]) } func TestDecodeCommit(t *testing.T) { sha := "d7283480bb6dc90be621252e1001a93871dcf511" commitShaHex, err := hex.DecodeString(sha) assert.Nil(t, err) var buf bytes.Buffer zw := zlib.NewWriter(&buf) fmt.Fprintf(zw, "commit 173\x00") fmt.Fprintf(zw, "tree fcb545d5746547a597811b7441ed8eba307be1ff\n") fmt.Fprintf(zw, "author Taylor Blau 1494620424 -0600\n") fmt.Fprintf(zw, "committer Taylor Blau 1494620424 -0600\n") fmt.Fprintf(zw, "\ninitial commit\n") zw.Close() odb := &ObjectDatabase{s: newMemoryStorer(map[string]io.ReadWriter{ sha: &buf, })} commit, err := odb.Commit(commitShaHex) assert.Nil(t, err) assert.Equal(t, "Taylor Blau 1494620424 -0600", commit.Author) assert.Equal(t, "Taylor Blau 1494620424 -0600", commit.Committer) assert.Equal(t, "initial commit", commit.Message) assert.Equal(t, 0, len(commit.ParentIDs)) assert.Equal(t, "fcb545d5746547a597811b7441ed8eba307be1ff", hex.EncodeToString(commit.TreeID)) } func TestWriteBlob(t *testing.T) { fs := newMemoryStorer(make(map[string]io.ReadWriter)) odb := &ObjectDatabase{s: fs} sha, err := odb.WriteBlob(&Blob{ Size: 14, Contents: strings.NewReader("Hello, world!\n"), }) expected := "af5626b4a114abcb82d63db7c8082c3c4756e51b" assert.Nil(t, err) assert.Equal(t, expected, hex.EncodeToString(sha)) assert.NotNil(t, fs.fs[hex.EncodeToString(sha)]) } func TestWriteTree(t *testing.T) { fs := newMemoryStorer(make(map[string]io.ReadWriter)) odb := &ObjectDatabase{s: fs} blobSha := "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391" hexBlobSha, err := hex.DecodeString(blobSha) require.Nil(t, err) sha, err := odb.WriteTree(&Tree{Entries: []*TreeEntry{ { Name: "hello.txt", Oid: 
hexBlobSha, Filemode: 0100644, }, }}) expected := "fcb545d5746547a597811b7441ed8eba307be1ff" assert.Nil(t, err) assert.Equal(t, expected, hex.EncodeToString(sha)) assert.NotNil(t, fs.fs[hex.EncodeToString(sha)]) } func TestWriteCommit(t *testing.T) { fs := newMemoryStorer(make(map[string]io.ReadWriter)) odb := &ObjectDatabase{s: fs} when := time.Unix(1257894000, 0).UTC() author := &Signature{Name: "John Doe", Email: "john@example.com", When: when} committer := &Signature{Name: "Jane Doe", Email: "jane@example.com", When: when} tree := "fcb545d5746547a597811b7441ed8eba307be1ff" treeHex, err := hex.DecodeString(tree) assert.Nil(t, err) sha, err := odb.WriteCommit(&Commit{ Author: author.String(), Committer: committer.String(), TreeID: treeHex, Message: "initial commit", }) expected := "fee8a35c2890cd6e0e28d24cc457fcecbd460962" assert.Nil(t, err) assert.Equal(t, expected, hex.EncodeToString(sha)) assert.NotNil(t, fs.fs[hex.EncodeToString(sha)]) } func TestReadingAMissingObjectAfterClose(t *testing.T) { sha, _ := hex.DecodeString("af5626b4a114abcb82d63db7c8082c3c4756e51b") db := &ObjectDatabase{ s: newMemoryStorer(nil), closed: 1, } blob, err := db.Blob(sha) assert.EqualError(t, err, "git/odb: cannot use closed *pack.Set") assert.Nil(t, blob) } func TestClosingAnObjectDatabaseMoreThanOnce(t *testing.T) { db, err := FromFilesystem("/tmp") assert.Nil(t, err) assert.Nil(t, db.Close()) assert.EqualError(t, db.Close(), "git/odb: *ObjectDatabase already closed") } func TestObjectDatabaseRootWithRoot(t *testing.T) { db, err := FromFilesystem("/foo/bar/baz") assert.Nil(t, err) root, ok := db.Root() assert.Equal(t, "/foo/bar/baz", root) assert.True(t, ok) } func TestObjectDatabaseRootWithoutRoot(t *testing.T) { root, ok := new(ObjectDatabase).Root() assert.Equal(t, "", root) assert.False(t, ok) } git-lfs-2.3.4/git/odb/object_reader.go000066400000000000000000000105001317167762300175460ustar00rootroot00000000000000package odb import ( "bufio" "compress/zlib" "io" "io/ioutil" "strconv" "strings" "github.com/pkg/errors" ) // ObjectReader provides an io.Reader implementation that can read Git object // headers, as well as provide an uncompressed view into the object contents // itself. type ObjectReader struct { // header is the object header type header *struct { // typ is the ObjectType encoded in the header pointed at by // this reader. typ ObjectType // size is the number of uncompressed bytes following the header // that encodes the object. size int64 } // r is the underlying uncompressed reader. r *bufio.Reader // closeFn supplies an optional function that, when called, frees any // resources (open files, memory, etc) held by this instance of the // *ObjectReader. // // closeFn returns any error encountered when closing/freeing resources // held. // // It is allowed to be nil. closeFn func() error } // NewObjectReader takes a given io.Reader that yields zlib-compressed data, and // returns an *ObjectReader wrapping it, or an error if one occurred during // construction time. func NewObjectReader(r io.Reader) (*ObjectReader, error) { return NewObjectReadCloser(ioutil.NopCloser(r)) } // NewUncompressedObjectReader takes a given io.Reader that yields uncompressed // data and returns an *ObjectReader wrapping it, or an error if one occurred // during construction time.
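//
// A minimal illustration (a sketch; the literal stream below is assumed to be
// a well-formed uncompressed loose-object encoding):
//
//	r, _ := NewUncompressedObjectReader(strings.NewReader("blob 5\x00hello"))
//	typ, size, _ := r.Header() // typ == BlobObjectType, size == 5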
func NewUncompressedObjectReader(r io.Reader) (*ObjectReader, error) { return NewUncompressedObjectReadCloser(ioutil.NopCloser(r)) } // NewObjectReadCloser takes a given io.ReadCloser that yields zlib-compressed data, and // returns an *ObjectReader wrapping it, or an error if one occurred during // construction time. // // It also calls the Close() function given by the implementation "r" of the // type io.Closer. func NewObjectReadCloser(r io.ReadCloser) (*ObjectReader, error) { zr, err := zlib.NewReader(r) if err != nil { return nil, err } return &ObjectReader{ r: bufio.NewReader(zr), closeFn: func() error { if err := zr.Close(); err != nil { return err } if err := r.Close(); err != nil { return err } return nil }, }, nil } // NewUncompressedObjectReadCloser takes a given io.ReadCloser that yields // uncompressed data, and returns an *ObjectReader wrapping it, or an error if // one occurred during construction time. // // It also calls the Close() function given by the implementation "r" of the // type io.Closer. func NewUncompressedObjectReadCloser(r io.ReadCloser) (*ObjectReader, error) { return &ObjectReader{ r: bufio.NewReader(r), closeFn: r.Close, }, nil } // Header returns information about the Object's header, or an error if one // occurred while reading the data. // // Header information is cached, so this function is safe to call at any point // during the object read, and can be called more than once. func (r *ObjectReader) Header() (typ ObjectType, size int64, err error) { if r.header != nil { return r.header.typ, r.header.size, nil } typs, err := r.r.ReadString(' ') if err != nil { return UnknownObjectType, 0, err } if len(typs) == 0 { return UnknownObjectType, 0, errors.Errorf( "git/odb: object type must not be empty", ) } typs = strings.TrimSuffix(typs, " ") sizeStr, err := r.r.ReadString('\x00') if err != nil { return UnknownObjectType, 0, err } sizeStr = strings.TrimSuffix(sizeStr, "\x00") size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { return UnknownObjectType, 0, err } r.header = &struct { typ ObjectType size int64 }{ ObjectTypeFromString(typs), size, } return r.header.typ, r.header.size, nil } // Read reads uncompressed bytes into the buffer "p", and returns the number of // uncompressed bytes read. Otherwise, it returns any error encountered along // the way. // // This function is safe to call before reading the Header information, as any // call to Read() will ensure that the header has been read at least once. func (r *ObjectReader) Read(p []byte) (n int, err error) { if _, _, err = r.Header(); err != nil { return 0, err } return r.r.Read(p) } // Close frees any resources held by the ObjectReader and must be called before // disposing of this instance. // // It returns any error encountered by the *ObjectReader during close.
func (r *ObjectReader) Close() error { if r.closeFn == nil { return nil } return r.closeFn() } git-lfs-2.3.4/git/odb/object_reader_test.go000066400000000000000000000025661317167762300206200ustar00rootroot00000000000000package odb import ( "bytes" "compress/zlib" "errors" "io" "sync/atomic" "testing" "github.com/stretchr/testify/assert" ) func TestObjectReaderReadsHeaders(t *testing.T) { var compressed bytes.Buffer zw := zlib.NewWriter(&compressed) zw.Write([]byte("blob 1\x00")) zw.Close() or, err := NewObjectReader(&compressed) assert.Nil(t, err) typ, size, err := or.Header() assert.Nil(t, err) assert.EqualValues(t, 1, size) assert.Equal(t, BlobObjectType, typ) } func TestObjectReaderConsumesHeaderBeforeReads(t *testing.T) { var compressed bytes.Buffer zw := zlib.NewWriter(&compressed) zw.Write([]byte("blob 1\x00asdf")) zw.Close() or, err := NewObjectReader(&compressed) assert.Nil(t, err) var buf [4]byte n, err := or.Read(buf[:]) assert.Equal(t, 4, n) assert.Equal(t, []byte{'a', 's', 'd', 'f'}, buf[:]) assert.Nil(t, err) } type ReadCloserFn struct { io.Reader closeFn func() error } func (r *ReadCloserFn) Close() error { return r.closeFn() } func TestObjectReaderCallsClose(t *testing.T) { var calls uint32 expected := errors.New("expected") or, err := NewObjectReadCloser(&ReadCloserFn{ Reader: bytes.NewBuffer([]byte{0x78, 0x01}), closeFn: func() error { atomic.AddUint32(&calls, 1) return expected }, }) assert.Nil(t, err) got := or.Close() assert.Equal(t, expected, got) assert.EqualValues(t, 1, atomic.LoadUint32(&calls)) } git-lfs-2.3.4/git/odb/object_type.go000066400000000000000000000017371317167762300172770ustar00rootroot00000000000000package odb import "strings" // ObjectType is a constant enumeration type for identifying the kind of object // type an implementing instance of the Object interface is. type ObjectType uint8 const ( UnknownObjectType ObjectType = iota BlobObjectType TreeObjectType CommitObjectType ) // ObjectTypeFromString converts from a given string to an ObjectType // enumeration instance. func ObjectTypeFromString(s string) ObjectType { switch strings.ToLower(s) { case "blob": return BlobObjectType case "tree": return TreeObjectType case "commit": return CommitObjectType default: return UnknownObjectType } } // String implements the fmt.Stringer interface and returns a string // representation of the ObjectType enumeration instance. 
func (t ObjectType) String() string { switch t { case UnknownObjectType: return "unknown" case BlobObjectType: return "blob" case TreeObjectType: return "tree" case CommitObjectType: return "commit" } return "" } git-lfs-2.3.4/git/odb/object_type_test.go000066400000000000000000000014351317167762300203310ustar00rootroot00000000000000package odb import ( "math" "testing" "github.com/stretchr/testify/assert" ) func TestObjectTypeFromString(t *testing.T) { for str, typ := range map[string]ObjectType{ "blob": BlobObjectType, "tree": TreeObjectType, "commit": CommitObjectType, "something else": UnknownObjectType, } { t.Run(str, func(t *testing.T) { assert.Equal(t, typ, ObjectTypeFromString(str)) }) } } func TestObjectTypeToString(t *testing.T) { for typ, str := range map[ObjectType]string{ BlobObjectType: "blob", TreeObjectType: "tree", CommitObjectType: "commit", UnknownObjectType: "unknown", ObjectType(math.MaxUint8): "", } { t.Run(str, func(t *testing.T) { assert.Equal(t, str, typ.String()) }) } } git-lfs-2.3.4/git/odb/object_writer.go000066400000000000000000000066561317167762300176320ustar00rootroot00000000000000package odb import ( "compress/zlib" "crypto/sha1" "fmt" "hash" "io" "sync/atomic" ) // ObjectWriter provides an implementation of io.Writer that compresses and // writes data given to it, and keeps track of the SHA1 hash of the data as it // is written. type ObjectWriter struct { // w is the underlying writer that this ObjectWriter is writing to. w io.Writer // sum is the in-progress hash calculation. sum hash.Hash // wroteHeader is a uint32 managed by the sync/atomic package. It is 1 // if the header was written, and 0 otherwise. wroteHeader uint32 // closeFn supplies an optional function that, when called, frees any // resources (open files, memory, etc) held by this instance of the // *ObjectWriter. // // closeFn returns any error encountered when closing/freeing resources // held. // // It is allowed to be nil. closeFn func() error } // nopCloser provides a no-op implementation of the io.WriteCloser interface by // taking an io.Writer and wrapping it with a Close() method that returns nil. type nopCloser struct { // Writer is an embedded io.Writer that receives the Write() method // call. io.Writer } // Close implements the io.Closer interface by returning nil. func (n *nopCloser) Close() error { return nil } // NewObjectWriter returns a new *ObjectWriter instance that drains incoming // writes into the io.Writer given, "w". func NewObjectWriter(w io.Writer) *ObjectWriter { return NewObjectWriteCloser(&nopCloser{w}) } // NewObjectWriteCloser returns a new *ObjectWriter instance that drains incoming // writes into the io.WriteCloser given, "w". // // Upon closing, it calls the given Close() function of the io.WriteCloser. func NewObjectWriteCloser(w io.WriteCloser) *ObjectWriter { zw := zlib.NewWriter(w) sum := sha1.New() return &ObjectWriter{ w: io.MultiWriter(zw, sum), sum: sum, closeFn: func() error { if err := zw.Close(); err != nil { return err } if err := w.Close(); err != nil { return err } return nil }, } } // WriteHeader writes object header information and returns the number of // uncompressed bytes written, or any error that was encountered along the way. // // WriteHeader MUST be called only once, or a panic() will occur.
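//
// For illustration (a sketch): writing a blob header for 14 bytes of content
// emits the eight uncompressed bytes "blob 14\x00" into the stream, so:
//
//	w := NewObjectWriter(&buf) // buf is any bytes.Buffer
//	n, err := w.WriteHeader(BlobObjectType, 14) // n == 8, err == nil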
func (w *ObjectWriter) WriteHeader(typ ObjectType, len int64) (n int, err error) { if !atomic.CompareAndSwapUint32(&w.wroteHeader, 0, 1) { panic("git/odb: cannot write headers more than once") } return fmt.Fprintf(w, "%s %d\x00", typ, len) } // Write writes the given buffer "p" of uncompressed bytes into the underlying // data-stream, returning the number of uncompressed bytes written, along with // any error encountered along the way. // // A call to WriteHeader MUST occur before calling Write, or a panic() will // occur. func (w *ObjectWriter) Write(p []byte) (n int, err error) { if atomic.LoadUint32(&w.wroteHeader) != 1 { panic("git/odb: cannot write data without header") } return w.w.Write(p) } // Sha returns the in-progress SHA1 of the uncompressed object contents, header // included, i.e., the object's name. func (w *ObjectWriter) Sha() []byte { return w.sum.Sum(nil) } // Close closes the ObjectWriter and frees any resources held by it, including // flushing the zlib-compressed content to the underlying writer. It must be // called before discarding the Writer instance. // // If any error occurred while calling close, it will be returned immediately, // otherwise nil. func (w *ObjectWriter) Close() error { if w.closeFn == nil { return nil } return w.closeFn() } git-lfs-2.3.4/git/odb/object_writer_test.go000066400000000000000000000043741317167762300206700ustar00rootroot00000000000000package odb import ( "bytes" "compress/zlib" "encoding/hex" "errors" "io" "io/ioutil" "sync/atomic" "testing" "github.com/stretchr/testify/assert" ) func TestObjectWriterWritesHeaders(t *testing.T) { var buf bytes.Buffer w := NewObjectWriter(&buf) n, err := w.WriteHeader(BlobObjectType, 1) assert.Equal(t, 7, n) assert.Nil(t, err) assert.Nil(t, w.Close()) r, err := zlib.NewReader(&buf) assert.Nil(t, err) all, err := ioutil.ReadAll(r) assert.Nil(t, err) assert.Equal(t, []byte("blob 1\x00"), all) assert.Nil(t, r.Close()) } func TestObjectWriterWritesData(t *testing.T) { var buf bytes.Buffer w := NewObjectWriter(&buf) w.WriteHeader(BlobObjectType, 1) n, err := w.Write([]byte{0x31}) assert.Equal(t, 1, n) assert.Nil(t, err) assert.Nil(t, w.Close()) r, err := zlib.NewReader(&buf) assert.Nil(t, err) all, err := ioutil.ReadAll(r) assert.Nil(t, err) assert.Equal(t, []byte("blob 1\x001"), all) assert.Nil(t, r.Close()) } func TestObjectWriterPanicsOnWritesWithoutHeader(t *testing.T) { defer func() { err := recover() assert.NotNil(t, err) assert.Equal(t, "git/odb: cannot write data without header", err) }() w := NewObjectWriter(new(bytes.Buffer)) w.Write(nil) } func TestObjectWriterPanicsOnMultipleHeaderWrites(t *testing.T) { defer func() { err := recover() assert.NotNil(t, err) assert.Equal(t, "git/odb: cannot write headers more than once", err) }() w := NewObjectWriter(new(bytes.Buffer)) w.WriteHeader(BlobObjectType, 1) w.WriteHeader(TreeObjectType, 2) } func TestObjectWriterKeepsTrackOfHash(t *testing.T) { w := NewObjectWriter(new(bytes.Buffer)) n, err := w.WriteHeader(BlobObjectType, 1) assert.Nil(t, err) assert.Equal(t, 7, n) assert.Equal(t, "bb6ca78b66403a67c6281df142de5ef472186283", hex.EncodeToString(w.Sha())) } type WriteCloserFn struct { io.Writer closeFn func() error } func (r *WriteCloserFn) Close() error { return r.closeFn() } func TestObjectWriterCallsClose(t *testing.T) { var calls uint32 expected := errors.New("close error") w := NewObjectWriteCloser(&WriteCloserFn{ Writer: new(bytes.Buffer), closeFn: func() error { atomic.AddUint32(&calls, 1) return expected }, }) got := w.Close() assert.EqualValues(t, 1, calls) assert.Equal(t, expected, got) }
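// TestObjectWriterRoundTripSketch is an illustrative sketch of the full
// WriteHeader/Write/Sha sequence. The expected SHA-1 below is the well-known
// name of the loose object "blob 14\x00Hello, world!\n", the same fixture used
// by the object_db tests above.
func TestObjectWriterRoundTripSketch(t *testing.T) {
	var buf bytes.Buffer

	w := NewObjectWriter(&buf)

	// Write the header, then the contents it promised.
	_, err := w.WriteHeader(BlobObjectType, 14)
	assert.Nil(t, err)
	_, err = w.Write([]byte("Hello, world!\n"))
	assert.Nil(t, err)
	assert.Nil(t, w.Close())

	// The running hash covers the uncompressed header and contents, and so
	// matches the Git object name.
	assert.Equal(t, "af5626b4a114abcb82d63db7c8082c3c4756e51b",
		hex.EncodeToString(w.Sha()))
}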
git-lfs-2.3.4/git/odb/pack/000077500000000000000000000000001317167762300153475ustar00rootroot00000000000000git-lfs-2.3.4/git/odb/pack/bounds.go000066400000000000000000000043741317167762300171760ustar00rootroot00000000000000package pack import "fmt" // bounds encapsulates the window of search for a single iteration of binary // search. // // Callers may choose to treat the return values from Left() and Right() as // inclusive or exclusive. *bounds makes no assumptions on the inclusivity of // those values. // // See: *git/odb/pack.Index for more. type bounds struct { // left is the left or lower bound of the bounds. left int64 // right is the rightmost or upper bound of the bounds. right int64 } // newBounds returns a new *bounds instance with the given left and right // values. func newBounds(left, right int64) *bounds { return &bounds{ left: left, right: right, } } // Left returns the leftmost value or lower bound of this *bounds instance. func (b *bounds) Left() int64 { return b.left } // Right returns the rightmost value or upper bound of this *bounds instance. func (b *bounds) Right() int64 { return b.right } // WithLeft returns a new copy of this *bounds instance, replacing the left // value with the given argument. func (b *bounds) WithLeft(new int64) *bounds { return &bounds{ left: new, right: b.right, } } // WithRight returns a new copy of this *bounds instance, replacing the right // value with the given argument. func (b *bounds) WithRight(new int64) *bounds { return &bounds{ left: b.left, right: new, } } // Equal returns whether or not the receiving *bounds instance is equal to the // given one: // // - If both the argument and receiver are nil, they are given to be equal. // - If both the argument and receiver are not nil, and they share the same // Left() and Right() values, they are equal. // - If both the argument and receiver are not nil, but they do not share the // same Left() and Right() values, they are not equal. // - If either the argument or receiver is nil, but the other is not, they are // not equal.
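//
// Illustratively (mirroring the tests that follow):
// newBounds(1, 2).Equal(newBounds(1, 2)) is true, while
// newBounds(1, 2).Equal(nil) and (*bounds)(nil).Equal(newBounds(1, 2)) are
// both false.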
func (b *bounds) Equal(other *bounds) bool { if b == nil { if other == nil { return true } return false } if other == nil { return false } return b.left == other.left && b.right == other.right } // String returns a string representation of this bounds instance, given as: // // [<left>,<right>] func (b *bounds) String() string { return fmt.Sprintf("[%d,%d]", b.Left(), b.Right()) } git-lfs-2.3.4/git/odb/pack/bounds_test.go000066400000000000000000000030271317167762300202310ustar00rootroot00000000000000package pack import ( "testing" "github.com/stretchr/testify/assert" ) func TestBoundsLeft(t *testing.T) { assert.EqualValues(t, 1, newBounds(1, 2).Left()) } func TestBoundsRight(t *testing.T) { assert.EqualValues(t, 2, newBounds(1, 2).Right()) } func TestBoundsWithLeftReturnsNewBounds(t *testing.T) { b1 := newBounds(1, 2) b2 := b1.WithLeft(3) assert.EqualValues(t, 1, b1.Left()) assert.EqualValues(t, 2, b1.Right()) assert.EqualValues(t, 3, b2.Left()) assert.EqualValues(t, 2, b2.Right()) } func TestBoundsWithRightReturnsNewBounds(t *testing.T) { b1 := newBounds(1, 2) b2 := b1.WithRight(3) assert.EqualValues(t, 1, b1.Left()) assert.EqualValues(t, 2, b1.Right()) assert.EqualValues(t, 1, b2.Left()) assert.EqualValues(t, 3, b2.Right()) } func TestBoundsEqualWithIdenticalBounds(t *testing.T) { b1 := newBounds(1, 2) b2 := newBounds(1, 2) assert.True(t, b1.Equal(b2)) } func TestBoundsEqualWithDifferentBounds(t *testing.T) { b1 := newBounds(1, 2) b2 := newBounds(3, 4) assert.False(t, b1.Equal(b2)) } func TestBoundsEqualWithNilReceiver(t *testing.T) { bnil := (*bounds)(nil) b2 := newBounds(1, 2) assert.False(t, bnil.Equal(b2)) } func TestBoundsEqualWithNilArgument(t *testing.T) { b1 := newBounds(1, 2) bnil := (*bounds)(nil) assert.False(t, b1.Equal(bnil)) } func TestBoundsEqualWithNilArgumentAndReceiver(t *testing.T) { b1 := (*bounds)(nil) b2 := (*bounds)(nil) assert.True(t, b1.Equal(b2)) } func TestBoundsString(t *testing.T) { b1 := newBounds(1, 2) assert.Equal(t, "[1,2]", b1.String()) } git-lfs-2.3.4/git/odb/pack/chain.go000066400000000000000000000014141317167762300167600ustar00rootroot00000000000000package pack // Chain represents an element in the delta-base chain corresponding to a packed // object. type Chain interface { // Unpack unpacks the data encoded in the delta-base chain up to and // including the receiving Chain implementation by applying the // delta-base chain successively to itself. // // If there was an error in the delta-base resolution, i.e., the chain // is malformed, has a bad instruction, or there was a file read error, this // function is expected to return that error. // // In the event that a non-nil error is returned, it is assumed that the // unpacked data this function returns is malformed, or otherwise // corrupt. Unpack() ([]byte, error) // Type returns the type of the receiving chain element. Type() PackedObjectType } git-lfs-2.3.4/git/odb/pack/chain_base.go000066400000000000000000000021331317167762300177510ustar00rootroot00000000000000package pack import ( "compress/zlib" "io" ) // ChainBase represents the "base" component of a delta-base chain. type ChainBase struct { // offset is the offset into the given io.ReaderAt at which the read // will begin. offset int64 // size is the total uncompressed size of the data in the base chain. size int64 // typ is the type of data that this *ChainBase encodes. typ PackedObjectType // r is the io.ReaderAt yielding a stream of zlib-compressed data.
r io.ReaderAt } // Unpack inflates and returns the uncompressed data encoded in the base // element. // // If there was any error in reading the compressed data (invalid headers, // etc.), it will be returned immediately. func (b *ChainBase) Unpack() ([]byte, error) { zr, err := zlib.NewReader(&OffsetReaderAt{ r: b.r, o: b.offset, }) if err != nil { return nil, err } defer zr.Close() buf := make([]byte, b.size) if _, err := io.ReadFull(zr, buf); err != nil { return nil, err } return buf, nil } // Type returns the type of the object that the ChainBase encodes. func (b *ChainBase) Type() PackedObjectType { return b.typ } git-lfs-2.3.4/git/odb/pack/chain_base_test.go000066400000000000000000000020641317167762300210130ustar00rootroot00000000000000package pack import ( "bytes" "compress/zlib" "testing" "github.com/stretchr/testify/assert" ) func TestChainBaseDecompressesData(t *testing.T) { const contents = "Hello, world!\n" compressed, err := compress(contents) assert.NoError(t, err) var buf bytes.Buffer _, err = buf.Write([]byte{0x0, 0x0, 0x0, 0x0}) assert.NoError(t, err) _, err = buf.Write(compressed) assert.NoError(t, err) _, err = buf.Write([]byte{0x0, 0x0, 0x0, 0x0}) assert.NoError(t, err) base := &ChainBase{ offset: 4, size: int64(len(contents)), r: bytes.NewReader(buf.Bytes()), } unpacked, err := base.Unpack() assert.NoError(t, err) assert.Equal(t, contents, string(unpacked)) } func TestChainBaseTypeReturnsType(t *testing.T) { b := &ChainBase{ typ: TypeCommit, } assert.Equal(t, TypeCommit, b.Type()) } func compress(base string) ([]byte, error) { var buf bytes.Buffer zw := zlib.NewWriter(&buf) if _, err := zw.Write([]byte(base)); err != nil { return nil, err } if err := zw.Close(); err != nil { return nil, err } return buf.Bytes(), nil } git-lfs-2.3.4/git/odb/pack/chain_delta.go000066400000000000000000000113651317167762300201370ustar00rootroot00000000000000package pack import ( "github.com/git-lfs/git-lfs/errors" ) // ChainDelta represents a "delta" component of a delta-base chain. type ChainDelta struct { // base is the base delta-base chain that this delta should be applied // to. It can be a ChainBase in the simple case, or it can itself be a // ChainDelta, which resolves against another ChainBase, when the // delta-base chain is of length greater than 2. base Chain // delta is the set of copy/add instructions to apply on top of the // base. delta []byte } // Unpack applies the delta operation to the previous delta-base chain, "base". // // If any of the delta-base instructions were invalid, an error will be // returned. func (d *ChainDelta) Unpack() ([]byte, error) { base, err := d.base.Unpack() if err != nil { return nil, err } return patch(base, d.delta) } // Type returns the type of the base of the delta-base chain. func (d *ChainDelta) Type() PackedObjectType { return d.base.Type() } // patch applies the delta instructions in "delta" to the base given as "base". // It returns the result of applying those patch instructions to base, but does // not modify base itself. // // If any of the delta instructions were malformed, or otherwise could not be // applied to the given base, an error will be returned, along with an empty set // of data. func patch(base, delta []byte) ([]byte, error) { srcSize, pos := patchDeltaHeader(delta, 0) if srcSize != int64(len(base)) { // The header of the delta gives the size of the source contents // that it is a patch over. // // If it does not match the length of the given base, return an error // early so as to avoid a possible bounds error below.
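//
// (The sizes in the delta header are little-endian base-128 varints:
// seven payload bits per byte, least-significant group first, with the
// high bit set on every byte except the last; see patchDeltaHeader
// below.)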
return nil, errors.New("git/odb/pack: invalid delta data") } // The remainder of the delta header contains the destination size, and // moves the "pos" offset to the correct position to begin the set of // delta instructions. destSize, pos := patchDeltaHeader(delta, pos) dest := make([]byte, 0, destSize) for pos < len(delta) { c := int(delta[pos]) pos += 1 if c&0x80 != 0 { // If the most significant bit (MSB, at position 0x80) // is set, this is a copy instruction. Advance the // position one byte backwards, and initialize variables // for the copy offset and size instructions. pos -= 1 var co, cs int // The lower-half of "c" (0000 1111) defines a "bitmask" // for the copy offset. if c&0x1 != 0 { pos += 1 co = int(delta[pos]) } if c&0x2 != 0 { pos += 1 co |= (int(delta[pos]) << 8) } if c&0x4 != 0 { pos += 1 co |= (int(delta[pos]) << 16) } if c&0x8 != 0 { pos += 1 co |= (int(delta[pos]) << 24) } // The upper-half of "c" (0111 0000) defines a "bitmask" // for the size of the copy instruction. if c&0x10 != 0 { pos += 1 cs = int(delta[pos]) } if c&0x20 != 0 { pos += 1 cs |= (int(delta[pos]) << 8) } if c&0x40 != 0 { pos += 1 cs |= (int(delta[pos]) << 16) } if cs == 0 { // If the copy size is zero, we assume that it // is the next whole number after the max uint16 // value. cs = 0x10000 } pos += 1 // Once we have the copy offset and length defined, copy // that number of bytes from the base into the // destination. Since we are copying from the base and // not the delta, the position into the delta ("pos") // need not be updated. dest = append(dest, base[co:co+cs]...) } else if c != 0 { // If the most significant bit (MSB) is _not_ set, we // instead process an add instruction, where "c" is the // number of successive bytes in the delta patch to add // to the output. // // Copy the bytes and increment the read pointer // forward. dest = append(dest, delta[pos:int(pos)+c]...) pos += int(c) } else { // Otherwise, "c" is 0, and is an invalid delta // instruction. // // Return immediately. return nil, errors.New( "git/odb/pack: invalid delta data") } } if destSize != int64(len(dest)) { // If after patching the delta against the base, the destination // size is different from the expected destination size, we have // an invalid set of patch instructions. // // Return immediately. return nil, errors.New("git/odb/pack: invalid delta data") } return dest, nil } // patchDeltaHeader examines the header within delta at the given offset, and // returns the size encoded within it, as well as the ending offset at which // the next header, or the patch instructions, begin. func patchDeltaHeader(delta []byte, pos int) (size int64, end int) { var shift uint var c int64 for shift == 0 || c&0x80 != 0 { if len(delta) <= pos { panic("git/odb/pack: invalid delta header") } c = int64(delta[pos]) pos++ size |= (c & 0x7f) << shift shift += 7 } return size, pos } git-lfs-2.3.4/git/odb/pack/chain_delta_test.go000066400000000000000000000044751317167762300212020ustar00rootroot00000000000000package pack import ( "testing" "github.com/stretchr/testify/assert" ) func TestChainDeltaUnpackCopiesFromBase(t *testing.T) { c := &ChainDelta{ base: &ChainSimple{ X: []byte{0x0, 0x1, 0x2, 0x3}, }, delta: []byte{ 0x04, // Source size: 4. 0x03, // Destination size: 3. 0x80 | 0x01 | 0x10, // Copy, omask=0001, smask=0001. 0x1, // Offset: 1. 0x3, // Size: 3.
}, } data, err := c.Unpack() assert.NoError(t, err) assert.Equal(t, []byte{0x1, 0x2, 0x3}, data) } func TestChainDeltaUnpackAddsToBase(t *testing.T) { c := &ChainDelta{ base: &ChainSimple{ X: make([]byte, 0), }, delta: []byte{ 0x0, // Source size: 0. 0x3, // Destination size: 3. 0x3, // Add, size=3. 0x1, 0x2, 0x3, // Contents: ... }, } data, err := c.Unpack() assert.NoError(t, err) assert.Equal(t, []byte{0x1, 0x2, 0x3}, data) } func TestChainDeltaWithMultipleInstructions(t *testing.T) { c := &ChainDelta{ base: &ChainSimple{ X: []byte{'H', 'e', 'l', 'l', 'o', '!', '\n'}, }, delta: []byte{ 0x07, // Source size: 7. 0x0e, // Destination size: 14. 0x80 | 0x01 | 0x10, // Copy, omask=0001, smask=0001. 0x0, // Offset: 0. 0x5, // Size: 5. 0x7, // Add, size=7. ',', ' ', 'w', 'o', 'r', 'l', 'd', // Contents: ... 0x80 | 0x01 | 0x10, // Copy, omask=0001, smask=0001. 0x05, // Offset: 5. 0x02, // Size: 2. }, } data, err := c.Unpack() assert.NoError(t, err) assert.Equal(t, []byte("Hello, world!\n"), data) } func TestChainDeltaWithInvalidDeltaInstruction(t *testing.T) { c := &ChainDelta{ base: &ChainSimple{ X: make([]byte, 0), }, delta: []byte{ 0x0, // Source size: 0. 0x1, // Destination size: 1. 0x0, // Invalid instruction. }, } data, err := c.Unpack() assert.EqualError(t, err, "git/odb/pack: invalid delta data") assert.Nil(t, data) } func TestChainDeltaWithExtraInstructions(t *testing.T) { c := &ChainDelta{ base: &ChainSimple{ X: make([]byte, 0), }, delta: []byte{ 0x0, // Source size: 0. 0x3, // Destination size: 3. 0x4, // Add, size=4 (invalid). 0x1, 0x2, 0x3, 0x4, // Contents: ... }, } data, err := c.Unpack() assert.EqualError(t, err, "git/odb/pack: invalid delta data") assert.Nil(t, data) } git-lfs-2.3.4/git/odb/pack/chain_test.go000066400000000000000000000003131317167762300200140ustar00rootroot00000000000000package pack type ChainSimple struct { X []byte Err error } func (c *ChainSimple) Unpack() ([]byte, error) { return c.X, c.Err } func (c *ChainSimple) Type() PackedObjectType { return TypeNone } git-lfs-2.3.4/git/odb/pack/errors.go000066400000000000000000000006371317167762300172200ustar00rootroot00000000000000package pack import "fmt" // UnsupportedVersionErr is a type implementing 'error' which indicates // the presence of an unsupported packfile version. type UnsupportedVersionErr struct { // Got is the unsupported version that was detected. Got uint32 } // Error implements 'error.Error()'. func (u *UnsupportedVersionErr) Error() string { return fmt.Sprintf("git/odb/pack: unsupported version: %d", u.Got) } git-lfs-2.3.4/git/odb/pack/errors_test.go000066400000000000000000000003371317167762300202540ustar00rootroot00000000000000package pack import ( "testing" "github.com/stretchr/testify/assert" ) func TestUnsupportedVersionErr(t *testing.T) { u := &UnsupportedVersionErr{Got: 3} assert.Error(t, u, "git/odb/pack: unsupported version: 3") } git-lfs-2.3.4/git/odb/pack/index.go000066400000000000000000000102161317167762300170050ustar00rootroot00000000000000package pack import ( "bytes" "io" "github.com/git-lfs/git-lfs/errors" ) // Index stores information about the location of objects in a corresponding // packfile. type Index struct { // version is the encoding version used by this index. // // Currently, versions 1 and 2 are supported. version IndexVersion // fanout is the L1 fanout table stored in this index. For a given index // "i" into the array, the value stored at that index specifies the // number of objects in the packfile/index that are lexicographically // less than or equal to that index.
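//
// As an illustration (assumed values): if fanout[0x61] == 7 and
// fanout[0x62] == 9, then seven objects have a first name byte of 0x61
// or less, and exactly two objects have names beginning with 0x62.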
// // See: https://github.com/git/git/blob/v2.13.0/Documentation/technical/pack-format.txt#L41-L45 fanout []uint32 // r is the underlying set of encoded data comprising this index file. r io.ReaderAt } // Count returns the number of objects in the packfile. func (i *Index) Count() int { return int(i.fanout[255]) } // Close closes the packfile index if the underlying data stream is closeable. // If so, it returns any error involved in closing. func (i *Index) Close() error { if close, ok := i.r.(io.Closer); ok { return close.Close() } return nil } var ( // errNotFound is an error returned by Index.Entry() (see: below) when // an object cannot be found in the index. errNotFound = errors.New("git/odb/pack: object not found in index") ) // IsNotFound returns whether a given error represents a missing object in the // index. func IsNotFound(err error) bool { return err == errNotFound } // Entry returns an entry containing the offset of a given SHA1 "name". // // Entry operates in O(log(n))-time in the worst case, where "n" is the number // of objects that begin with the first byte of "name". // // If the entry cannot be found, (nil, errNotFound) will be returned. If there // was an error searching for or parsing an entry, it will be returned as (nil, // err). // // Otherwise, (entry, nil) will be returned. func (i *Index) Entry(name []byte) (*IndexEntry, error) { var last *bounds bounds := i.bounds(name) for bounds.Left() < bounds.Right() { if last.Equal(bounds) { // If the bounds are unchanged, that means either that // the object does not exist in the packfile, or the // fanout table is corrupt. // // Either way, we won't be able to find the object. // Return immediately to prevent infinite looping. return nil, errNotFound } last = bounds // Find the midpoint between the upper and lower bounds. mid := bounds.Left() + ((bounds.Right() - bounds.Left()) / 2) got, err := i.version.Name(i, mid) if err != nil { return nil, err } if cmp := bytes.Compare(name, got); cmp == 0 { // If "cmp" is zero, that means the object at the midpoint // "mid" had a SHA equal to the one given by name, and we // are done. return i.version.Entry(i, mid) } else if cmp < 0 { // If the comparison is less than 0, we searched past // the desired object, so limit the upper bound of the // search to the midpoint. bounds = bounds.WithRight(mid) } else if cmp > 0 { // Likewise, if the comparison is greater than 0, we // searched below the desired object. Modify the bounds // accordingly. bounds = bounds.WithLeft(mid) } } return nil, errNotFound } // readAt is a convenience method that allows reading into the underlying data // source from other callers within this package. func (i *Index) readAt(p []byte, at int64) (n int, err error) { return i.r.ReadAt(p, at) } // bounds returns the initial bounds for a given name using the fanout table to // limit search results. func (i *Index) bounds(name []byte) *bounds { var left, right int64 if name[0] == 0 { // If the lower bound is 0, there are no objects before it, // start at the beginning of the index file. left = 0 } else { // Otherwise, make the lower bound the slot before the given // object. left = int64(i.fanout[name[0]-1]) } if name[0] == 255 { // As above, if the upper bound is the max byte value, make the // upper bound the last object in the list. right = int64(i.Count()) } else { // Otherwise, make the upper bound the first object which is not // within the given slot.
right = int64(i.fanout[name[0]+1]) } return newBounds(left, right) } git-lfs-2.3.4/git/odb/pack/index_decode.go000066400000000000000000000074041317167762300203150ustar00rootroot00000000000000package pack import ( "bytes" "encoding/binary" "io" "github.com/git-lfs/git-lfs/errors" ) const ( // indexMagicWidth is the width of the magic header of packfiles version // 2 and newer. indexMagicWidth = 4 // indexVersionWidth is the width of the version following the magic // header. indexVersionWidth = 4 // indexV2Width is the total width of the header in V2. indexV2Width = indexMagicWidth + indexVersionWidth // indexV1Width is the total width of the header in V1. indexV1Width = 0 // indexFanoutEntries is the number of entries in the fanout table. indexFanoutEntries = 256 // indexFanoutEntryWidth is the width of each entry in the fanout table. indexFanoutEntryWidth = 4 // indexFanoutWidth is the width of the entire fanout table. indexFanoutWidth = indexFanoutEntries * indexFanoutEntryWidth // indexOffsetV1Start is the location of the first object outside of the // V1 header. indexOffsetV1Start = indexV1Width + indexFanoutWidth // indexOffsetV2Start is the location of the first object outside of the // V2 header. indexOffsetV2Start = indexV2Width + indexFanoutWidth // indexObjectNameWidth is the width of a SHA1 object name. indexObjectNameWidth = 20 // indexObjectCRCWidth is the width of the CRC accompanying each object // in V2. indexObjectCRCWidth = 4 // indexObjectSmallOffsetWidth is the width of the small offset encoded // into each object. indexObjectSmallOffsetWidth = 4 // indexObjectLargeOffsetWidth is the width of the optional large offset // encoded into the small offset. indexObjectLargeOffsetWidth = 8 // indexObjectEntryV1Width is the width of one contiguous object entry // in V1. indexObjectEntryV1Width = indexObjectNameWidth + indexObjectSmallOffsetWidth // indexObjectEntryV2Width is the width of one non-contiguous object // entry in V2. indexObjectEntryV2Width = indexObjectNameWidth + indexObjectCRCWidth + indexObjectSmallOffsetWidth ) var ( // ErrShortFanout is an error representing situations where the entire // fanout table could not be read, and is thus too short. ErrShortFanout = errors.New("git/odb/pack: too short fanout table") // indexHeader is the first four "magic" bytes of index files version 2 // or newer. indexHeader = []byte{0xff, 0x74, 0x4f, 0x63} ) // DecodeIndex decodes an index whose underlying data is supplied by "r". // // DecodeIndex reads only the header and fanout table, and does not eagerly // parse index entries. // // If there was an error parsing, it will be returned immediately. func DecodeIndex(r io.ReaderAt) (*Index, error) { version, err := decodeIndexHeader(r) if err != nil { return nil, err } fanout, err := decodeIndexFanout(r, version.Width()) if err != nil { return nil, err } return &Index{ version: version, fanout: fanout, r: r, }, nil } // decodeIndexHeader determines which version the index given by "r" is. func decodeIndexHeader(r io.ReaderAt) (IndexVersion, error) { hdr := make([]byte, 4) if _, err := r.ReadAt(hdr, 0); err != nil { return nil, err } if bytes.Equal(hdr, indexHeader) { vb := make([]byte, 4) if _, err := r.ReadAt(vb, 4); err != nil { return nil, err } version := binary.BigEndian.Uint32(vb) switch version { case 1: return new(V1), nil case 2: return new(V2), nil } return nil, &UnsupportedVersionErr{uint32(version)} } return new(V1), nil } // decodeIndexFanout decodes the fanout table given by "r" and beginning at the // given offset. 
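//
// Illustratively (an assumed layout): each of the 256 entries is a
// big-endian uint32, so a first entry of {0x00, 0x00, 0x00, 0x05} means
// that five objects have names beginning with byte 0x00.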
func decodeIndexFanout(r io.ReaderAt, offset int64) ([]uint32, error) { b := make([]byte, 256*4) if _, err := r.ReadAt(b, offset); err != nil { if err == io.EOF { return nil, ErrShortFanout } return nil, err } fanout := make([]uint32, 256) for i := range fanout { fanout[i] = binary.BigEndian.Uint32(b[(i * 4):]) } return fanout, nil } git-lfs-2.3.4/git/odb/pack/index_decode_test.go000066400000000000000000000033331317167762300213510ustar00rootroot00000000000000package pack import ( "bytes" "encoding/binary" "io" "testing" "github.com/stretchr/testify/assert" ) func TestDecodeIndexV1InvalidFanout(t *testing.T) { idx, err := DecodeIndex(bytes.NewReader(make([]byte, indexFanoutWidth-1))) assert.Equal(t, ErrShortFanout, err) assert.Nil(t, idx) } func TestDecodeIndexV2(t *testing.T) { buf := make([]byte, 0, indexV2Width+indexFanoutWidth) buf = append(buf, 0xff, 0x74, 0x4f, 0x63) buf = append(buf, 0x0, 0x0, 0x0, 0x2) for i := 0; i < indexFanoutEntries; i++ { x := make([]byte, 4) binary.BigEndian.PutUint32(x, uint32(3)) buf = append(buf, x...) } idx, err := DecodeIndex(bytes.NewReader(buf)) assert.NoError(t, err) assert.EqualValues(t, 3, idx.Count()) } func TestDecodeIndexV2InvalidFanout(t *testing.T) { buf := make([]byte, 0, indexV2Width+indexFanoutWidth-indexFanoutEntryWidth) buf = append(buf, 0xff, 0x74, 0x4f, 0x63) buf = append(buf, 0x0, 0x0, 0x0, 0x2) buf = append(buf, make([]byte, indexFanoutWidth-1)...) idx, err := DecodeIndex(bytes.NewReader(buf)) assert.Nil(t, idx) assert.Equal(t, ErrShortFanout, err) } func TestDecodeIndexV1(t *testing.T) { idx, err := DecodeIndex(bytes.NewReader(make([]byte, indexFanoutWidth))) assert.NoError(t, err) assert.EqualValues(t, 0, idx.Count()) } func TestDecodeIndexUnsupportedVersion(t *testing.T) { buf := make([]byte, 0, 4+4) buf = append(buf, 0xff, 0x74, 0x4f, 0x63) buf = append(buf, 0x0, 0x0, 0x0, 0x3) idx, err := DecodeIndex(bytes.NewReader(buf)) assert.EqualError(t, err, "git/odb/pack: unsupported version: 3") assert.Nil(t, idx) } func TestDecodeIndexEmptyContents(t *testing.T) { idx, err := DecodeIndex(bytes.NewReader(make([]byte, 0))) assert.Equal(t, io.EOF, err) assert.Nil(t, idx) } git-lfs-2.3.4/git/odb/pack/index_entry.go000066400000000000000000000003301317167762300202220ustar00rootroot00000000000000package pack // IndexEntry specifies data encoded into an entry in the pack index. type IndexEntry struct { // PackOffset is the number of bytes before the associated object in a // packfile.
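//
// (In a well-formed packfile the first object begins at offset 12, just
// past the 4-byte "PACK" signature, the 4-byte version, and the 4-byte
// object count.)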
PackOffset uint64 } git-lfs-2.3.4/git/odb/pack/index_test.go000066400000000000000000000106601317167762300200470ustar00rootroot00000000000000package pack import ( "bytes" "encoding/binary" "testing" "github.com/git-lfs/git-lfs/errors" "github.com/stretchr/testify/assert" ) var ( idx *Index ) func TestIndexEntrySearch(t *testing.T) { e, err := idx.Entry([]byte{ 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, }) assert.NoError(t, err) assert.EqualValues(t, 6, e.PackOffset) } func TestIndexEntrySearchClampLeft(t *testing.T) { e, err := idx.Entry([]byte{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, }) assert.NoError(t, err) assert.EqualValues(t, 0, e.PackOffset) } func TestIndexEntrySearchClampRight(t *testing.T) { e, err := idx.Entry([]byte{ 0xff, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, }) assert.NoError(t, err) assert.EqualValues(t, 0x4ff, e.PackOffset) } func TestIndexSearchOutOfBounds(t *testing.T) { e, err := idx.Entry([]byte{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }) assert.True(t, IsNotFound(err), "expected err to be 'not found'") assert.Nil(t, e) } func TestIndexEntryNotFound(t *testing.T) { e, err := idx.Entry([]byte{ 0x1, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, }) assert.True(t, IsNotFound(err), "expected err to be 'not found'") assert.Nil(t, e) } func TestIndexCount(t *testing.T) { fanout := make([]uint32, 256) for i := 0; i < len(fanout); i++ { fanout[i] = uint32(i) } idx := &Index{fanout: fanout} assert.EqualValues(t, 255, idx.Count()) } func TestIndexIsNotFound(t *testing.T) { assert.True(t, IsNotFound(errNotFound), "expected 'errNotFound' to satisfy 'IsNotFound()'") } func TestIndexIsNotFoundForOtherErrors(t *testing.T) { assert.False(t, IsNotFound(errors.New("git/odb/pack: misc")), "expected 'err' not to satisfy 'IsNotFound()'") } // init generates some fixture data and then constructs an *Index instance using // it. func init() { // eps is the number of SHA1 names generated under each 0x slot. const eps = 5 hdr := []byte{ 0xff, 0x74, 0x4f, 0x63, // Index file v2+ magic header 0x00, 0x00, 0x00, 0x02, // 4-byte version indicator } // Create a fanout table using uint32s (later marshalled using // binary.BigEndian). // // Since we have an even distribution of SHA1s in the generated index, // each entry will increase by the number of entries per slot (see: eps // above). fanout := make([]uint32, indexFanoutEntries) for i := 0; i < len(fanout); i++ { // Begin the index at (i+1), since the fanout table mandates // objects less than the value at index "i". fanout[i] = uint32((i + 1) * eps) } offs := make([]uint32, 0, 256*eps) crcs := make([]uint32, 0, 256*eps) names := make([][]byte, 0, 256*eps) for i := 0; i < 256; i++ { // For each name, generate a unique SHA using the prefix "i", // and then suffix "j". // // In other words, when i=1, we will generate: // []byte{0x1 0x0 0x0 0x0 ...} // []byte{0x1 0x1 0x1 0x1 ...} // []byte{0x1 0x2 0x2 0x2 ...} // // and etc. 
for j := 0; j < eps; j++ { var sha [20]byte sha[0] = byte(i) for r := 1; r < len(sha); r++ { sha[r] = byte(j) } cpy := make([]byte, len(sha)) copy(cpy, sha[:]) names = append(names, cpy) offs = append(offs, uint32((i*eps)+j)) crcs = append(crcs, 0) } } // Create a buffer to hold the index contents: buf := bytes.NewBuffer(hdr) // Write each value in the fanout table using a 32bit network byte-order // integer. for _, f := range fanout { binary.Write(buf, binary.BigEndian, f) } // Write each SHA1 name to the table next. for _, name := range names { buf.Write(name) } // Then write each of the CRC values in network byte-order as a 32bit // unsigned integer. for _, crc := range crcs { binary.Write(buf, binary.BigEndian, crc) } // Do the same with the offsets. for _, off := range offs { binary.Write(buf, binary.BigEndian, off) } idx = &Index{ fanout: fanout, // version is unimportant here, use V2 since it's more common in // the wild. version: new(V2), // *bytes.Buffer does not implement io.ReaderAt, but // *bytes.Reader does. // // Call (*bytes.Buffer).Bytes() to get the data, and then // construct a new *bytes.Reader with it to implement // io.ReaderAt. r: bytes.NewReader(buf.Bytes()), } } git-lfs-2.3.4/git/odb/pack/index_v1.go000066400000000000000000000030771317167762300174220ustar00rootroot00000000000000package pack import ( "encoding/binary" ) // V1 implements IndexVersion for v1 packfiles. type V1 struct{} // Name implements IndexVersion.Name by returning the 20 byte SHA-1 object name // for the given entry at offset "at" in the v1 index file "idx". func (v *V1) Name(idx *Index, at int64) ([]byte, error) { var sha [20]byte if _, err := idx.readAt(sha[:], v1ShaOffset(at)); err != nil { return nil, err } return sha[:], nil } // Entry implements IndexVersion.Entry for v1 packfiles by parsing and returning // the IndexEntry specified at the offset "at" in the given index file. func (v *V1) Entry(idx *Index, at int64) (*IndexEntry, error) { var offs [4]byte if _, err := idx.readAt(offs[:], v1EntryOffset(at)); err != nil { return nil, err } return &IndexEntry{ PackOffset: uint64(binary.BigEndian.Uint32(offs[:])), }, nil } // Width implements IndexVersion.Width() by returning the number of bytes that // the v1 packfile index header occupies. func (v *V1) Width() int64 { return indexV1Width } // v1ShaOffset returns the location of the SHA1 of an object given at "at". func v1ShaOffset(at int64) int64 { // Skip forward until the desired entry. return v1EntryOffset(at) + // Skip past the 4-byte object offset in the desired entry to // the SHA1. indexObjectSmallOffsetWidth } // v1EntryOffset returns the location of the packfile offset for the object // given at "at".
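//
// For example (illustrative arithmetic only): the entry at "at" == 2 begins
// at byte 1024 + (24 * 2) = 1072 of the index, since the V1 header is empty,
// the fanout table occupies 256 * 4 = 1024 bytes, and each V1 entry occupies
// 20 + 4 = 24 bytes.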
func v1EntryOffset(at int64) int64 { // Skip the L1 fanout table return indexOffsetV1Start + // Skip the object entries before the one located at "at" (indexObjectEntryV1Width * at) } git-lfs-2.3.4/git/odb/pack/index_v1_test.go000066400000000000000000000031521317167762300204530ustar00rootroot00000000000000package pack import ( "bytes" "encoding/binary" "testing" "github.com/stretchr/testify/assert" ) var ( V1IndexFanout = make([]uint32, indexFanoutEntries) V1IndexSmallEntry = []byte{ 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, } V1IndexSmallSha = V1IndexSmallEntry[4:] V1IndexMediumEntry = []byte{ 0x0, 0x0, 0x0, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, } V1IndexMediumSha = V1IndexMediumEntry[4:] V1IndexLargeEntry = []byte{ 0x0, 0x0, 0x0, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, } V1IndexLargeSha = V1IndexLargeEntry[4:] V1Index = &Index{ fanout: V1IndexFanout, version: new(V1), } ) func TestIndexV1SearchExact(t *testing.T) { e, err := new(V1).Entry(V1Index, 1) assert.NoError(t, err) assert.EqualValues(t, 2, e.PackOffset) } func TestIndexVersionWidthV1(t *testing.T) { assert.EqualValues(t, 0, new(V1).Width()) } func init() { V1IndexFanout[1] = 1 V1IndexFanout[2] = 2 V1IndexFanout[3] = 3 for i := 3; i < len(V1IndexFanout); i++ { V1IndexFanout[i] = 3 } fanout := make([]byte, indexFanoutWidth) for i, n := range V1IndexFanout { binary.BigEndian.PutUint32(fanout[i*indexFanoutEntryWidth:], n) } buf := make([]byte, 0, indexOffsetV1Start+(3*indexObjectEntryV1Width)) buf = append(buf, fanout...) buf = append(buf, V1IndexSmallEntry...) buf = append(buf, V1IndexMediumEntry...) buf = append(buf, V1IndexLargeEntry...) V1Index.r = bytes.NewReader(buf) } git-lfs-2.3.4/git/odb/pack/index_v2.go000066400000000000000000000054321317167762300174200ustar00rootroot00000000000000package pack import ( "encoding/binary" ) // V2 implements IndexVersion for v2 packfiles. type V2 struct{} // Name implements IndexVersion.Name by returning the 20 byte SHA-1 object name // for the given entry at offset "at" in the v2 index file "idx". func (v *V2) Name(idx *Index, at int64) ([]byte, error) { var sha [20]byte if _, err := idx.readAt(sha[:], v2ShaOffset(at)); err != nil { return nil, err } return sha[:], nil } // Entry implements IndexVersion.Entry for v2 packfiles by parsing and returning // the IndexEntry specified at the offset "at" in the given index file. func (v *V2) Entry(idx *Index, at int64) (*IndexEntry, error) { var offs [4]byte if _, err := idx.readAt(offs[:], v2SmallOffsetOffset(at, int64(idx.Count()))); err != nil { return nil, err } loc := uint64(binary.BigEndian.Uint32(offs[:])) if loc&0x80000000 > 0 { // If the most significant bit (MSB) of the offset is set, then // the offset encodes the indexed location for an 8-byte offset. // // Mask away the MSB (loc&0x7fffffff) to use the result as an index to // find the offset of the 8-byte pack offset. lo := v2LargeOffsetOffset(int64(loc&0x7fffffff), int64(idx.Count())) var offs [8]byte if _, err := idx.readAt(offs[:], lo); err != nil { return nil, err } loc = binary.BigEndian.Uint64(offs[:]) } return &IndexEntry{PackOffset: loc}, nil } // Width implements IndexVersion.Width() by returning the number of bytes that // the v2 packfile index header occupies.
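//
// (For V2, this is the 4-byte magic sequence plus the 4-byte version
// indicator, hence 8 bytes in total.)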
func (v *V2) Width() int64 { return indexV2Width } // v2ShaOffset returns the offset of a SHA1 given at "at" in the V2 index file. func v2ShaOffset(at int64) int64 { // Skip the packfile index header and the L1 fanout table. return indexOffsetV2Start + // Skip until the desired name in the sorted names table. (indexObjectNameWidth * at) } // v2SmallOffsetOffset returns the offset of an object's small (4-byte) offset // given by "at". func v2SmallOffsetOffset(at, total int64) int64 { // Skip the packfile index header and the L1 fanout table. return indexOffsetV2Start + // Skip the name table. (indexObjectNameWidth * total) + // Skip the CRC table. (indexObjectCRCWidth * total) + // Skip until the desired index in the small offsets table. (indexObjectSmallOffsetWidth * at) } // v2LargeOffsetOffset returns the offset of an object's large (8-byte) offset, // given by the index "at". func v2LargeOffsetOffset(at, total int64) int64 { // Skip the packfile index header and the L1 fanout table. return indexOffsetV2Start + // Skip the name table. (indexObjectNameWidth * total) + // Skip the CRC table. (indexObjectCRCWidth * total) + // Skip the small offsets table. (indexObjectSmallOffsetWidth * total) + // Seek to the large offset within the large offset(s) table. (indexObjectLargeOffsetWidth * at) } git-lfs-2.3.4/git/odb/pack/index_v2_test.go000066400000000000000000000041421317167762300204540ustar00rootroot00000000000000package pack import ( "bytes" "encoding/binary" "testing" "github.com/stretchr/testify/assert" ) var ( V2IndexHeader = []byte{ 0xff, 0x74, 0x4f, 0x63, 0x00, 0x00, 0x00, 0x02, } V2IndexFanout = make([]uint32, indexFanoutEntries) V2IndexNames = []byte{ 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, } V2IndexSmallSha = V2IndexNames[0:20] V2IndexMediumSha = V2IndexNames[20:40] V2IndexLargeSha = V2IndexNames[40:60] V2IndexCRCs = []byte{ 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x1, 0x2, 0x2, 0x2, 0x2, } V2IndexOffsets = []byte{ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x80, 0x00, 0x00, 0x01, // use the second large offset 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // filler data 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, // large offset } V2Index = &Index{ fanout: V2IndexFanout, version: new(V2), } ) func TestIndexV2EntryExact(t *testing.T) { e, err := new(V2).Entry(V2Index, 1) assert.NoError(t, err) assert.EqualValues(t, 2, e.PackOffset) } func TestIndexV2EntryExtendedOffset(t *testing.T) { e, err := new(V2).Entry(V2Index, 2) assert.NoError(t, err) assert.EqualValues(t, 3, e.PackOffset) } func TestIndexVersionWidthV2(t *testing.T) { assert.EqualValues(t, 8, new(V2).Width()) } func init() { V2IndexFanout[1] = 1 V2IndexFanout[2] = 2 V2IndexFanout[3] = 3 for i := 3; i < len(V2IndexFanout); i++ { V2IndexFanout[i] = 3 } fanout := make([]byte, indexFanoutWidth) for i, n := range V2IndexFanout { binary.BigEndian.PutUint32(fanout[i*indexFanoutEntryWidth:], n) } buf := make([]byte, 0, indexOffsetV2Start+3*(indexObjectEntryV2Width)+indexObjectLargeOffsetWidth) buf = append(buf, V2IndexHeader...) buf = append(buf, fanout...) buf = append(buf, V2IndexNames...) buf = append(buf, V2IndexCRCs...) buf = append(buf, V2IndexOffsets...)
V2Index.r = bytes.NewReader(buf) } git-lfs-2.3.4/git/odb/pack/index_version.go000066400000000000000000000013641317167762300205560ustar00rootroot00000000000000package pack // IndexVersion is an interface that represents the version-specific encoding // used by a particular version of the pack index format. type IndexVersion interface { // Name returns the name of the object located at the given offset "at", // in the Index file "idx". // // It returns an error if the object at that location could not be // parsed. Name(idx *Index, at int64) ([]byte, error) // Entry parses and returns the full *IndexEntry located at the offset // "at" in the Index file "idx". // // If there was an error parsing the IndexEntry at that location, it // will be returned. Entry(idx *Index, at int64) (*IndexEntry, error) // Width returns the number of bytes occupied by the header of a // particular index version. Width() int64 } git-lfs-2.3.4/git/odb/pack/io.go000066400000000000000000000014531317167762300163100ustar00rootroot00000000000000package pack import "io" // OffsetReaderAt transforms an io.ReaderAt into an io.Reader by beginning and // advancing all reads at the given offset. type OffsetReaderAt struct { // r is the data source for this instance of *OffsetReaderAt. r io.ReaderAt // o is the offset at which the next read of the underlying data source, // "r", will begin. It is incremented upon reads. o int64 } // Read implements io.Reader.Read by reading into the given []byte, "p", from // the last known offset provided to the OffsetReaderAt. // // It returns any error encountered from the underlying data stream, and // advances the reader forward by "n", the number of bytes read from the // underlying data stream. func (r *OffsetReaderAt) Read(p []byte) (n int, err error) { n, err = r.r.ReadAt(p, r.o) r.o += int64(n) return n, err } git-lfs-2.3.4/git/odb/pack/io_test.go000066400000000000000000000016221317167762300173450ustar00rootroot00000000000000package pack import ( "bytes" "testing" "github.com/git-lfs/git-lfs/errors" "github.com/stretchr/testify/assert" ) func TestOffsetReaderAtReadsAtOffset(t *testing.T) { bo := &OffsetReaderAt{ r: bytes.NewReader([]byte{0x0, 0x1, 0x2, 0x3}), o: 1, } var x1 [1]byte n1, e1 := bo.Read(x1[:]) assert.NoError(t, e1) assert.Equal(t, 1, n1) assert.EqualValues(t, 0x1, x1[0]) var x2 [1]byte n2, e2 := bo.Read(x2[:]) assert.NoError(t, e2) assert.Equal(t, 1, n2) assert.EqualValues(t, 0x2, x2[0]) } func TestOffsetReaderPropogatesErrors(t *testing.T) { expected := errors.New("git/odb/pack: testing") bo := &OffsetReaderAt{ r: &ErrReaderAt{Err: expected}, o: 1, } n, err := bo.Read(make([]byte, 1)) assert.Equal(t, expected, err) assert.Equal(t, 0, n) } type ErrReaderAt struct { Err error } func (e *ErrReaderAt) ReadAt(p []byte, at int64) (n int, err error) { return 0, e.Err } git-lfs-2.3.4/git/odb/pack/object.go000066400000000000000000000017241317167762300171500ustar00rootroot00000000000000package pack // Object is an encapsulation of an object found in a packfile, or a packed // object. type Object struct { // data is the front-most element of the delta-base chain, and when // resolved, yields the uncompressed data of this object. data Chain // typ is the underlying object's type. It is not the type of the // front-most chain element, rather, the type of the actual object. typ PackedObjectType } // Unpack resolves the delta-base chain and returns an uncompressed, unpacked, // and full representation of the data encoded by this object.
// // If there was any error in unpacking this object, it is returned immediately, // and the object's data can be assumed to be corrupt. func (o *Object) Unpack() ([]byte, error) { return o.data.Unpack() } // Type returns the underlying object's type. Rather than the type of the // front-most delta-base component, it is the type of the object itself. func (o *Object) Type() PackedObjectType { return o.typ } git-lfs-2.3.4/git/odb/pack/object_test.go000066400000000000000000000013361317167762300202060ustar00rootroot00000000000000package pack import ( "testing" "github.com/git-lfs/git-lfs/errors" "github.com/stretchr/testify/assert" ) func TestObjectTypeReturnsObjectType(t *testing.T) { o := &Object{ typ: TypeCommit, } assert.Equal(t, TypeCommit, o.Type()) } func TestObjectUnpackUnpacksData(t *testing.T) { expected := []byte{0x1, 0x2, 0x3, 0x4} o := &Object{ data: &ChainSimple{ X: expected, }, } data, err := o.Unpack() assert.Equal(t, expected, data) assert.NoError(t, err) } func TestObjectUnpackPropogatesErrors(t *testing.T) { expected := errors.New("git/odb/pack: testing") o := &Object{ data: &ChainSimple{ Err: expected, }, } data, err := o.Unpack() assert.Nil(t, data) assert.Equal(t, expected, err) } git-lfs-2.3.4/git/odb/pack/packfile.go000066400000000000000000000153101317167762300174540ustar00rootroot00000000000000package pack import ( "compress/zlib" "io" "io/ioutil" "github.com/git-lfs/git-lfs/errors" ) // Packfile encapsulates the behavior of accessing an unpacked representation of // all of the objects encoded in a single packfile. type Packfile struct { // Version is the version of the packfile. Version uint32 // Objects is the total number of objects in the packfile. Objects uint32 // idx is the corresponding "pack-*.idx" file giving the positions of // objects in this packfile. idx *Index // r is an io.ReaderAt that allows read access to the packfile itself. r io.ReaderAt } // Close closes the packfile if the underlying data stream is closeable. If so, // it returns any error involved in closing. func (p *Packfile) Close() error { var iErr error if p.idx != nil { iErr = p.idx.Close() } if close, ok := p.r.(io.Closer); ok { return close.Close() } return iErr } // Object returns a reference to an object packed in the receiving *Packfile. It // does not attempt to unpack the packfile, rather, that is accomplished by // calling Unpack() on the returned *Object. // // If there was an error loading or buffering the base, it will be returned // without an object. // // If the object given by the SHA-1 name, "name", could not be found, // (nil, errNotFound) will be returned. // // If the object was able to be loaded successfully, it will be returned without // any error. func (p *Packfile) Object(name []byte) (*Object, error) { // First, try and determine the offset of the last entry in the // delta-base chain by loading it from the corresponding pack index. entry, err := p.idx.Entry(name) if err != nil { if !IsNotFound(err) { // If the error was not an errNotFound, re-wrap it with // additional context. err = errors.Wrap(err, "git/odb/pack: could not load index") } return nil, err } // If all goes well, then unpack the object at that given offset. r, err := p.find(int64(entry.PackOffset)) if err != nil { return nil, err } return &Object{ data: r, typ: r.Type(), }, nil } // find finds and returns a Chain element corresponding to the offset of its // last element as given by the "offset" argument. 
// // If find returns a ChainBase, it loads that data into memory, but does not // zlib-flate it. Otherwise, if find returns a ChainDelta, it loads all of the // leading elements in the chain recursively, but does not apply one delta to // another. func (p *Packfile) find(offset int64) (Chain, error) { // Read the first byte in the chain element. buf := make([]byte, 1) if _, err := p.r.ReadAt(buf, offset); err != nil { return nil, err } // Store the original offset; this will be compared to when loading // chain elements of type OBJ_OFS_DELTA. objectOffset := offset // Of the first byte, (0123 4567): // - Bit 0 is the M.S.B., and indicates whether there is more data // encoded in the length. // - Bits 1-3 ((buf[0] >> 4) & 0x7) are the object type. // - Bits 4-7 (buf[0] & 0xf) are the first 4 bits of the variable // length size of the encoded delta or base. typ := PackedObjectType((buf[0] >> 4) & 0x7) size := uint64(buf[0] & 0xf) shift := uint(4) offset += 1 for buf[0]&0x80 != 0 { // If there is more data to be read, read it. if _, err := p.r.ReadAt(buf, offset); err != nil { return nil, err } // And update the size, bitshift, and offset accordingly. size |= (uint64(buf[0]&0x7f) << shift) shift += 7 offset += 1 } switch typ { case TypeObjectOffsetDelta, TypeObjectReferenceDelta: // If the type of delta-base element is a delta, (either // OBJ_OFS_DELTA, or OBJ_REFS_DELTA), we must load the base, // which itself could be either of the two above, or a // OBJ_COMMIT, OBJ_BLOB, etc. // // Recursively load the base, and keep track of the updated // offset. base, offset, err := p.findBase(typ, offset, objectOffset) if err != nil { return nil, err } // Now load the delta to apply to the base, given at the offset // "offset" and for length "size". // // NB: The delta instructions are zlib compressed, so ensure // that we uncompress the instructions first. zr, err := zlib.NewReader(&OffsetReaderAt{ o: offset, r: p.r, }) if err != nil { return nil, err } delta, err := ioutil.ReadAll(zr) if err != nil { return nil, err } // Then compose the two and return it as a *ChainDelta. return &ChainDelta{ base: base, delta: delta, }, nil case TypeCommit, TypeTree, TypeBlob, TypeTag: // Otherwise, the object's contents are given to be the // following zlib-compressed data. // // The length of the compressed data itself is not known, // rather, "size" determines the length of the data after // inflation. return &ChainBase{ offset: offset, size: int64(size), typ: typ, r: p.r, }, nil } // Otherwise, we received an invalid object type. return nil, errUnrecognizedObjectType } // findBase finds the base (an object, or another delta) for a given // OBJ_OFS_DELTA or OBJ_REFS_DELTA at the given offset. // // It returns the preceding Chain, as well as an updated read offset into the // underlying packfile data. // // If any of the above could not be completed successfully, findBase returns an // error. func (p *Packfile) findBase(typ PackedObjectType, offset, objOffset int64) (Chain, int64, error) { var baseOffset int64 // We assume that we have to read at least 20 bytes (the SHA-1 length in // the case of a OBJ_REF_DELTA, or greater than the length of the base // offset encoded in an OBJ_OFS_DELTA). var sha [20]byte if _, err := p.r.ReadAt(sha[:], offset); err != nil { return nil, baseOffset, err } switch typ { case TypeObjectOffsetDelta: // If the object is of type OBJ_OFS_DELTA, read a // variable-length integer, and find the object at that // location. 
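// // The base offset is encoded as a variable-length integer: the MSB of // each byte marks a continuation, and each continuation adds one before // shifting, so that no offset has two distinct encodings. For example, // the bytes {0x91, 0x2e} decode to ((0x11+1)<<7)|0x2e = 0x92e, which is // then subtracted from the delta's own offset to locate the base.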
i := 0 c := int64(sha[i]) baseOffset = c & 0x7f for c&0x80 != 0 { i += 1 c = int64(sha[i]) baseOffset += 1 baseOffset <<= 7 baseOffset |= c & 0x7f } baseOffset = objOffset - baseOffset offset += int64(i) + 1 case TypeObjectReferenceDelta: // If the delta is an OBJ_REFS_DELTA, find the location of its // base by reading the SHA-1 name and looking it up in the // corresponding pack index file. e, err := p.idx.Entry(sha[:]) if err != nil { return nil, baseOffset, err } baseOffset = int64(e.PackOffset) offset += 20 default: // If we did not receive an OBJ_OFS_DELTA, or OBJ_REF_DELTA, the // type given is not a delta-fied type. Return an error. return nil, baseOffset, errors.Errorf( "git/odb/pack: type %s is not deltafied", typ) } // Once we have determined the base offset of the object's chain base, // read the delta-base chain beginning at that offset. r, err := p.find(baseOffset) return r, offset, err } git-lfs-2.3.4/git/odb/pack/packfile_decode.go000066400000000000000000000021021317167762300207520ustar00rootroot00000000000000package pack import ( "bytes" "encoding/binary" "errors" "io" ) var ( // packHeader is the expected header that begins all valid packfiles. packHeader = []byte{'P', 'A', 'C', 'K'} // errBadPackHeader is a sentinel error value returned when the given // pack header does not match the expected one. errBadPackHeader = errors.New("git/odb/pack: bad pack header") ) // DecodePackfile opens the packfile given by the io.ReaderAt "r" for reading. // It does not apply any delta-base chains, nor does it do reading otherwise // beyond the header. // // If the header is malformed, or otherwise cannot be read, an error will be // returned without a corresponding packfile. func DecodePackfile(r io.ReaderAt) (*Packfile, error) { header := make([]byte, 12) if _, err := r.ReadAt(header[:], 0); err != nil { return nil, err } if !bytes.HasPrefix(header, packHeader) { return nil, errBadPackHeader } version := binary.BigEndian.Uint32(header[4:]) objects := binary.BigEndian.Uint32(header[8:]) return &Packfile{ Version: version, Objects: objects, r: r, }, nil } git-lfs-2.3.4/git/odb/pack/packfile_decode_test.go000066400000000000000000000017731317167762300220260ustar00rootroot00000000000000package pack import ( "bytes" "testing" "github.com/stretchr/testify/assert" ) func TestDecodePackfileDecodesIntegerVersion(t *testing.T) { p, err := DecodePackfile(bytes.NewReader([]byte{ 'P', 'A', 'C', 'K', // Pack header. 0x0, 0x0, 0x0, 0x2, // Pack version. 0x0, 0x0, 0x0, 0x0, // Number of packed objects. })) assert.NoError(t, err) assert.EqualValues(t, 2, p.Version) } func TestDecodePackfileDecodesIntegerCount(t *testing.T) { p, err := DecodePackfile(bytes.NewReader([]byte{ 'P', 'A', 'C', 'K', // Pack header. 0x0, 0x0, 0x0, 0x2, // Pack version. 0x0, 0x0, 0x1, 0x2, // Number of packed objects. })) assert.NoError(t, err) assert.EqualValues(t, 258, p.Objects) } func TestDecodePackfileReportsBadHeaders(t *testing.T) { p, err := DecodePackfile(bytes.NewReader([]byte{ 'W', 'R', 'O', 'N', 'G', // Malformed pack header. 0x0, 0x0, 0x0, 0x0, // Pack version. 0x0, 0x0, 0x0, 0x0, // Number of packed objects. 
})) assert.Equal(t, errBadPackHeader, err) assert.Nil(t, p) } git-lfs-2.3.4/git/odb/pack/packfile_test.go000066400000000000000000000150531317167762300205170ustar00rootroot00000000000000package pack import ( "bytes" "encoding/binary" "encoding/hex" "sort" "strings" "sync/atomic" "testing" "github.com/git-lfs/git-lfs/errors" "github.com/stretchr/testify/assert" ) func TestPackObjectReturnsObjectWithSingleBaseAtLowOffset(t *testing.T) { const original = "Hello, world!\n" compressed, _ := compress(original) p := &Packfile{ idx: IndexWith(map[string]uint32{ "cccccccccccccccccccccccccccccccccccccccc": 32, }), r: bytes.NewReader(append([]byte{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // (0001 1000) (msb=0, type=commit, size=14) 0x1e}, compressed...), ), } o, err := p.Object(DecodeHex(t, "cccccccccccccccccccccccccccccccccccccccc")) assert.NoError(t, err) assert.Equal(t, TypeCommit, o.Type()) unpacked, err := o.Unpack() assert.Equal(t, []byte(original), unpacked) assert.NoError(t, err) } func TestPackObjectReturnsObjectWithSingleBaseAtHighOffset(t *testing.T) { original := strings.Repeat("four", 64) compressed, _ := compress(original) p := &Packfile{ idx: IndexWith(map[string]uint32{ "cccccccccccccccccccccccccccccccccccccccc": 32, }), r: bytes.NewReader(append([]byte{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // (1001 0000) (msb=1, type=commit, size=0) 0x90, // (1000 0000) (msb=0, size=1 -> size=256) 0x10}, compressed..., )), } o, err := p.Object(DecodeHex(t, "cccccccccccccccccccccccccccccccccccccccc")) assert.NoError(t, err) assert.Equal(t, TypeCommit, o.Type()) unpacked, err := o.Unpack() assert.Equal(t, []byte(original), unpacked) assert.NoError(t, err) } func TestPackObjectReturnsObjectWithDeltaBaseOffset(t *testing.T) { const original = "Hello" compressed, _ := compress(original) delta, err := compress(string([]byte{ 0x05, // Source size: 5. 0x0e, // Destination size: 14. 0x91, // (1000 0001) (instruction=copy, bitmask=0001) 0x00, // (0000 0000) (offset=0) 0x05, // (0000 0101) (size=5) 0x09, // (0000 0111) (instruction=add, size=7) // Contents: ... ',', ' ', 'w', 'o', 'r', 'l', 'd', '!', '\n', })) p := &Packfile{ idx: IndexWith(map[string]uint32{ "cccccccccccccccccccccccccccccccccccccccc": uint32(32 + 1 + len(compressed)), }), r: bytes.NewReader(append(append([]byte{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x35, // (0011 0101) (msb=0, type=blob, size=5) }, compressed...), append([]byte{ 0x6e, // (0110 1010) (msb=0, type=obj_ofs_delta, size=10) 0x12, // (0001 0001) (ofs_delta=-17, len(compressed)) }, delta...)...)), } o, err := p.Object(DecodeHex(t, "cccccccccccccccccccccccccccccccccccccccc")) assert.NoError(t, err) assert.Equal(t, TypeBlob, o.Type()) unpacked, err := o.Unpack() assert.Equal(t, []byte(original+", world!\n"), unpacked) assert.NoError(t, err) } func TestPackfileObjectReturnsObjectWithDeltaBaseReference(t *testing.T) { const original = "Hello!\n" compressed, _ := compress(original) delta, _ := compress(string([]byte{ 0x07, // Source size: 7. 0x0e, // Destination size: 14. 
0x91, // (1001 0001) (copy, smask=0001, omask=0001) 0x00, // (0000 0000) (offset=0) 0x05, // (0000 0101) (size=5) 0x7, // (0000 0111) (add, length=6) ',', ' ', 'w', 'o', 'r', 'l', 'd', // (data ...) 0x91, // (1001 0001) (copy, smask=0001, omask=0001) 0x05, // (0000 0101) (offset=5) 0x02, // (0000 0010) (size=2) })) p := &Packfile{ idx: IndexWith(map[string]uint32{ "cccccccccccccccccccccccccccccccccccccccc": 32, "dddddddddddddddddddddddddddddddddddddddd": 52, }), r: bytes.NewReader(append(append([]byte{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x37, // (0011 0101) (msb=0, type=blob, size=7) }, compressed...), append([]byte{ 0x7f, // (0111 1111) (msb=0, type=obj_ref_delta, size=15) // SHA-1 "cccccccccccccccccccccccccccccccccccccccc", // original blob contents is "Hello!\n" 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, }, delta...)...)), } o, err := p.Object(DecodeHex(t, "dddddddddddddddddddddddddddddddddddddddd")) assert.NoError(t, err) assert.Equal(t, TypeBlob, o.Type()) unpacked, err := o.Unpack() assert.Equal(t, []byte("Hello, world!\n"), unpacked) assert.NoError(t, err) } func TestPackfileClosesReadClosers(t *testing.T) { r := new(ReaderAtCloser) p := &Packfile{ r: r, } assert.NoError(t, p.Close()) assert.EqualValues(t, 1, r.N) } func TestPackfileClosePropogatesCloseErrors(t *testing.T) { e := errors.New("git/odb/pack: testing") p := &Packfile{ r: &ReaderAtCloser{E: e}, } assert.Equal(t, e, p.Close()) } type ReaderAtCloser struct { E error N uint64 } func (r *ReaderAtCloser) ReadAt(p []byte, at int64) (int, error) { return 0, nil } func (r *ReaderAtCloser) Close() error { atomic.AddUint64(&r.N, 1) return r.E } func IndexWith(offsets map[string]uint32) *Index { header := []byte{ 0xff, 0x74, 0x4f, 0x63, 0x00, 0x00, 0x00, 0x02, } ns := make([][]byte, 0, len(offsets)) for name, _ := range offsets { x, _ := hex.DecodeString(name) ns = append(ns, x) } sort.Slice(ns, func(i, j int) bool { return bytes.Compare(ns[i], ns[j]) < 0 }) fanout := make([]uint32, 256) for i := 0; i < len(fanout); i++ { var n uint32 for _, name := range ns { if name[0] <= byte(i) { n++ } } fanout[i] = n } crcs := make([]byte, 4*len(offsets)) for i, _ := range ns { binary.BigEndian.PutUint32(crcs[i*4:], 0) } offs := make([]byte, 4*len(offsets)) for i, name := range ns { binary.BigEndian.PutUint32(offs[i*4:], offsets[hex.EncodeToString(name)]) } buf := make([]byte, 0) buf = append(buf, header...) for _, f := range fanout { x := make([]byte, 4) binary.BigEndian.PutUint32(x, f) buf = append(buf, x...) } for _, n := range ns { buf = append(buf, n...) } buf = append(buf, crcs...) buf = append(buf, offs...) return &Index{ fanout: fanout, r: bytes.NewReader(buf), version: new(V2), } } func DecodeHex(t *testing.T, str string) []byte { b, err := hex.DecodeString(str) if err != nil { t.Fatalf("git/odb/pack: unexpected hex.DecodeString error: %s", err) } return b } git-lfs-2.3.4/git/odb/pack/set.go000066400000000000000000000107651317167762300165020ustar00rootroot00000000000000package pack import ( "fmt" "os" "path/filepath" "regexp" "sort" ) // Set allows access of objects stored across a set of packfiles. type Set struct { // m maps the leading byte of a SHA-1 object name to a set of packfiles // that might contain that object, in order of which packfile is most // likely to contain that object. 
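// // For example, a packfile whose index fanout records any objects whose // names begin with byte 0xaa appears in m[0xaa]; that list is sorted so // that packs with higher fanout counts at that byte are searched first.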
m map[byte][]*Packfile // closeFn is a function that is run by Close(), designated to free // resources held by the *Set, like open packfiles. closeFn func() error } var ( // nameRe is a regular expression that matches the basename of a // filepath that is a packfile. // // It includes one match group, which is the SHA-1 name of the pack. nameRe = regexp.MustCompile(`^pack-([a-f0-9]{40}).pack$`) ) // NewSet creates a new *Set of all packfiles found in a given object database's // root (i.e., "/path/to/repo/.git/objects"). // // It finds all packfiles in the "pack" subdirectory, and instantiates a *Set // containing them. If there was an error parsing the packfiles in that // directory, or the directory could not be read, NewSet returns that error. func NewSet(db string) (*Set, error) { pd := filepath.Join(db, "pack") paths, err := filepath.Glob(filepath.Join(pd, "pack-*.pack")) if err != nil { return nil, err } packs := make([]*Packfile, 0, len(paths)) for _, path := range paths { submatch := nameRe.FindStringSubmatch(filepath.Base(path)) if len(submatch) != 2 { continue } name := submatch[1] packf, err := os.Open(filepath.Join(pd, fmt.Sprintf("pack-%s.pack", name))) if err != nil { return nil, err } idxf, err := os.Open(filepath.Join(pd, fmt.Sprintf("pack-%s.idx", name))) if err != nil { return nil, err } pack, err := DecodePackfile(packf) if err != nil { return nil, err } idx, err := DecodeIndex(idxf) if err != nil { return nil, err } pack.idx = idx packs = append(packs, pack) } return NewSetPacks(packs...), nil } // NewSetPacks creates a new *Set from the given packfiles. func NewSetPacks(packs ...*Packfile) *Set { m := make(map[byte][]*Packfile) for i := 0; i < 256; i++ { n := byte(i) for j := 0; j < len(packs); j++ { pack := packs[j] var count uint32 if n == 0 { count = pack.idx.fanout[n] } else { count = pack.idx.fanout[n] - pack.idx.fanout[n-1] } if count > 0 { m[n] = append(m[n], pack) } } sort.Slice(m[n], func(i, j int) bool { ni := m[n][i].idx.fanout[n] nj := m[n][j].idx.fanout[n] return ni > nj }) } return &Set{ m: m, closeFn: func() error { for _, pack := range packs { if err := pack.Close(); err != nil { return err } } return nil }, } } // Close closes all open packfiles, returning an error if one was encountered. func (s *Set) Close() error { if s.closeFn == nil { return nil } return s.closeFn() } // Object opens (but does not unpack, or apply the delta-base chain of) a given // object in the first packfile that matches it. // // Object searches packfiles contained in the set in order of how many objects // they have that begin with the first byte of the given SHA-1 "name", in // descending order. // // If the object was unable to be found in any of the packfiles, (nil, // errNotFound) will be returned. // // If there was otherwise an error opening the object for reading from any of // the packfiles, it will be returned, and no other packfiles will be searched. // // Otherwise, the object will be returned without error. func (s *Set) Object(name []byte) (*Object, error) { return s.each(name, func(p *Packfile) (*Object, error) { return p.Object(name) }) } // iterFn is a function that takes a given packfile and opens an object from it. type iterFn func(p *Packfile) (o *Object, err error) // each executes the given iterFn "fn" on each Packfile that has any objects // beginning with a prefix of the SHA-1 "name", in order of which packfiles have // the most objects beginning with that prefix.
// // If any invocation of "fn" returns a non-nil error, it will either be a) // returned immediately, if the error is not errNotFound, or b) skipped, with // iteration continuing to the next packfile, if the error is errNotFound. // // If no packfiles contain the given object, each returns errNotFound, along // with no object. func (s *Set) each(name []byte, fn iterFn) (*Object, error) { var key byte if len(name) > 0 { key = name[0] } for _, pack := range s.m[key] { o, err := fn(pack) if err != nil { if IsNotFound(err) { continue } return nil, err } return o, nil } return nil, errNotFound } git-lfs-2.3.4/git/odb/pack/set_test.go000066400000000000000000000033011317167762300175270ustar00rootroot00000000000000package pack import ( "bytes" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestSetOpenOpensAPackedObject(t *testing.T) { const sha = "decafdecafdecafdecafdecafdecafdecafdecaf" const data = "Hello, world!\n" compressed, _ := compress(data) set := NewSetPacks(&Packfile{ idx: IndexWith(map[string]uint32{ sha: 0, }), r: bytes.NewReader(append([]byte{0x3e}, compressed...)), }) o, err := set.Object(DecodeHex(t, sha)) assert.NoError(t, err) assert.Equal(t, TypeBlob, o.Type()) unpacked, err := o.Unpack() assert.NoError(t, err) assert.Equal(t, []byte(data), unpacked) } func TestSetOpenOpensPackedObjectsInPackOrder(t *testing.T) { p1 := &Packfile{ Objects: 1, idx: IndexWith(map[string]uint32{ "aa00000000000000000000000000000000000000": 1, }), r: bytes.NewReader(nil), } p2 := &Packfile{ Objects: 2, idx: IndexWith(map[string]uint32{ "aa11111111111111111111111111111111111111": 1, "aa22222222222222222222222222222222222222": 2, }), r: bytes.NewReader(nil), } p3 := &Packfile{ Objects: 3, idx: IndexWith(map[string]uint32{ "aa33333333333333333333333333333333333333": 3, "aa44444444444444444444444444444444444444": 4, "aa55555555555555555555555555555555555555": 5, }), r: bytes.NewReader(nil), } set := NewSetPacks(p1, p2, p3) var visited []*Packfile set.each( DecodeHex(t, "aa55555555555555555555555555555555555555"), func(p *Packfile) (*Object, error) { visited = append(visited, p) return nil, errNotFound }, ) require.Len(t, visited, 3) assert.EqualValues(t, visited[0].Objects, 3) assert.EqualValues(t, visited[1].Objects, 2) assert.EqualValues(t, visited[2].Objects, 1) } git-lfs-2.3.4/git/odb/pack/type.go000066400000000000000000000030221317167762300166540ustar00rootroot00000000000000package pack import ( "errors" "fmt" ) // PackedObjectType is a constant type that is defined for all valid object // types that a packed object can represent. type PackedObjectType uint8 const ( // TypeNone is the zero-value for PackedObjectType, and represents the // absence of a type. TypeNone PackedObjectType = iota // TypeCommit is the PackedObjectType for commit objects. TypeCommit // TypeTree is the PackedObjectType for tree objects. TypeTree // TypeBlob is the PackedObjectType for blob objects. TypeBlob // TypeTag is the PackedObjectType for tag objects. TypeTag // TypeObjectOffsetDelta is the type for OBJ_OFS_DELTA-typed objects. TypeObjectOffsetDelta PackedObjectType = 6 // TypeObjectReferenceDelta is the type for OBJ_REF_DELTA-typed objects. TypeObjectReferenceDelta PackedObjectType = 7 ) // String implements fmt.Stringer and returns an encoding of the type valid for // use in the loose object format protocol (see: package 'git/odb' for more). // // If the receiving instance is not defined, String() will panic().
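// // For example, TypeCommit.String() returns "commit", while an undefined // value such as PackedObjectType(5) panics.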
func (t PackedObjectType) String() string { switch t { case TypeNone: return "" case TypeCommit: return "commit" case TypeTree: return "tree" case TypeBlob: return "blob" case TypeTag: return "tag" case TypeObjectOffsetDelta: return "obj_ofs_delta" case TypeObjectReferenceDelta: return "obj_ref_delta" } panic(fmt.Sprintf("git/odb/pack: unknown object type: %d", t)) } var ( errUnrecognizedObjectType = errors.New("git/odb/pack: unrecognized object type") ) git-lfs-2.3.4/git/odb/pack/type_test.go000066400000000000000000000022031317167762300177130ustar00rootroot00000000000000package pack import ( "fmt" "testing" "github.com/stretchr/testify/assert" ) type PackedObjectStringTestCase struct { T PackedObjectType Expected string Panic bool } func (c *PackedObjectStringTestCase) Assert(t *testing.T) { if c.Panic { defer func() { err := recover() if err == nil { t.Fatalf("git/odb/pack: expected panic()") } assert.Equal(t, c.Expected, fmt.Sprintf("%s", err)) }() } assert.Equal(t, c.Expected, c.T.String()) } func TestPackedObjectTypeString(t *testing.T) { for desc, c := range map[string]*PackedObjectStringTestCase{ "TypeNone": {T: TypeNone, Expected: ""}, "TypeCommit": {T: TypeCommit, Expected: "commit"}, "TypeTree": {T: TypeTree, Expected: "tree"}, "TypeBlob": {T: TypeBlob, Expected: "blob"}, "TypeTag": {T: TypeTag, Expected: "tag"}, "TypeObjectOffsetDelta": {T: TypeObjectOffsetDelta, Expected: "obj_ofs_delta"}, "TypeObjectReferenceDelta": {T: TypeObjectReferenceDelta, Expected: "obj_ref_delta"}, "unknown type": {T: PackedObjectType(5), Panic: true, Expected: "git/odb/pack: unknown object type: 5"}, } { t.Run(desc, c.Assert) } } git-lfs-2.3.4/git/odb/storer.go000066400000000000000000000011741317167762300163010ustar00rootroot00000000000000package odb import "io" // storer implements a storage engine for reading, writing, and creating // io.ReadWriters that can store information about loose objects type storer interface { // Open returns a handle on an existing object keyed by the given SHA. // It returns an error if that file does not already exist. Open(sha []byte) (f io.ReadWriteCloser, err error) // Store copies the data given in "r" to the unique object path given by // "sha". It returns an error if that file already exists (acting as if // the `os.O_EXCL` mode is given in a bitmask to os.Open). Store(sha []byte, r io.Reader) (n int64, err error) } git-lfs-2.3.4/git/odb/tree.go000066400000000000000000000165271317167762300157320ustar00rootroot00000000000000package odb import ( "bufio" "bytes" "fmt" "io" "sort" "strconv" "strings" "syscall" ) // Tree encapsulates a Git tree object. type Tree struct { // Entries is the list of entries held by this tree. Entries []*TreeEntry } // Type implements Object.ObjectType by returning the correct object type for // Trees, TreeObjectType. func (t *Tree) Type() ObjectType { return TreeObjectType } // Decode implements Object.Decode and decodes the uncompressed tree being // read. It returns the number of uncompressed bytes being consumed off of the // stream, which should be strictly equal to the size given. // // If any error was encountered along the way, that will be returned, along with // the number of bytes read up to that point. 
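// // Each entry in the stream is encoded as "<octal mode> <name>\x00<20-byte // SHA-1>"; for example, "100644 a.dat\x00" followed by twenty raw SHA-1 bytes.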
func (t *Tree) Decode(from io.Reader, size int64) (n int, err error) { buf := bufio.NewReader(from) var entries []*TreeEntry for { modes, err := buf.ReadString(' ') if err != nil { if err == io.EOF { break } return n, err } n += len(modes) modes = strings.TrimSuffix(modes, " ") mode, _ := strconv.ParseInt(modes, 8, 32) fname, err := buf.ReadString('\x00') if err != nil { return n, err } n += len(fname) fname = strings.TrimSuffix(fname, "\x00") var sha [20]byte if _, err = io.ReadFull(buf, sha[:]); err != nil { return n, err } n += 20 entries = append(entries, &TreeEntry{ Name: fname, Oid: sha[:], Filemode: int32(mode), }) } t.Entries = entries return n, nil } // Encode encodes the tree's contents to the given io.Writer, "w". If there was // any error copying the tree's contents, that error will be returned. // // Otherwise, the number of bytes written will be returned. func (t *Tree) Encode(to io.Writer) (n int, err error) { const entryTmpl = "%s %s\x00%s" for _, entry := range t.Entries { fmode := strconv.FormatInt(int64(entry.Filemode), 8) ne, err := fmt.Fprintf(to, entryTmpl, fmode, entry.Name, entry.Oid) if err != nil { return n, err } n = n + ne } return } // Merge performs a merge operation against the given set of `*TreeEntry`'s by // either replacing existing tree entries of the same name, or appending new // entries in sub-tree order. // // It returns a copy of the tree, and performs the merge in O(n*log(n)) time. func (t *Tree) Merge(others ...*TreeEntry) *Tree { unseen := make(map[string]*TreeEntry) // Build a cache of name+filemode to *TreeEntry. for _, other := range others { key := fmt.Sprintf("%s\x00%o", other.Name, other.Filemode) unseen[key] = other } // Map the existing entries ("t.Entries") into a new set by either // copying an existing entry, or replacing it with a new one. entries := make([]*TreeEntry, 0, len(t.Entries)) for _, entry := range t.Entries { key := fmt.Sprintf("%s\x00%o", entry.Name, entry.Filemode) if other, ok := unseen[key]; ok { entries = append(entries, other) delete(unseen, key) } else { oid := make([]byte, len(entry.Oid)) copy(oid, entry.Oid) entries = append(entries, &TreeEntry{ Filemode: entry.Filemode, Name: entry.Name, Oid: oid, }) } } // For all the items we haven't replaced into the new set, append them // to the entries. for _, remaining := range unseen { entries = append(entries, remaining) } // Call sort afterwards, as a tradeoff between speed and spatial // complexity. As a future point of optimization, adding new elements // (see: above) could be done as a linear pass of the "entries" set. // // In order to do that, we must have a constant-time lookup of both // entries in the existing and new sets. This requires building a // map[string]*TreeEntry for the given "others" as well as "t.Entries". // // Trees can be potentially large, so trade this spatial complexity for // an O(n*log(n)) sort. sort.Sort(SubtreeOrder(entries)) return &Tree{Entries: entries} } // Equal returns whether the receiving and given trees are equal, or in other // words, whether they are represented by the same SHA-1 when saved to the // object database. func (t *Tree) Equal(other *Tree) bool { if (t == nil) != (other == nil) { return false } if t != nil { if len(t.Entries) != len(other.Entries) { return false } for i := 0; i < len(t.Entries); i++ { e1 := t.Entries[i] e2 := other.Entries[i] if !e1.Equal(e2) { return false } } } return true } // TreeEntry encapsulates information about a single tree entry in a tree // listing.
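// // The Filemode field uses Git's octal mode conventions: 0100644 (or 0100755) // for blobs, 040000 for sub-trees, 0120000 for symlinks, and 0160000 for // gitlinks (submodule commits).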
type TreeEntry struct { // Name is the entry name relative to the tree in which this entry is // contained. Name string // Oid is the object ID for this tree entry. Oid []byte // Filemode is the filemode of this tree entry on disk. Filemode int32 } // Equal returns whether the receiving and given TreeEntry instances are // identical in name, filemode, and OID. func (e *TreeEntry) Equal(other *TreeEntry) bool { if (e == nil) != (other == nil) { return false } if e != nil { return e.Name == other.Name && bytes.Equal(e.Oid, other.Oid) && e.Filemode == other.Filemode } return true } // Type is the type of entry (either blob: BlobObjectType, or a sub-tree: // TreeObjectType). func (e *TreeEntry) Type() ObjectType { switch e.Filemode & syscall.S_IFMT { case syscall.S_IFREG: return BlobObjectType case syscall.S_IFDIR: return TreeObjectType case syscall.S_IFLNK: return BlobObjectType default: if e.Filemode == 0xe000 { // Mode 0xe000, or a gitlink, has no formal filesystem // (`syscall.S_IF`) equivalent. // // Safeguard that catch here, or otherwise panic. return CommitObjectType } else { panic(fmt.Sprintf("git/odb: unknown object type: %o", e.Filemode)) } } } // SubtreeOrder is an implementation of sort.Interface that sorts a set of // `*TreeEntry`'s according to "subtree" order. This ordering is required to // write trees in a correct, readable format to the Git object database. // // The format is as follows: entries are sorted lexicographically in byte-order, // with subtrees (entries of Type() == git/odb.TreeObjectType) being sorted as // if their `Name` fields ended in a "/". // // See: https://github.com/git/git/blob/v2.13.0/fsck.c#L492-L525 for more // details. type SubtreeOrder []*TreeEntry // Len implements sort.Interface.Len() and return the length of the underlying // slice. func (s SubtreeOrder) Len() int { return len(s) } // Swap implements sort.Interface.Swap() and swaps the two elements at i and j. func (s SubtreeOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // Less implements sort.Interface.Less() and returns whether the element at "i" // is compared as "less" than the element at "j". In other words, it returns if // the element at "i" should be sorted ahead of that at "j". // // It performs this comparison in lexicographic byte-order according to the // rules above (see SubtreeOrder). func (s SubtreeOrder) Less(i, j int) bool { return s.Name(i) < s.Name(j) } // Name returns the name for a given entry indexed at "i", which is a C-style // string ('\0' terminated unless it's a subtree), optionally terminated with // '/' if it's a subtree. // // This is done because '/' sorts ahead of '\0', and is compatible with the // tree order in upstream Git. 
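// // For example, a blob "a-" yields "a-\x00", a sub-tree "a" yields "a/", and a // blob "a=" yields "a=\x00"; these compare in that order, since 0x2d ('-') < // 0x2f ('/') < 0x3d ('=').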
func (s SubtreeOrder) Name(i int) string { if i < 0 || i >= len(s) { return "" } entry := s[i] if entry == nil { return "" } if entry.Type() == TreeObjectType { return entry.Name + "/" } return entry.Name + "\x00" } git-lfs-2.3.4/git/odb/tree_test.go000066400000000000000000000226601317167762300167640ustar00rootroot00000000000000package odb import ( "bufio" "bytes" "fmt" "sort" "strconv" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestTreeReturnsCorrectObjectType(t *testing.T) { assert.Equal(t, TreeObjectType, new(Tree).Type()) } func TestTreeEncoding(t *testing.T) { tree := &Tree{ Entries: []*TreeEntry{ { Name: "a.dat", Oid: []byte("aaaaaaaaaaaaaaaaaaaa"), Filemode: 0100644, }, { Name: "subdir", Oid: []byte("bbbbbbbbbbbbbbbbbbbb"), Filemode: 040000, }, { Name: "submodule", Oid: []byte("cccccccccccccccccccc"), Filemode: 0160000, }, }, } buf := new(bytes.Buffer) n, err := tree.Encode(buf) assert.Nil(t, err) assert.NotEqual(t, 0, n) assertTreeEntry(t, buf, "a.dat", []byte("aaaaaaaaaaaaaaaaaaaa"), 0100644) assertTreeEntry(t, buf, "subdir", []byte("bbbbbbbbbbbbbbbbbbbb"), 040000) assertTreeEntry(t, buf, "submodule", []byte("cccccccccccccccccccc"), 0160000) assert.Equal(t, 0, buf.Len()) } func TestTreeDecoding(t *testing.T) { from := new(bytes.Buffer) fmt.Fprintf(from, "%s %s\x00%s", strconv.FormatInt(int64(0100644), 8), "a.dat", []byte("aaaaaaaaaaaaaaaaaaaa")) fmt.Fprintf(from, "%s %s\x00%s", strconv.FormatInt(int64(040000), 8), "subdir", []byte("bbbbbbbbbbbbbbbbbbbb")) fmt.Fprintf(from, "%s %s\x00%s", strconv.FormatInt(int64(0120000), 8), "symlink", []byte("cccccccccccccccccccc")) fmt.Fprintf(from, "%s %s\x00%s", strconv.FormatInt(int64(0160000), 8), "submodule", []byte("dddddddddddddddddddd")) flen := from.Len() tree := new(Tree) n, err := tree.Decode(from, int64(flen)) assert.Nil(t, err) assert.Equal(t, flen, n) require.Equal(t, 4, len(tree.Entries)) assert.Equal(t, &TreeEntry{ Name: "a.dat", Oid: []byte("aaaaaaaaaaaaaaaaaaaa"), Filemode: 0100644, }, tree.Entries[0]) assert.Equal(t, &TreeEntry{ Name: "subdir", Oid: []byte("bbbbbbbbbbbbbbbbbbbb"), Filemode: 040000, }, tree.Entries[1]) assert.Equal(t, &TreeEntry{ Name: "symlink", Oid: []byte("cccccccccccccccccccc"), Filemode: 0120000, }, tree.Entries[2]) assert.Equal(t, &TreeEntry{ Name: "submodule", Oid: []byte("dddddddddddddddddddd"), Filemode: 0160000, }, tree.Entries[3]) } func TestTreeDecodingShaBoundary(t *testing.T) { var from bytes.Buffer fmt.Fprintf(&from, "%s %s\x00%s", strconv.FormatInt(int64(0100644), 8), "a.dat", []byte("aaaaaaaaaaaaaaaaaaaa")) flen := from.Len() tree := new(Tree) n, err := tree.Decode(bufio.NewReaderSize(&from, flen-2), int64(flen)) assert.Nil(t, err) assert.Equal(t, flen, n) require.Len(t, tree.Entries, 1) assert.Equal(t, &TreeEntry{ Name: "a.dat", Oid: []byte("aaaaaaaaaaaaaaaaaaaa"), Filemode: 0100644, }, tree.Entries[0]) } func TestTreeMergeReplaceElements(t *testing.T) { e1 := &TreeEntry{Name: "a", Filemode: 0100644, Oid: []byte{0x1}} e2 := &TreeEntry{Name: "b", Filemode: 0100644, Oid: []byte{0x2}} e3 := &TreeEntry{Name: "c", Filemode: 0100644, Oid: []byte{0x3}} e4 := &TreeEntry{Name: "b", Filemode: 0100644, Oid: []byte{0x4}} e5 := &TreeEntry{Name: "c", Filemode: 0100644, Oid: []byte{0x5}} t1 := &Tree{Entries: []*TreeEntry{e1, e2, e3}} t2 := t1.Merge(e4, e5) require.Len(t, t1.Entries, 3) assert.True(t, bytes.Equal(t1.Entries[0].Oid, []byte{0x1})) assert.True(t, bytes.Equal(t1.Entries[1].Oid, []byte{0x2})) assert.True(t, bytes.Equal(t1.Entries[2].Oid, 
[]byte{0x3})) require.Len(t, t2.Entries, 3) assert.True(t, bytes.Equal(t2.Entries[0].Oid, []byte{0x1})) assert.True(t, bytes.Equal(t2.Entries[1].Oid, []byte{0x4})) assert.True(t, bytes.Equal(t2.Entries[2].Oid, []byte{0x5})) } func TestMergeInsertElementsInSubtreeOrder(t *testing.T) { e1 := &TreeEntry{Name: "a-b", Filemode: 0100644, Oid: []byte{0x1}} e2 := &TreeEntry{Name: "a", Filemode: 040000, Oid: []byte{0x2}} e3 := &TreeEntry{Name: "a=", Filemode: 0100644, Oid: []byte{0x3}} e4 := &TreeEntry{Name: "a-", Filemode: 0100644, Oid: []byte{0x4}} t1 := &Tree{Entries: []*TreeEntry{e1, e2, e3}} t2 := t1.Merge(e4) require.Len(t, t1.Entries, 3) assert.True(t, bytes.Equal(t1.Entries[0].Oid, []byte{0x1})) assert.True(t, bytes.Equal(t1.Entries[1].Oid, []byte{0x2})) assert.True(t, bytes.Equal(t1.Entries[2].Oid, []byte{0x3})) assert.True(t, bytes.Equal(t2.Entries[0].Oid, []byte{0x4})) assert.True(t, bytes.Equal(t2.Entries[1].Oid, []byte{0x1})) assert.True(t, bytes.Equal(t2.Entries[2].Oid, []byte{0x2})) assert.True(t, bytes.Equal(t2.Entries[3].Oid, []byte{0x3})) } type TreeEntryTypeTestCase struct { Filemode int32 Expected ObjectType } func (c *TreeEntryTypeTestCase) Assert(t *testing.T) { e := &TreeEntry{Filemode: c.Filemode} got := e.Type() assert.Equal(t, c.Expected, got, "git/odb: expected type: %s, got: %s", c.Expected, got) } func TestTreeEntryTypeResolution(t *testing.T) { for desc, c := range map[string]*TreeEntryTypeTestCase{ "blob": {0100644, BlobObjectType}, "subtree": {040000, TreeObjectType}, "symlink": {0120000, BlobObjectType}, "commit": {0160000, CommitObjectType}, } { t.Run(desc, c.Assert) } } func TestTreeEntryTypeResolutionUnknown(t *testing.T) { e := &TreeEntry{Filemode: -1} defer func() { if err := recover(); err == nil { t.Fatal("git/odb: expected panic(), got none") } else { assert.Equal(t, "git/odb: unknown object type: -1", err) } }() e.Type() } func TestSubtreeOrder(t *testing.T) { // The below list (e1, e2, ..., e5) is entered in subtree order: that // is, lexicographically byte-ordered as if blobs end in a '\0', and // sub-trees end in a '/'. 
// // See: // http://public-inbox.org/git/7vac6jfzem.fsf@assigned-by-dhcp.cox.net e1 := &TreeEntry{Filemode: 0100644, Name: "a-"} e2 := &TreeEntry{Filemode: 0100644, Name: "a-b"} e3 := &TreeEntry{Filemode: 040000, Name: "a"} e4 := &TreeEntry{Filemode: 0100644, Name: "a="} e5 := &TreeEntry{Filemode: 0100644, Name: "a=b"} // Create a set of entries in the wrong order: entries := []*TreeEntry{e3, e4, e1, e5, e2} sort.Sort(SubtreeOrder(entries)) // Assert that they are in the correct order after sorting in sub-tree // order: require.Len(t, entries, 5) assert.Equal(t, "a-", entries[0].Name) assert.Equal(t, "a-b", entries[1].Name) assert.Equal(t, "a", entries[2].Name) assert.Equal(t, "a=", entries[3].Name) assert.Equal(t, "a=b", entries[4].Name) } func TestSubtreeOrderReturnsEmptyForOutOfBounds(t *testing.T) { o := SubtreeOrder([]*TreeEntry{{Name: "a"}}) assert.Equal(t, "", o.Name(len(o)+1)) } func TestSubtreeOrderReturnsEmptyForNilElements(t *testing.T) { o := SubtreeOrder([]*TreeEntry{nil}) assert.Equal(t, "", o.Name(0)) } func TestTreeEqualReturnsTrueWithUnchangedContents(t *testing.T) { t1 := &Tree{Entries: []*TreeEntry{ {Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)}, }} t2 := &Tree{Entries: []*TreeEntry{ {Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)}, }} assert.True(t, t1.Equal(t2)) } func TestTreeEqualReturnsFalseWithChangedContents(t *testing.T) { t1 := &Tree{Entries: []*TreeEntry{ {Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)}, {Name: "b.dat", Filemode: 0100644, Oid: make([]byte, 20)}, }} t2 := &Tree{Entries: []*TreeEntry{ {Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)}, {Name: "c.dat", Filemode: 0100644, Oid: make([]byte, 20)}, }} assert.False(t, t1.Equal(t2)) } func TestTreeEqualReturnsTrueWhenOneTreeIsNil(t *testing.T) { t1 := &Tree{Entries: []*TreeEntry{ {Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)}, }} t2 := (*Tree)(nil) assert.False(t, t1.Equal(t2)) assert.False(t, t2.Equal(t1)) } func TestTreeEqualReturnsTrueWhenBothTreesAreNil(t *testing.T) { t1 := (*Tree)(nil) t2 := (*Tree)(nil) assert.True(t, t1.Equal(t2)) } func TestTreeEntryEqualReturnsTrueWhenEntriesAreTheSame(t *testing.T) { e1 := &TreeEntry{Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)} e2 := &TreeEntry{Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)} assert.True(t, e1.Equal(e2)) } func TestTreeEntryEqualReturnsFalseWhenDifferentNames(t *testing.T) { e1 := &TreeEntry{Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)} e2 := &TreeEntry{Name: "b.dat", Filemode: 0100644, Oid: make([]byte, 20)} assert.False(t, e1.Equal(e2)) } func TestTreeEntryEqualReturnsFalseWhenDifferentOids(t *testing.T) { e1 := &TreeEntry{Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)} e2 := &TreeEntry{Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)} e2.Oid[0] = 1 assert.False(t, e1.Equal(e2)) } func TestTreeEntryEqualReturnsFalseWhenDifferentFilemodes(t *testing.T) { e1 := &TreeEntry{Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)} e2 := &TreeEntry{Name: "a.dat", Filemode: 0100755, Oid: make([]byte, 20)} assert.False(t, e1.Equal(e2)) } func TestTreeEntryEqualReturnsFalseWhenOneEntryIsNil(t *testing.T) { e1 := &TreeEntry{Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)} e2 := (*TreeEntry)(nil) assert.False(t, e1.Equal(e2)) } func TestTreeEntryEqualReturnsTrueWhenBothEntriesAreNil(t *testing.T) { e1 := (*TreeEntry)(nil) e2 := (*TreeEntry)(nil) assert.True(t, e1.Equal(e2)) } func assertTreeEntry(t *testing.T, buf *bytes.Buffer, name string, oid 
[]byte, mode int32) { fmode, err := buf.ReadBytes(' ') assert.Nil(t, err) assert.Equal(t, []byte(strconv.FormatInt(int64(mode), 8)+" "), fmode) fname, err := buf.ReadBytes('\x00') assert.Nil(t, err) assert.Equal(t, []byte(name+"\x00"), fname) var sha [20]byte _, err = buf.Read(sha[:]) assert.Nil(t, err) assert.Equal(t, oid, sha[:]) } git-lfs-2.3.4/git/pkt_line.go000066400000000000000000000103551317167762300160250ustar00rootroot00000000000000package git import ( "bufio" "errors" "fmt" "io" "io/ioutil" "strconv" "strings" ) const ( // MaxPacketLength is the maximum total (header+payload) length // encode-able within one packet using Git's pkt-line protocol. MaxPacketLength = 65516 ) type pktline struct { r *bufio.Reader w *bufio.Writer } func newPktline(r io.Reader, w io.Writer) *pktline { return &pktline{ r: bufio.NewReader(r), w: bufio.NewWriter(w), } } // readPacket reads a single packet entirely and returns the data encoded within // it. Errors can occur in several cases, as described below. // // 1) If no data was present in the reader, and no more data could be read (the // pipe was closed, etc) than an io.EOF will be returned. // 2) If there was some data to be read, but the pipe or reader was closed // before an entire packet (or header) could be ingested, an // io.ErrShortBuffer error will be returned. // 3) If there was a valid header, but no body associated with the packet, an // "Invalid packet length." error will be returned. // 4) If the data in the header could not be parsed as a hexadecimal length in // the Git pktline format, the parse error will be returned. // // If none of the above cases fit the state of the data on the wire, the packet // is returned along with a nil error. func (p *pktline) readPacket() ([]byte, error) { var pktLenHex [4]byte if n, err := io.ReadFull(p.r, pktLenHex[:]); err != nil { return nil, err } else if n != 4 { return nil, io.ErrShortBuffer } pktLen, err := strconv.ParseInt(string(pktLenHex[:]), 16, 0) if err != nil { return nil, err } // pktLen==0: flush packet if pktLen == 0 { return nil, nil } if pktLen <= 4 { return nil, errors.New("Invalid packet length.") } payload, err := ioutil.ReadAll(io.LimitReader(p.r, pktLen-4)) return payload, err } // readPacketText follows identical semantics to the `readPacket()` function, // but additionally removes the trailing `\n` LF from the end of the packet, if // present. func (p *pktline) readPacketText() (string, error) { data, err := p.readPacket() return strings.TrimSuffix(string(data), "\n"), err } // readPacketList reads as many packets as possible using the `readPacketText` // function before encountering a flush packet. It returns a slice of all the // packets it read, or an error if one was encountered. func (p *pktline) readPacketList() ([]string, error) { var list []string for { data, err := p.readPacketText() if err != nil { return nil, err } if len(data) == 0 { break } list = append(list, data) } return list, nil } // writePacket writes the given data in "data" to the underlying data stream // using Git's `pkt-line` format. // // If the data was longer than MaxPacketLength, an error will be returned. If // there was any error encountered while writing any component of the packet // (hdr, payload), it will be returned. // // NB: writePacket does _not_ flush the underlying buffered writer. See instead: // `writeFlush()`. 
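// // For example, writePacket([]byte("hello")) emits the bytes "0009hello": a // four-character hexadecimal length (the payload length plus the four header // bytes), followed by the payload itself.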
func (p *pktline) writePacket(data []byte) error { if len(data) > MaxPacketLength { return errors.New("Packet length exceeds maximal length") } if _, err := p.w.WriteString(fmt.Sprintf("%04x", len(data)+4)); err != nil { return err } if _, err := p.w.Write(data); err != nil { return err } return nil } // writeFlush writes the terminating "flush" packet and then flushes the // underlying buffered writer. // // If any error was encountered along the way, it will be returned immediately. func (p *pktline) writeFlush() error { if _, err := p.w.WriteString(fmt.Sprintf("%04x", 0)); err != nil { return err } if err := p.w.Flush(); err != nil { return err } return nil } // writePacketText follows the same semantics as `writePacket`, but appends a // trailing "\n" LF character to the end of the data. func (p *pktline) writePacketText(data string) error { return p.writePacket([]byte(data + "\n")) } // writePacketList writes a slice of strings using the semantics of // writePacketText, and then writes a terminating flush sequence afterwards. // // If any error was encountered, it will be returned immediately. func (p *pktline) writePacketList(list []string) error { for _, i := range list { if err := p.writePacketText(i); err != nil { return err } } return p.writeFlush() } git-lfs-2.3.4/git/pkt_line_reader.go000066400000000000000000000023511317167762300173440ustar00rootroot00000000000000package git import ( "io" "github.com/git-lfs/git-lfs/tools" ) type pktlineReader struct { pl *pktline buf []byte } var _ io.Reader = new(pktlineReader) func (r *pktlineReader) Read(p []byte) (int, error) { var n int if len(r.buf) > 0 { // If there is data in the buffer, shift as much out of it and // into the given "p" as we can. n = tools.MinInt(len(p), len(r.buf)) copy(p, r.buf[:n]) r.buf = r.buf[n:] } // Loop and grab as many packets as we can in a given "run", until we // have either a) filled the given buffer "p", or b) started to buffer // internally in "r.buf". for len(r.buf) == 0 { chunk, err := r.pl.readPacket() if err != nil { return n, err } if len(chunk) == 0 { // If we got an empty chunk, then we know that we have // reached the end of processing for this particular // packet, so let's terminate. return n, io.EOF } // Figure out how much of the packet we can read into "p". nn := tools.MinInt(len(chunk), len(p[n:])) // Move that amount into "p", from where we left off. copy(p[n:], chunk[:nn]) // And move the rest into the buffer. r.buf = append(r.buf, chunk[nn:]...)
// Mark that we have read "nn" bytes into "p" n += nn } return n, nil } git-lfs-2.3.4/git/pkt_line_reader_test.go000066400000000000000000000057521317167762300204130ustar00rootroot00000000000000package git import ( "bytes" "io" "io/ioutil" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // writePackets func writePacket(t *testing.T, w io.Writer, datas ...[]byte) { pl := newPktline(nil, w) for _, data := range datas { require.Nil(t, pl.writePacket(data)) } require.Nil(t, pl.writeFlush()) } func TestPktlineReaderReadsSinglePacketsInOneCall(t *testing.T) { var buf bytes.Buffer writePacket(t, &buf, []byte("asdf")) pr := &pktlineReader{pl: newPktline(&buf, nil)} data, err := ioutil.ReadAll(pr) assert.Nil(t, err) assert.Equal(t, []byte("asdf"), data) } func TestPktlineReaderReadsManyPacketsInOneCall(t *testing.T) { var buf bytes.Buffer writePacket(t, &buf, []byte("first\n"), []byte("second")) pr := &pktlineReader{pl: newPktline(&buf, nil)} data, err := ioutil.ReadAll(pr) assert.Nil(t, err) assert.Equal(t, []byte("first\nsecond"), data) } func TestPktlineReaderReadsSinglePacketsInMultipleCallsWithUnevenBuffering(t *testing.T) { var buf bytes.Buffer writePacket(t, &buf, []byte("asdf")) pr := &pktlineReader{pl: newPktline(&buf, nil)} var p1 [3]byte var p2 [1]byte n1, e1 := pr.Read(p1[:]) assert.Equal(t, 3, n1) assert.Equal(t, []byte("asd"), p1[:]) assert.Nil(t, e1) n2, e2 := pr.Read(p2[:]) assert.Equal(t, 1, n2) assert.Equal(t, []byte("f"), p2[:]) assert.Equal(t, io.EOF, e2) } func TestPktlineReaderReadsManyPacketsInMultipleCallsWithUnevenBuffering(t *testing.T) { var buf bytes.Buffer writePacket(t, &buf, []byte("first"), []byte("second")) pr := &pktlineReader{pl: newPktline(&buf, nil)} var p1 [4]byte var p2 [7]byte var p3 []byte n1, e1 := pr.Read(p1[:]) assert.Equal(t, 4, n1) assert.Equal(t, []byte("firs"), p1[:]) assert.Nil(t, e1) n2, e2 := pr.Read(p2[:]) assert.Equal(t, 7, n2) assert.Equal(t, []byte("tsecond"), p2[:]) assert.Equal(t, io.EOF, e2) n3, e3 := pr.Read(p3[:]) assert.Equal(t, 0, n3) assert.Empty(t, p3) assert.Equal(t, io.EOF, e3) } func TestPktlineReaderReadsSinglePacketsInMultipleCallsWithEvenBuffering(t *testing.T) { var buf bytes.Buffer writePacket(t, &buf, []byte("firstother")) pr := &pktlineReader{pl: newPktline(&buf, nil)} var p1 [5]byte var p2 [5]byte n1, e1 := pr.Read(p1[:]) assert.Equal(t, 5, n1) assert.Equal(t, []byte("first"), p1[:]) assert.Nil(t, e1) n2, e2 := pr.Read(p2[:]) assert.Equal(t, 5, n2) assert.Equal(t, []byte("other"), p2[:]) assert.Equal(t, io.EOF, e2) } func TestPktlineReaderReadsManyPacketsInMultipleCallsWithEvenBuffering(t *testing.T) { var buf bytes.Buffer writePacket(t, &buf, []byte("first"), []byte("other")) pr := &pktlineReader{pl: newPktline(&buf, nil)} var p1 [5]byte var p2 [5]byte var p3 []byte n1, e1 := pr.Read(p1[:]) assert.Equal(t, 5, n1) assert.Equal(t, []byte("first"), p1[:]) assert.Nil(t, e1) n2, e2 := pr.Read(p2[:]) assert.Equal(t, 5, n2) assert.Equal(t, []byte("other"), p2[:]) assert.Equal(t, io.EOF, e2) n3, e3 := pr.Read(p3) assert.Equal(t, 0, n3) assert.Equal(t, io.EOF, e3) } git-lfs-2.3.4/git/pkt_line_test.go000066400000000000000000000117021317167762300170610ustar00rootroot00000000000000package git import ( "bytes" "io" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type PacketReadTestCase struct { In []byte Payload []byte Err string } func (c *PacketReadTestCase) Assert(t *testing.T) { buf := bytes.NewReader(c.In) rw := newPktline(buf, nil) pkt, err := 
rw.readPacket() if len(c.Payload) > 0 { assert.Equal(t, c.Payload, pkt) } else { assert.Empty(t, pkt) } if len(c.Err) > 0 { require.NotNil(t, err) assert.Equal(t, c.Err, err.Error()) } else { assert.Nil(t, err) } } func TestPktLineReadsWholePackets(t *testing.T) { tc := &PacketReadTestCase{ In: []byte{ 0x30, 0x30, 0x30, 0x38, // 0008 (hex. length) 0x1, 0x2, 0x3, 0x4, // payload }, Payload: []byte{0x1, 0x2, 0x3, 0x4}, } tc.Assert(t) } func TestPktLineNoPacket(t *testing.T) { tc := &PacketReadTestCase{ In: []byte{}, Err: io.EOF.Error(), } tc.Assert(t) } func TestPktLineEmptyPacket(t *testing.T) { tc := &PacketReadTestCase{ In: []byte{ 0x30, 0x30, 0x30, 0x34, // No body (invalid) }, Err: "Invalid packet length.", } tc.Assert(t) } func TestPktLineFlushPacket(t *testing.T) { tc := &PacketReadTestCase{ In: []byte{0x30, 0x30, 0x30, 0x30}, // Flush packet Payload: []byte{}, Err: "", } tc.Assert(t) } func TestPktLineDiscardsPacketsWithUnparseableLength(t *testing.T) { tc := &PacketReadTestCase{ In: []byte{ 0xff, 0xff, 0xff, 0xff, // ÿÿÿÿ (invalid hex. length) // No body }, Err: "strconv.ParseInt: parsing \"\\xff\\xff\\xff\\xff\": invalid syntax", } tc.Assert(t) } func TestPktLineReadsTextWithNewline(t *testing.T) { rw := newPktline(bytes.NewReader([]byte{ 0x30, 0x30, 0x30, 0x39, // 0009 (hex. length) 0x61, 0x62, 0x63, 0x64, 0xa, // Empty body }), nil) str, err := rw.readPacketText() assert.Nil(t, err) assert.Equal(t, "abcd", str) } func TestPktLineReadsTextWithoutNewline(t *testing.T) { rw := newPktline(bytes.NewReader([]byte{ 0x30, 0x30, 0x30, 0x38, // 0009 (hex. length) 0x61, 0x62, 0x63, 0x64, }), nil) str, err := rw.readPacketText() assert.Nil(t, err) assert.Equal(t, "abcd", str) } func TestPktLineReadsTextWithErr(t *testing.T) { rw := newPktline(bytes.NewReader([]byte{ 0x30, 0x30, 0x30, 0x34, // 0004 (hex. length) // No body }), nil) str, err := rw.readPacketText() require.NotNil(t, err) assert.Equal(t, "Invalid packet length.", err.Error()) assert.Equal(t, "", str) } func TestPktLineAppendsPacketLists(t *testing.T) { rw := newPktline(bytes.NewReader([]byte{ 0x30, 0x30, 0x30, 0x38, // 0009 (hex. length) 0x61, 0x62, 0x63, 0x64, // "abcd" 0x30, 0x30, 0x30, 0x38, // 0008 (hex. length) 0x65, 0x66, 0x67, 0x68, // "efgh" 0x30, 0x30, 0x30, 0x30, // 0000 (hex. length) }), nil) str, err := rw.readPacketList() assert.Nil(t, err) assert.Equal(t, []string{"abcd", "efgh"}, str) } func TestPktLineAppendsPacketListsAndReturnsErrs(t *testing.T) { rw := newPktline(bytes.NewReader([]byte{ 0x30, 0x30, 0x30, 0x38, // 0009 (hex. length) 0x61, 0x62, 0x63, 0x64, // "abcd" 0x30, 0x30, 0x30, 0x34, // 0004 (hex. length) // No body }), nil) str, err := rw.readPacketList() require.NotNil(t, err) assert.Equal(t, "Invalid packet length.", err.Error()) assert.Empty(t, str) } func TestPktLineWritesPackets(t *testing.T) { var buf bytes.Buffer rw := newPktline(nil, &buf) require.Nil(t, rw.writePacket([]byte{ 0x1, 0x2, 0x3, 0x4, })) require.Nil(t, rw.writeFlush()) assert.Equal(t, []byte{ 0x30, 0x30, 0x30, 0x38, // 0008 (hex. 
length) 0x1, 0x2, 0x3, 0x4, // payload 0x30, 0x30, 0x30, 0x30, // 0000 (flush packet) }, buf.Bytes()) } func TestPktLineWritesPacketsEqualToMaxLength(t *testing.T) { var buf bytes.Buffer rw := newPktline(nil, &buf) err := rw.writePacket(make([]byte, MaxPacketLength)) assert.Nil(t, err) assert.Equal(t, 4+MaxPacketLength, len(buf.Bytes())) } func TestPktLineDoesNotWritePacketsExceedingMaxLength(t *testing.T) { var buf bytes.Buffer rw := newPktline(nil, &buf) err := rw.writePacket(make([]byte, MaxPacketLength+1)) require.NotNil(t, err) assert.Equal(t, "Packet length exceeds maximal length", err.Error()) assert.Empty(t, buf.Bytes()) } func TestPktLineWritesPacketText(t *testing.T) { var buf bytes.Buffer rw := newPktline(nil, &buf) require.Nil(t, rw.writePacketText("abcd")) require.Nil(t, rw.writeFlush()) assert.Equal(t, []byte{ 0x30, 0x30, 0x30, 0x39, // 0009 (hex. length) 0x61, 0x62, 0x63, 0x64, 0xa, // "abcd\n" (payload) 0x30, 0x30, 0x30, 0x30, // 0000 (flush packet) }, buf.Bytes()) } func TestPktLineWritesPacketLists(t *testing.T) { var buf bytes.Buffer rw := newPktline(nil, &buf) err := rw.writePacketList([]string{"foo", "bar"}) assert.Nil(t, err) assert.Equal(t, []byte{ 0x30, 0x30, 0x30, 0x38, // 0008 (hex. length) 0x66, 0x6f, 0x6f, 0xa, // "foo\n" (payload) 0x30, 0x30, 0x30, 0x38, // 0008 (hex. length) 0x62, 0x61, 0x72, 0xa, // "bar\n" (payload) 0x30, 0x30, 0x30, 0x30, // 0000 (hex. length) }, buf.Bytes()) } git-lfs-2.3.4/git/pkt_line_writer.go000066400000000000000000000070531317167762300174220ustar00rootroot00000000000000package git import ( "io" "github.com/git-lfs/git-lfs/tools" ) // PktlineWriter is an implementation of `io.Writer` which writes data buffers // "p" to an underlying pkt-line stream for use with the Git pkt-line format. type PktlineWriter struct { // buf is an internal buffer used to store data until enough has been // collected to write a full packet, or the buffer was instructed to // flush. buf []byte // pl is the place where packets get written. pl *pktline } var _ io.Writer = new(PktlineWriter) // NewPktlineWriter returns a new *PktlineWriter, which will write to the // underlying data stream "w". The internal buffer is initialized with the given // capacity, "c". // // If "w" is already a `*PktlineWriter`, it will be returned as-is. func NewPktlineWriter(w io.Writer, c int) *PktlineWriter { if pw, ok := w.(*PktlineWriter); ok { return pw } return &PktlineWriter{ buf: make([]byte, 0, c), pl: newPktline(nil, w), } } // Write implements the io.Writer interface's `Write` method by providing a // packet-based backend to the given buffer "p". // // As many bytes are removed from "p" as possible and stored in an internal // buffer until the amount of data in the internal buffer is enough to write a // single packet. Once the internal buffer is full, a packet is written to the // underlying stream of data, and the process repeats. // // When the caller has no more data to write in the given chunk of packets, a // subsequent call to `Flush()` SHOULD be made in order to signify that the // current pkt sequence has terminated, and a new one can begin. // // Write returns the number of bytes in "p" accepted into the writer, which // _MAY_ be written to the underlying protocol stream, or may be written into // the internal buffer. // // If any error was encountered while either buffering or writing, that // error is returned, along with the number of bytes written to the underlying // protocol stream, as described above. 
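// // For example, two consecutive Writes whose combined length is less than // MaxPacketLength produce no output until Flush() is called, at which point a // single packet containing both payloads is written, followed by the // terminating flush packet ("0000").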
func (w *PktlineWriter) Write(p []byte) (int, error) { var n int for len(p[n:]) > 0 { // While there is still data left to process in "p", grab as // much of it as we can while not allowing the internal buffer // to exceed the MaxPacketLength const. m := tools.MinInt(len(p[n:]), MaxPacketLength-len(w.buf)) // Append on all of the data that we could into the internal // buffer. w.buf = append(w.buf, p[n:n+m]...) n += m if len(w.buf) == MaxPacketLength { // If we were able to grab an entire packet's worth of // data, flush the buffer. if _, err := w.flush(); err != nil { return n, err } } } return n, nil } // Flush empties the internal buffer used to store data temporarily and then // writes the pkt-line's FLUSH packet, to signal that it is done writing this // chunk of data. func (w *PktlineWriter) Flush() error { if w == nil { return nil } if _, err := w.flush(); err != nil { return err } if err := w.pl.writeFlush(); err != nil { return err } return nil } // flush writes any data in the internal buffer out to the underlying protocol // stream. If the amount of data in the internal buffer exceeds the // MaxPacketLength, the data will be written in multiple packets to accommodate. // // flush returns the number of bytes written to the underlying packet stream, // and any error that it encountered along the way. func (w *PktlineWriter) flush() (int, error) { var n int for len(w.buf) > 0 { if err := w.pl.writePacket(w.buf); err != nil { return 0, err } m := tools.MinInt(len(w.buf), MaxPacketLength) w.buf = w.buf[m:] n = n + m } return n, nil } git-lfs-2.3.4/git/pkt_line_writer_test.go000066400000000000000000000050711317167762300204570ustar00rootroot00000000000000package git import ( "bytes" "testing" "github.com/stretchr/testify/assert" ) func TestPktlineWriterWritesPacketsShorterThanMaxPacketSize(t *testing.T) { var buf bytes.Buffer w := NewPktlineWriter(&buf, 0) assertWriterWrite(t, w, []byte("Hello, world!"), 13) assertWriterWrite(t, w, nil, 0) pl := newPktline(&buf, nil) assertPacketRead(t, pl, []byte("Hello, world!")) assertPacketRead(t, pl, nil) } func TestPktlineWriterWritesPacketsEqualToMaxPacketLength(t *testing.T) { big := make([]byte, MaxPacketLength) for i, _ := range big { big[i] = 1 } // Make a copy so that we can drain the data inside of it p := make([]byte, MaxPacketLength) copy(p, big) var buf bytes.Buffer w := NewPktlineWriter(&buf, 0) assertWriterWrite(t, w, p, len(big)) assertWriterWrite(t, w, nil, 0) pl := newPktline(&buf, nil) assertPacketRead(t, pl, big) assertPacketRead(t, pl, nil) } func TestPktlineWriterWritesMultiplePacketsLessThanMaxPacketLength(t *testing.T) { var buf bytes.Buffer w := NewPktlineWriter(&buf, 0) assertWriterWrite(t, w, []byte("first\n"), len("first\n")) assertWriterWrite(t, w, []byte("second"), len("second")) assertWriterWrite(t, w, nil, 0) pl := newPktline(&buf, nil) assertPacketRead(t, pl, []byte("first\nsecond")) assertPacketRead(t, pl, nil) } func TestPktlineWriterWritesMultiplePacketsGreaterThanMaxPacketLength(t *testing.T) { var buf bytes.Buffer b1 := make([]byte, MaxPacketLength*3/4) for i, _ := range b1 { b1[i] = 1 } b2 := make([]byte, MaxPacketLength*3/4) for i, _ := range b2 { b2[i] = 2 } w := NewPktlineWriter(&buf, 0) assertWriterWrite(t, w, b1, len(b1)) assertWriterWrite(t, w, b2, len(b2)) assertWriterWrite(t, w, nil, 0) // offs is how far into b2 we needed to buffer before writing an entire // packet offs := MaxPacketLength - len(b1) pl := newPktline(&buf, nil) assertPacketRead(t, pl, append(b1, b2[:offs]...)) assertPacketRead(t, pl, 
b2[offs:])
	assertPacketRead(t, pl, nil)
}

func TestPktlineWriterAllowsFlushesOnNil(t *testing.T) {
	assert.NoError(t, (*PktlineWriter)(nil).Flush())
}

func TestPktlineWriterDoesntWrapItself(t *testing.T) {
	itself := &PktlineWriter{}
	nw := NewPktlineWriter(itself, 0)

	assert.Equal(t, itself, nw)
}

func assertWriterWrite(t *testing.T, w *PktlineWriter, p []byte, plen int) {
	var n int
	var err error

	if p == nil {
		err = w.Flush()
	} else {
		n, err = w.Write(p)
	}

	assert.Nil(t, err)
	assert.Equal(t, plen, n)
}

func assertPacketRead(t *testing.T, pl *pktline, expected []byte) {
	got, err := pl.readPacket()

	assert.Nil(t, err)
	assert.Equal(t, expected, got)
}
git-lfs-2.3.4/git/rev_list_scanner.go000066400000000000000000000247421317167762300175650ustar00rootroot00000000000000package git

import (
	"bufio"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"regexp"
	"strings"
	"sync"

	"github.com/git-lfs/git-lfs/errors"
	"github.com/rubyist/tracerx"
)

// ScanningMode is a constant type that allows for variation in the range of
// commits to scan when given to the `*git.RevListScanner` type.
type ScanningMode int

const (
	// ScanRefsMode will scan between two refspecs.
	ScanRefsMode ScanningMode = iota
	// ScanAllMode will scan all history.
	ScanAllMode
	// ScanLeftToRemoteMode will scan the difference between any included
	// SHA1s and a remote tracking ref.
	ScanLeftToRemoteMode
)

// RevListOrder is a constant type that allows for variation in the ordering of
// revisions given by the *RevListScanner below.
type RevListOrder int

const (
	// DefaultRevListOrder is the zero-value for this type and yields the
	// results as given by git-rev-list(1) without any "--<x>-order"
	// argument given. By default: reverse chronological order.
	DefaultRevListOrder RevListOrder = iota
	// DateRevListOrder gives the revisions such that no parents are shown
	// before children, and otherwise in commit timestamp order.
	DateRevListOrder
	// AuthorDateRevListOrder gives the revisions such that no parents are
	// shown before children, and otherwise in author date timestamp order.
	AuthorDateRevListOrder
	// TopoRevListOrder gives the revisions such that they appear in
	// topological order.
	TopoRevListOrder
)

// Flag returns the command-line flag to be passed to git-rev-list(1) in order
// to order the output according to the given RevListOrder. It returns both the
// flag ("--date-order", "--topo-order", etc.) and a bool, whether or not to
// append the flag (for instance, DefaultRevListOrder requires no flag).
//
// Given a type other than those defined above, Flag() will panic().
func (o RevListOrder) Flag() (string, bool) {
	switch o {
	case DefaultRevListOrder:
		return "", false
	case DateRevListOrder:
		return "--date-order", true
	case AuthorDateRevListOrder:
		return "--author-date-order", true
	case TopoRevListOrder:
		return "--topo-order", true
	default:
		panic(fmt.Sprintf("git/rev_list_scanner: unknown RevListOrder %d", o))
	}
}

// ScanRefsOptions is an "options" type that is used to configure a scan
// operation on the `*git.RevListScanner` instance when given to the function
// `NewRevListScanner()`.
type ScanRefsOptions struct {
	// Mode is the scan mode to apply, see above.
	Mode ScanningMode
	// Remote is the current remote to scan against, if using
	// ScanLeftToRemoteMode.
	Remote string
	// SkipDeletedBlobs specifies whether or not to traverse into commit
	// ancestry (revealing potentially deleted (unreferenced) blobs, trees,
	// or commits).
	SkipDeletedBlobs bool
	// Order specifies the order in which revisions are yielded from the
	// output of `git-rev-list(1)`.
For more information, see the above // documentation on the RevListOrder type. Order RevListOrder // CommitsOnly specifies whether or not the *RevListScanner should // return only commits, or all objects in range by performing a // traversal of the graph. By default, false: show all objects. CommitsOnly bool // WorkingDir specifies the working directory in which to run // git-rev-list(1). If this is an empty string, (has len(WorkingDir) == // 0), it is equivalent to running in os.Getwd(). WorkingDir string // Reverse specifies whether or not to give the revisions in reverse // order. Reverse bool // SkippedRefs provides a list of refs to ignore. SkippedRefs []string // Mutex guards names. Mutex *sync.Mutex // Names maps Git object IDs (encoded as hex using // hex.EncodeString()) to their names, i.e., a directory name // (fully-qualified) for trees, or a pathspec for blob tree entries. Names map[string]string } // GetName returns the name associated with a given blob/tree sha and "true" if // it exists, or ("", false) if it doesn't. // // GetName is guarded by a use of o.Mutex, and is goroutine safe. func (o *ScanRefsOptions) GetName(sha string) (string, bool) { o.Mutex.Lock() defer o.Mutex.Unlock() name, ok := o.Names[sha] return name, ok } // SetName sets the name associated with a given blob/tree sha. // // SetName is guarded by a use of o.Mutex, and is therefore goroutine safe. func (o *ScanRefsOptions) SetName(sha, name string) { o.Mutex.Lock() defer o.Mutex.Unlock() o.Names[sha] = name } // RevListScanner is a Scanner type that parses through results of the `git // rev-list` command. type RevListScanner struct { // s is a buffered scanner feeding from the output (stdout) of // git-rev-list(1) invocation. s *bufio.Scanner // closeFn is an optional type returning an error yielded by closing any // resources held by an open (running) instance of the *RevListScanner // type. closeFn func() error // name is the name of the most recently read object. name string // oid is the oid of the most recently read object. oid []byte // err is the most recently encountered error. err error } var ( // ambiguousRegex is a regular expression matching the output of stderr // when ambiguous refnames are encountered. ambiguousRegex = regexp.MustCompile(`warning: refname (.*) is ambiguous`) // z40 is a regular expression matching the empty blob/commit/tree // SHA: "0000000000000000000000000000000000000000". z40 = regexp.MustCompile(`\^?0{40}`) ) // NewRevListScanner instantiates a new RevListScanner instance scanning all // revisions reachable by refs contained in "include" and not reachable by any // refs included in "excluded", using the *ScanRefsOptions "opt" configuration. // // It returns a new *RevListScanner instance, or an error if one was // encountered. Upon returning, the `git-rev-list(1)` instance is already // running, and Scan() may be called immediately. 
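// Editor's note: a sketch of the intended consumption pattern (hypothetical
// ref name; Mutex and Names should be populated if name lookups are used):
//
//	opts := &ScanRefsOptions{
//		Mode:  ScanRefsMode,
//		Mutex: new(sync.Mutex),
//		Names: make(map[string]string),
//	}
//	scanner, err := NewRevListScanner([]string{"refs/heads/master"}, nil, opts)
//	if err != nil {
//		// handle error
//	}
//	for scanner.Scan() {
//		fmt.Printf("%x %s\n", scanner.OID(), scanner.Name())
//	}
//	if err := scanner.Err(); err != nil {
//		// handle error
//	}
//	if err := scanner.Close(); err != nil {
//		// handle error
//	}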
func NewRevListScanner(include, excluded []string, opt *ScanRefsOptions) (*RevListScanner, error) { stdin, args, err := revListArgs(include, excluded, opt) if err != nil { return nil, err } cmd := gitNoLFS(args...).Cmd if len(opt.WorkingDir) > 0 { cmd.Dir = opt.WorkingDir } cmd.Stdin = stdin stdout, err := cmd.StdoutPipe() if err != nil { return nil, err } stderr, err := cmd.StderrPipe() if err != nil { return nil, err } tracerx.Printf("run_command: git %s", strings.Join(args, " ")) if err := cmd.Start(); err != nil { return nil, err } return &RevListScanner{ s: bufio.NewScanner(stdout), closeFn: func() error { msg, _ := ioutil.ReadAll(stderr) // First check if there was a non-zero exit code given // when Wait()-ing on the command execution. if err := cmd.Wait(); err != nil { return errors.Errorf("Error in git %s: %v %s", strings.Join(args, " "), err, msg) } // If the command exited cleanly, but found an ambiguous // refname, promote that to an error and return it. // // `git-rev-list(1)` does not treat ambiguous refnames // as fatal (non-zero exit status), but we do. if am := ambiguousRegex.FindSubmatch(msg); len(am) > 1 { return errors.Errorf("ref %s is ambiguous", am[1]) } return nil }, }, nil } // revListArgs returns the arguments for a given included and excluded set of // SHA1s, and ScanRefsOptions instance. // // In order, it returns the contents of stdin as an io.Reader, the args passed // to git as a []string, and any error encountered in generating those if one // occurred. func revListArgs(include, exclude []string, opt *ScanRefsOptions) (io.Reader, []string, error) { var stdin io.Reader args := []string{"rev-list"} if !opt.CommitsOnly { args = append(args, "--objects") } if opt.Reverse { args = append(args, "--reverse") } if orderFlag, ok := opt.Order.Flag(); ok { args = append(args, orderFlag) } switch opt.Mode { case ScanRefsMode: if opt.SkipDeletedBlobs { args = append(args, "--no-walk") } else { args = append(args, "--do-walk") } args = append(args, includeExcludeShas(include, exclude)...) case ScanAllMode: args = append(args, "--all") case ScanLeftToRemoteMode: if len(opt.SkippedRefs) == 0 { args = append(args, includeExcludeShas(include, exclude)...) args = append(args, "--not", "--remotes="+opt.Remote) } else { args = append(args, "--stdin") stdin = strings.NewReader(strings.Join( append(includeExcludeShas(include, exclude), opt.SkippedRefs...), "\n"), ) } default: return nil, nil, errors.Errorf("unknown scan type: %d", opt.Mode) } return stdin, append(args, "--"), nil } func includeExcludeShas(include, exclude []string) []string { include = nonZeroShas(include) exclude = nonZeroShas(exclude) args := make([]string, 0, len(include)+len(exclude)) for _, i := range include { args = append(args, i) } for _, x := range exclude { args = append(args, fmt.Sprintf("^%s", x)) } return args } func nonZeroShas(all []string) []string { nz := make([]string, 0, len(all)) for _, sha := range all { if len(sha) > 0 && !z40.MatchString(sha) { nz = append(nz, sha) } } return nz } // Name is an optional field that gives the name of the object (if the object is // a tree, blob). // // It can be called before or after Scan(), but will return "" if called // before. func (s *RevListScanner) Name() string { return s.name } // OID is the hex-decoded bytes of the object's ID. // // It can be called before or after Scan(), but will return "" if called // before. func (s *RevListScanner) OID() []byte { return s.oid } // Err returns the last encountered error (or nil) after a call to Scan(). 
// // It SHOULD be called, checked and handled after a call to Scan(). func (s *RevListScanner) Err() error { return s.err } // Scan scans the next entry given by git-rev-list(1), and returns true/false // indicating if there are more results to scan. func (s *RevListScanner) Scan() bool { var err error s.oid, s.name, err = s.scan() if err != nil { if err != io.EOF { s.err = err } return false } return len(s.oid) > 0 } // Close closes the RevListScanner by freeing any resources held by the // instance while running, and returns any error encountered while doing so. func (s *RevListScanner) Close() error { if s.closeFn == nil { return nil } return s.closeFn() } // scan provides the internal implementation of scanning a line of text from the // output of `git-rev-list(1)`. func (s *RevListScanner) scan() ([]byte, string, error) { if !s.s.Scan() { return nil, "", s.s.Err() } line := strings.TrimSpace(s.s.Text()) if len(line) < 40 { return nil, "", nil } sha1, err := hex.DecodeString(line[:40]) if err != nil { return nil, "", err } var name string if len(line) > 40 { name = line[41:] } return sha1, name, nil } git-lfs-2.3.4/git/rev_list_scanner_test.go000066400000000000000000000135441317167762300206220ustar00rootroot00000000000000package git import ( "bufio" "encoding/hex" "errors" "io/ioutil" "strings" "sync/atomic" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type ArgsTestCase struct { Include []string Exclude []string Opt *ScanRefsOptions ExpectedStdin string ExpectedArgs []string ExpectedErr string } func (c *ArgsTestCase) Assert(t *testing.T) { stdin, args, err := revListArgs(c.Include, c.Exclude, c.Opt) if len(c.ExpectedErr) > 0 { assert.EqualError(t, err, c.ExpectedErr) } else { assert.Nil(t, err) } require.Equal(t, len(c.ExpectedArgs), len(args)) for i := 0; i < len(c.ExpectedArgs); i++ { assert.Equal(t, c.ExpectedArgs[i], args[i], "element #%d not equal: wanted %q, got %q", i, c.ExpectedArgs[i], args[i]) } if stdin != nil { b, err := ioutil.ReadAll(stdin) assert.Nil(t, err) assert.Equal(t, c.ExpectedStdin, string(b)) } else if len(c.ExpectedStdin) > 0 { t.Errorf("git: expected stdin contents %s, got none", c.ExpectedStdin) } } var ( s1 = "decafdecafdecafdecafdecafdecafdecafdecaf" s2 = "cafecafecafecafecafecafecafecafecafecafe" ) func TestRevListArgs(t *testing.T) { for desc, c := range map[string]*ArgsTestCase{ "scan refs deleted, left and right": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, SkipDeletedBlobs: false, }, ExpectedArgs: []string{"rev-list", "--objects", "--do-walk", s1, "^" + s2, "--"}, }, "scan refs not deleted, left and right": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, SkipDeletedBlobs: true, }, ExpectedArgs: []string{"rev-list", "--objects", "--no-walk", s1, "^" + s2, "--"}, }, "scan refs deleted, left only": { Include: []string{s1}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, SkipDeletedBlobs: false, }, ExpectedArgs: []string{"rev-list", "--objects", "--do-walk", s1, "--"}, }, "scan refs not deleted, left only": { Include: []string{s1}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, SkipDeletedBlobs: true, }, ExpectedArgs: []string{"rev-list", "--objects", "--no-walk", s1, "--"}, }, "scan all": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanAllMode, }, ExpectedArgs: []string{"rev-list", "--objects", "--all", "--"}, }, "scan left to remote, no skipped refs": { Include: []string{s1}, Opt: &ScanRefsOptions{ 
Mode: ScanLeftToRemoteMode, Remote: "origin", SkippedRefs: []string{}, }, ExpectedArgs: []string{"rev-list", "--objects", s1, "--not", "--remotes=origin", "--"}, }, "scan left to remote, skipped refs": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanLeftToRemoteMode, Remote: "origin", SkippedRefs: []string{"a", "b", "c"}, }, ExpectedArgs: []string{"rev-list", "--objects", "--stdin", "--"}, ExpectedStdin: s1 + "\n^" + s2 + "\na\nb\nc", }, "scan unknown type": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanningMode(-1), }, ExpectedErr: "unknown scan type: -1", }, "scan date order": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, Order: DateRevListOrder, }, ExpectedArgs: []string{"rev-list", "--objects", "--date-order", "--do-walk", s1, "^" + s2, "--"}, }, "scan author date order": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, Order: AuthorDateRevListOrder, }, ExpectedArgs: []string{"rev-list", "--objects", "--author-date-order", "--do-walk", s1, "^" + s2, "--"}, }, "scan topo order": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, Order: TopoRevListOrder, }, ExpectedArgs: []string{"rev-list", "--objects", "--topo-order", "--do-walk", s1, "^" + s2, "--"}, }, "scan commits only": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, CommitsOnly: true, }, ExpectedArgs: []string{"rev-list", "--do-walk", s1, "^" + s2, "--"}, }, "scan reverse": { Include: []string{s1}, Exclude: []string{s2}, Opt: &ScanRefsOptions{ Mode: ScanRefsMode, Reverse: true, }, ExpectedArgs: []string{"rev-list", "--objects", "--reverse", "--do-walk", s1, "^" + s2, "--"}, }, } { t.Run(desc, c.Assert) } } func TestRevListScannerCallsClose(t *testing.T) { var called uint32 err := errors.New("Hello world") s := &RevListScanner{ closeFn: func() error { atomic.AddUint32(&called, 1) return err }, } got := s.Close() assert.EqualValues(t, 1, atomic.LoadUint32(&called)) assert.Equal(t, err, got) } func TestRevListScannerTreatsCloseFnAsOptional(t *testing.T) { s := &RevListScanner{ closeFn: nil, } defer func() { assert.Nil(t, recover()) }() assert.Nil(t, s.Close()) } func TestRevListScannerParsesLinesWithNames(t *testing.T) { given := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa name.dat" s := &RevListScanner{ s: bufio.NewScanner(strings.NewReader(given)), } assert.True(t, s.Scan()) assert.Equal(t, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", hex.EncodeToString(s.OID())) assert.Equal(t, "name.dat", s.Name()) assert.Nil(t, s.Err()) assert.False(t, s.Scan()) assert.Equal(t, "", s.Name()) assert.Nil(t, s.OID()) assert.Nil(t, s.Err()) } func TestRevListScannerParsesLinesWithoutName(t *testing.T) { given := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" s := &RevListScanner{ s: bufio.NewScanner(strings.NewReader(given)), } assert.True(t, s.Scan()) assert.Equal(t, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", hex.EncodeToString(s.OID())) assert.Nil(t, s.Err()) assert.False(t, s.Scan()) assert.Equal(t, "", s.Name()) assert.Nil(t, s.OID()) assert.Nil(t, s.Err()) } git-lfs-2.3.4/glide.lock000066400000000000000000000031201317167762300150340ustar00rootroot00000000000000hash: e19b925b9eaca9a10a7742b4a4b1dc8047bff437584538dda59f4f10e69fa6ca updated: 2017-09-27T12:34:48.032089491-04:00 imports: - name: github.com/bgentry/go-netrc version: 9fd32a8b3d3d3f9d43c341bfe098430e07609480 subpackages: - netrc - name: 
github.com/inconshreveable/mousetrap version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 - name: github.com/kr/pty version: 5cf931ef8f76dccd0910001d74a58a7fca84a83d - name: github.com/olekukonko/ts version: ecf753e7c962639ab5a1fb46f7da627d4c0a04b8 - name: github.com/pkg/errors version: c605e284fe17294bda444b34710735b29d1a9d90 - name: github.com/rubyist/tracerx version: 787959303086f44a8c361240dfac53d3e9d53ed2 - name: github.com/spf13/cobra version: c55cdf33856a08e4822738728b41783292812889 - name: github.com/spf13/pflag version: 580b9be06c33d8ba9dcc8757ea56b7642472c2f5 - name: github.com/stretchr/testify version: 6cb3b85ef5a0efef77caef88363ec4d4b5c0976d subpackages: - assert - require - name: github.com/ThomsonReutersEikon/go-ntlm version: b00ec39bbdd04f845950f4dbb4fd0a2c3155e830 subpackages: - ntlm - ntlm/md4 - name: github.com/xeipuuv/gojsonschema version: 6b67b3fab74d992bd07f72550006ab2c6907c416 testImports: - name: github.com/davecgh/go-spew version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d subpackages: - spew - name: github.com/pmezard/go-difflib version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: - difflib - name: github.com/xeipuuv/gojsonpointer version: 6fe8760cad3569743d51ddbb243b26f8456742dc - name: github.com/xeipuuv/gojsonreference version: e02fc20de94c78484cd5ffb007f8af96be030a45 git-lfs-2.3.4/glide.yaml000066400000000000000000000021001317167762300150430ustar00rootroot00000000000000package: github.com/git-lfs/git-lfs import: - package: github.com/bgentry/go-netrc version: 9fd32a8b3d3d3f9d43c341bfe098430e07609480 subpackages: - netrc - package: github.com/kr/pty version: 5cf931ef8f76dccd0910001d74a58a7fca84a83d - package: github.com/inconshreveable/mousetrap version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 - package: github.com/olekukonko/ts version: ecf753e7c962639ab5a1fb46f7da627d4c0a04b8 - package: github.com/rubyist/tracerx version: 787959303086f44a8c361240dfac53d3e9d53ed2 - package: github.com/spf13/cobra version: c55cdf33856a08e4822738728b41783292812889 - package: github.com/spf13/pflag version: 580b9be06c33d8ba9dcc8757ea56b7642472c2f5 - package: github.com/stretchr/testify version: 6cb3b85ef5a0efef77caef88363ec4d4b5c0976d - package: github.com/ThomsonReutersEikon/go-ntlm version: b00ec39bbdd04f845950f4dbb4fd0a2c3155e830 subpackages: - ntlm - package: github.com/xeipuuv/gojsonschema version: 6b67b3fab74d992bd07f72550006ab2c6907c416 - package: github.com/pkg/errors version: c605e284fe17294bda444b34710735b29d1a9d90 git-lfs-2.3.4/lfs/000077500000000000000000000000001317167762300136665ustar00rootroot00000000000000git-lfs-2.3.4/lfs/attribute.go000066400000000000000000000071541317167762300162270ustar00rootroot00000000000000package lfs import ( "fmt" "strings" "github.com/git-lfs/git-lfs/git" ) // Attribute wraps the structure and some operations of Git's conception of an // "attribute", as defined here: http://git-scm.com/docs/gitattributes. type Attribute struct { // The Section of an Attribute refers to the location at which all // properties are relative to. For example, for a Section with the value // "core", Git will produce something like: // // [core] // autocrlf = true // ... Section string // The Properties of an Attribute refer to all of the keys and values // that define that Attribute. Properties map[string]string // Previous values of these attributes that can be automatically upgraded Upgradeables map[string][]string } // InstallOptions serves as an argument to Install(). 
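// Editor's note: a sketch of installing an Attribute (hypothetical literal;
// the real filter configuration is assembled by the install command elsewhere
// in this codebase):
//
//	attr := &Attribute{
//		Section: "filter.lfs",
//		Properties: map[string]string{
//			"clean":  "git-lfs clean -- %f",
//			"smudge": "git-lfs smudge -- %f",
//		},
//	}
//	// Fails if a conflicting, non-upgradeable value exists and Force is false.
//	err := attr.Install(InstallOptions{Local: true})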
type InstallOptions struct { Force bool Local bool System bool } // Install instructs Git to set all keys and values relative to the root // location of this Attribute. For any particular key/value pair, if a matching // key is already set, it will be overridden if it is either a) empty, or b) the // `force` argument is passed as true. If an attribute is already set to a // different value than what is given, and force is false, an error will be // returned immediately, and the rest of the attributes will not be set. func (a *Attribute) Install(opt InstallOptions) error { for k, v := range a.Properties { var upgradeables []string if a.Upgradeables != nil { // use pre-normalised key since caller will have set up the same upgradeables = a.Upgradeables[k] } key := a.normalizeKey(k) if err := a.set(key, v, upgradeables, opt); err != nil { return err } } return nil } // normalizeKey makes an absolute path out of a partial relative one. For a // relative path of "foo", and a root Section of "bar", "bar.foo" will be returned. func (a *Attribute) normalizeKey(relative string) string { return strings.Join([]string{a.Section, relative}, ".") } // set attempts to set a single key/value pair portion of this Attribute. If a // matching key already exists and the value is not equal to the desired value, // an error will be thrown if force is set to false. If force is true, the value // will be overridden. func (a *Attribute) set(key, value string, upgradeables []string, opt InstallOptions) error { var currentValue string if opt.Local { currentValue = git.Config.FindLocal(key) } else if opt.System { currentValue = git.Config.FindSystem(key) } else { currentValue = git.Config.FindGlobal(key) } if opt.Force || shouldReset(currentValue, upgradeables) { var err error if opt.Local { _, err = git.Config.SetLocal("", key, value) } else if opt.System { _, err = git.Config.SetSystem(key, value) } else { _, err = git.Config.SetGlobal(key, value) } return err } else if currentValue != value { return fmt.Errorf("The %q attribute should be %q but is %q", key, value, currentValue) } return nil } // Uninstall removes all properties in the path of this property. func (a *Attribute) Uninstall(opt InstallOptions) { if opt.Local { git.Config.UnsetLocalSection(a.Section) } else if opt.System { git.Config.UnsetSystemSection(a.Section) } else { git.Config.UnsetGlobalSection(a.Section) } } // shouldReset determines whether or not a value is resettable given its current // value on the system. If the value is empty (length = 0), then it will pass. // It will also pass if it matches any upgradeable value func shouldReset(value string, upgradeables []string) bool { if len(value) == 0 { return true } for _, u := range upgradeables { if value == u { return true } } return false } git-lfs-2.3.4/lfs/diff_index_scanner.go000066400000000000000000000132411317167762300200260ustar00rootroot00000000000000package lfs import ( "bufio" "fmt" "strconv" "strings" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/git" ) // Status represents the status of a file that appears in the output of `git // diff-index`. 
// // More information about each of its valid instances can be found: // https://git-scm.com/docs/git-diff-index type DiffIndexStatus rune const ( StatusAddition DiffIndexStatus = 'A' StatusCopy DiffIndexStatus = 'C' StatusDeletion DiffIndexStatus = 'D' StatusModification DiffIndexStatus = 'M' StatusRename DiffIndexStatus = 'R' StatusTypeChange DiffIndexStatus = 'T' StatusUnmerged DiffIndexStatus = 'U' StatusUnknown DiffIndexStatus = 'X' ) // String implements fmt.Stringer by returning a human-readable name for each // status. func (s DiffIndexStatus) String() string { switch s { case StatusAddition: return "addition" case StatusCopy: return "copy" case StatusDeletion: return "deletion" case StatusModification: return "modification" case StatusRename: return "rename" case StatusTypeChange: return "change" case StatusUnmerged: return "unmerged" case StatusUnknown: return "unknown" } return "" } // Format implements fmt.Formatter. If printed as "%+d", "%+s", or "%+v", the // status will be written out as an English word: i.e., "addition", "copy", // "deletion", etc. // // If the '+' flag is not given, the shorthand will be used instead: 'A', 'C', // and 'D', respectively. // // If any other format verb is given, this function will panic(). func (s DiffIndexStatus) Format(state fmt.State, c rune) { switch c { case 'd', 's', 'v': if state.Flag('+') { state.Write([]byte(s.String())) } else { state.Write([]byte{byte(rune(s))}) } default: panic(fmt.Sprintf("cannot format %v for DiffIndexStatus", c)) } } // DiffIndexEntry holds information about a single item in the results of a `git // diff-index` command. type DiffIndexEntry struct { // SrcMode is the file mode of the "src" file, stored as a string-based // octal. SrcMode string // DstMode is the file mode of the "dst" file, stored as a string-based // octal. DstMode string // SrcSha is the Git blob ID of the "src" file. SrcSha string // DstSha is the Git blob ID of the "dst" file. DstSha string // Status is the status of the file in the index. Status DiffIndexStatus // StatusScore is the optional "score" associated with a particular // status. StatusScore int // SrcName is the name of the file in its "src" state as it appears in // the index. SrcName string // DstName is the name of the file in its "dst" state as it appears in // the index. DstName string } // DiffIndexScanner scans the output of the `git diff-index` command. type DiffIndexScanner struct { // next is the next entry scanned by the Scanner. next *DiffIndexEntry // err is any error that the Scanner encountered while scanning. err error // from is the underlying scanner, scanning the `git diff-index` // command's stdout. from *bufio.Scanner } // NewDiffIndexScanner initializes a new `DiffIndexScanner` scanning at the // given ref, "ref". // // If "cache" is given, the DiffIndexScanner will scan for differences between // the given ref and the index. If "cache" is _not_ given, DiffIndexScanner will // scan for differences between the given ref and the currently checked out // tree. // // If any error was encountered in starting the command or closing its `stdin`, // that error will be returned immediately. Otherwise, a `*DiffIndexScanner` // will be returned with a `nil` error. func NewDiffIndexScanner(ref string, cached bool) (*DiffIndexScanner, error) { scanner, err := git.DiffIndex(ref, cached) if err != nil { return nil, err } return &DiffIndexScanner{ from: scanner, }, nil } // Scan advances the scan line and yields either a new value for Entry(), or an // Err(). 
It returns true or false, whether or not it can continue scanning for
// more entries.
func (s *DiffIndexScanner) Scan() bool {
	if !s.prepareScan() {
		return false
	}

	s.next, s.err = s.scan(s.from.Text())
	if s.err != nil {
		s.err = errors.Wrap(s.err, "scan")
	}

	return s.err == nil
}

// Entry returns the last entry that was Scan()'d by the DiffIndexScanner.
func (s *DiffIndexScanner) Entry() *DiffIndexEntry { return s.next }

// Err returns the last error that was encountered by the DiffIndexScanner.
func (s *DiffIndexScanner) Err() error { return s.err }

// prepareScan clears out the results from the last Scan() loop, and advances
// the internal scanner to fetch a new line of Text().
func (s *DiffIndexScanner) prepareScan() bool {
	s.next, s.err = nil, nil
	if !s.from.Scan() {
		s.err = s.from.Err()
		return false
	}

	return true
}

// scan parses the given line and returns a `*DiffIndexEntry` or an error,
// depending on whether or not the parse was successful.
func (s *DiffIndexScanner) scan(line string) (*DiffIndexEntry, error) {
	// Format is:
	//   :100644 100644 c5b3d83a7542255ec7856487baa5e83d65b1624c 9e82ac1b514be060945392291b5b3108c22f6fe3 M	foo.gif
	//   :<src mode> <dst mode> <src sha1> <dst sha1> <status>\t<src name>[\t<dst name>]

	parts := strings.Split(line, "\t")
	if len(parts) < 2 {
		return nil, errors.Errorf("invalid line: %s", line)
	}

	desc := strings.Fields(parts[0])
	if len(desc) < 5 {
		return nil, errors.Errorf("invalid description: %s", parts[0])
	}

	entry := &DiffIndexEntry{
		SrcMode: strings.TrimPrefix(desc[0], ":"),
		DstMode: desc[1],
		SrcSha:  desc[2],
		DstSha:  desc[3],
		Status:  DiffIndexStatus(rune(desc[4][0])),
		SrcName: parts[1],
	}

	// The status may carry an optional score (e.g., "R100"); record it only
	// when it parses successfully.
	if score, err := strconv.Atoi(desc[4][1:]); err == nil {
		entry.StatusScore = score
	}

	if len(parts) > 2 {
		entry.DstName = parts[2]
	}

	return entry, nil
}
git-lfs-2.3.4/lfs/extension.go000066400000000000000000000065451317167762300162410ustar00rootroot00000000000000package lfs

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"os"
	"os/exec"
	"strings"

	"github.com/git-lfs/git-lfs/config"
)

type pipeRequest struct {
	action     string
	reader     io.Reader
	fileName   string
	extensions []config.Extension
}

type pipeResponse struct {
	file    *os.File
	results []*pipeExtResult
}

type pipeExtResult struct {
	name   string
	oidIn  string
	oidOut string
}

type extCommand struct {
	cmd    *exec.Cmd
	out    io.WriteCloser
	err    *bytes.Buffer
	hasher hash.Hash
	result *pipeExtResult
}

func pipeExtensions(request *pipeRequest) (response pipeResponse, err error) {
	var extcmds []*extCommand

	defer func() {
		// In the case of an early return before the end of this
		// function (in response to an error, etc), kill all running
		// processes. Errors are ignored since the function has already
		// returned.
		//
		// In the happy path, the commands will have already been
		// `Wait()`-ed upon and e.cmd.Process.Kill() will return an
		// error, but we can ignore it.
		for _, e := range extcmds {
			if e.cmd.Process != nil {
				e.cmd.Process.Kill()
			}
		}
	}()

	for _, e := range request.extensions {
		var pieces []string
		switch request.action {
		case "clean":
			pieces = strings.Split(e.Clean, " ")
		case "smudge":
			pieces = strings.Split(e.Smudge, " ")
		default:
			err = fmt.Errorf("Invalid action: " + request.action)
			return
		}
		name := strings.Trim(pieces[0], " ")
		var args []string
		for _, value := range pieces[1:] {
			arg := strings.Replace(value, "%f", request.fileName, -1)
			args = append(args, arg)
		}
		cmd := exec.Command(name, args...)
ec := &extCommand{cmd: cmd, result: &pipeExtResult{name: e.Name}} extcmds = append(extcmds, ec) } hasher := sha256.New() pipeReader, pipeWriter := io.Pipe() multiWriter := io.MultiWriter(hasher, pipeWriter) var input io.Reader var output io.WriteCloser input = pipeReader extcmds[0].cmd.Stdin = input if response.file, err = TempFile(""); err != nil { return } defer response.file.Close() output = response.file last := len(extcmds) - 1 for i, ec := range extcmds { ec.hasher = sha256.New() if i == last { ec.cmd.Stdout = io.MultiWriter(ec.hasher, output) ec.out = output continue } nextec := extcmds[i+1] var nextStdin io.WriteCloser var stdout io.ReadCloser if nextStdin, err = nextec.cmd.StdinPipe(); err != nil { return } if stdout, err = ec.cmd.StdoutPipe(); err != nil { return } ec.cmd.Stdin = input ec.cmd.Stdout = io.MultiWriter(ec.hasher, nextStdin) ec.out = nextStdin input = stdout var errBuff bytes.Buffer ec.err = &errBuff ec.cmd.Stderr = ec.err } for _, ec := range extcmds { if err = ec.cmd.Start(); err != nil { return } } if _, err = io.Copy(multiWriter, request.reader); err != nil { return } if err = pipeWriter.Close(); err != nil { return } for _, ec := range extcmds { if err = ec.cmd.Wait(); err != nil { if ec.err != nil { errStr := ec.err.String() err = fmt.Errorf("Extension '%s' failed with: %s", ec.result.name, errStr) } return } if err = ec.out.Close(); err != nil { return } } oid := hex.EncodeToString(hasher.Sum(nil)) for _, ec := range extcmds { ec.result.oidIn = oid oid = hex.EncodeToString(ec.hasher.Sum(nil)) ec.result.oidOut = oid response.results = append(response.results, ec.result) } return } git-lfs-2.3.4/lfs/gitscanner.go000066400000000000000000000147111317167762300163560ustar00rootroot00000000000000package lfs import ( "errors" "fmt" "sync" "time" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/rubyist/tracerx" ) var missingCallbackErr = errors.New("No callback given") // IsCallbackMissing returns a boolean indicating whether the error is reporting // that a GitScanner is missing a required GitScannerCallback. func IsCallbackMissing(err error) bool { return err == missingCallbackErr } // GitScanner scans objects in a Git repository for LFS pointers. type GitScanner struct { Filter *filepathfilter.Filter FoundPointer GitScannerFoundPointer FoundLockable GitScannerFoundLockable PotentialLockables GitScannerSet remote string skippedRefs []string closed bool started time.Time mu sync.Mutex } type GitScannerFoundPointer func(*WrappedPointer, error) type GitScannerFoundLockable func(filename string) type GitScannerSet interface { Contains(string) bool } // NewGitScanner initializes a *GitScanner for a Git repository in the current // working directory. func NewGitScanner(cb GitScannerFoundPointer) *GitScanner { return &GitScanner{started: time.Now(), FoundPointer: cb} } // Close stops exits once all processing has stopped, and all resources are // tracked and cleaned up. func (s *GitScanner) Close() { s.mu.Lock() defer s.mu.Unlock() if s.closed { return } s.closed = true tracerx.PerformanceSince("scan", s.started) } // RemoteForPush sets up this *GitScanner to scan for objects to push to the // given remote. Needed for ScanLeftToRemote(). 
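// Editor's note: a sketch of the push-scanning flow (hypothetical remote and
// ref names):
//
//	s := NewGitScanner(func(p *WrappedPointer, err error) {
//		if err != nil {
//			return // handle error
//		}
//		fmt.Println(p.Oid, p.Name)
//	})
//	defer s.Close()
//	if err := s.RemoteForPush("origin"); err != nil {
//		// handle error
//	}
//	if err := s.ScanLeftToRemote("refs/heads/master", nil); err != nil {
//		// handle error
//	}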
func (s *GitScanner) RemoteForPush(r string) error { s.mu.Lock() defer s.mu.Unlock() if len(s.remote) > 0 && s.remote != r { return fmt.Errorf("Trying to set remote to %q, already set to %q", r, s.remote) } s.remote = r s.skippedRefs = calcSkippedRefs(r) return nil } // ScanLeftToRemote scans through all commits starting at the given ref that the // given remote does not have. See RemoteForPush(). func (s *GitScanner) ScanLeftToRemote(left string, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.FoundPointer) if err != nil { return err } s.mu.Lock() if len(s.remote) == 0 { s.mu.Unlock() return fmt.Errorf("Unable to scan starting at %q: no remote set.", left) } s.mu.Unlock() return scanRefsToChan(s, callback, left, "", s.opts(ScanLeftToRemoteMode)) } // ScanRefRange scans through all commits from the given left and right refs, // including git objects that have been modified or deleted. func (s *GitScanner) ScanRefRange(left, right string, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.FoundPointer) if err != nil { return err } opts := s.opts(ScanRefsMode) opts.SkipDeletedBlobs = false return scanRefsToChan(s, callback, left, right, opts) } // ScanRefWithDeleted scans through all objects in the given ref, including // git objects that have been modified or deleted. func (s *GitScanner) ScanRefWithDeleted(ref string, cb GitScannerFoundPointer) error { return s.ScanRefRange(ref, "", cb) } // ScanRef scans through all objects in the current ref, excluding git objects // that have been modified or deleted before the ref. func (s *GitScanner) ScanRef(ref string, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.FoundPointer) if err != nil { return err } opts := s.opts(ScanRefsMode) opts.SkipDeletedBlobs = true return scanRefsToChan(s, callback, ref, "", opts) } // ScanAll scans through all objects in the git repository. func (s *GitScanner) ScanAll(cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.FoundPointer) if err != nil { return err } opts := s.opts(ScanAllMode) opts.SkipDeletedBlobs = false return scanRefsToChan(s, callback, "", "", opts) } // ScanTree takes a ref and returns WrappedPointer objects in the tree at that // ref. Differs from ScanRefs in that multiple files in the tree with the same // content are all reported. func (s *GitScanner) ScanTree(ref string) error { callback, err := firstGitScannerCallback(s.FoundPointer) if err != nil { return err } return runScanTree(callback, ref, s.Filter) } // ScanUnpushed scans history for all LFS pointers which have been added but not // pushed to the named remote. remote can be left blank to mean 'any remote'. func (s *GitScanner) ScanUnpushed(remote string, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.FoundPointer) if err != nil { return err } return scanUnpushed(callback, remote) } // ScanPreviousVersions scans changes reachable from ref (commit) back to since. // Returns channel of pointers for *previous* versions that overlap that time. // Does not include pointers which were still in use at ref (use ScanRefsToChan // for that) func (s *GitScanner) ScanPreviousVersions(ref string, since time.Time, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.FoundPointer) if err != nil { return err } return logPreviousSHAs(callback, ref, since) } // ScanIndex scans the git index for modified LFS objects. 
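// Editor's note: how the scanning methods above map onto modes and options
// (summarized from the opts() calls in this file):
//
//	ScanLeftToRemote                -> ScanLeftToRemoteMode (requires RemoteForPush)
//	ScanRefRange/ScanRefWithDeleted -> ScanRefsMode, SkipDeletedBlobs = false
//	ScanRef                         -> ScanRefsMode, SkipDeletedBlobs = true
//	ScanAll                         -> ScanAllMode
//
// ScanUnpushed and ScanPreviousVersions shell out to git log instead, and
// ScanIndex (below) uses git diff-index.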
func (s *GitScanner) ScanIndex(ref string, cb GitScannerFoundPointer) error { callback, err := firstGitScannerCallback(cb, s.FoundPointer) if err != nil { return err } return scanIndex(callback, ref) } func (s *GitScanner) opts(mode ScanningMode) *ScanRefsOptions { s.mu.Lock() defer s.mu.Unlock() opts := newScanRefsOptions() opts.ScanMode = mode opts.RemoteName = s.remote opts.skippedRefs = s.skippedRefs return opts } func firstGitScannerCallback(callbacks ...GitScannerFoundPointer) (GitScannerFoundPointer, error) { for _, cb := range callbacks { if cb == nil { continue } return cb, nil } return nil, missingCallbackErr } type ScanningMode int const ( ScanRefsMode = ScanningMode(iota) // 0 - or default scan mode ScanAllMode = ScanningMode(iota) ScanLeftToRemoteMode = ScanningMode(iota) ) type ScanRefsOptions struct { ScanMode ScanningMode RemoteName string SkipDeletedBlobs bool skippedRefs []string nameMap map[string]string mutex *sync.Mutex } func (o *ScanRefsOptions) GetName(sha string) (string, bool) { o.mutex.Lock() name, ok := o.nameMap[sha] o.mutex.Unlock() return name, ok } func (o *ScanRefsOptions) SetName(sha, name string) { o.mutex.Lock() o.nameMap[sha] = name o.mutex.Unlock() } func newScanRefsOptions() *ScanRefsOptions { return &ScanRefsOptions{ nameMap: make(map[string]string, 0), mutex: &sync.Mutex{}, } } git-lfs-2.3.4/lfs/gitscanner_catfilebatch.go000066400000000000000000000066641317167762300210570ustar00rootroot00000000000000package lfs import ( "bytes" "crypto/sha256" "fmt" "io" "github.com/git-lfs/git-lfs/git" ) // runCatFileBatch uses 'git cat-file --batch' to get the object contents of a // git object, given its sha1. The contents will be decoded into a Git LFS // pointer. Git Blob SHA1s are read from the sha1Ch channel and fed to STDIN. // Results are parsed from STDOUT, and any eligible LFS pointers are sent to // pointerCh. If a Git Blob is not an LFS pointer, check the lockableSet to see // if that blob is for a locked file. Any errors are sent to errCh. An error is // returned if the 'git cat-file' command fails to start. 
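// Editor's note: `git cat-file --batch` is stock Git plumbing. For each SHA1
// written to its stdin it answers with a header line followed by the raw
// object contents:
//
//	<sha1> SP <type> SP <size> LF
//	<contents> LF
//
// The PointerScanner below consumes that stream, hashing every blob and
// attempting to decode sufficiently small ones as LFS pointers.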
func runCatFileBatch(pointerCh chan *WrappedPointer, lockableCh chan string, lockableSet *lockableNameSet, revs *StringChannelWrapper, errCh chan error) error { scanner, err := NewPointerScanner() if err != nil { scanner.Close() return err } go func() { for r := range revs.Results { canScan := scanner.Scan(r) if err := scanner.Err(); err != nil { errCh <- err } else if p := scanner.Pointer(); p != nil { pointerCh <- p } else if b := scanner.BlobSHA(); len(b) == 40 { if name, ok := lockableSet.Check(b); ok { lockableCh <- name } } if !canScan { break } } if err := revs.Wait(); err != nil { errCh <- err } if err := scanner.Close(); err != nil { errCh <- err } close(pointerCh) close(errCh) close(lockableCh) }() return nil } type PointerScanner struct { scanner *git.ObjectScanner blobSha string contentsSha string pointer *WrappedPointer err error } func NewPointerScanner() (*PointerScanner, error) { scanner, err := git.NewObjectScanner() if err != nil { return nil, err } return &PointerScanner{scanner: scanner}, nil } func (s *PointerScanner) BlobSHA() string { return s.blobSha } func (s *PointerScanner) ContentsSha() string { return s.contentsSha } func (s *PointerScanner) Pointer() *WrappedPointer { return s.pointer } func (s *PointerScanner) Err() error { return s.err } func (s *PointerScanner) Scan(sha string) bool { s.pointer, s.err = nil, nil s.blobSha, s.contentsSha = "", "" b, c, p, err := s.next(sha) s.blobSha = b s.contentsSha = c s.pointer = p if err != nil { if err != io.EOF { s.err = err } return false } return true } func (s *PointerScanner) Close() error { return s.scanner.Close() } func (s *PointerScanner) next(blob string) (string, string, *WrappedPointer, error) { if !s.scanner.Scan(blob) { if err := s.scanner.Err(); err != nil { return "", "", nil, err } return "", "", nil, io.EOF } blobSha := s.scanner.Sha1() size := s.scanner.Size() sha := sha256.New() var buf *bytes.Buffer var to io.Writer = sha if size <= blobSizeCutoff { buf = bytes.NewBuffer(make([]byte, 0, size)) to = io.MultiWriter(to, buf) } read, err := io.CopyN(to, s.scanner.Contents(), int64(size)) if err != nil { return blobSha, "", nil, err } if int64(size) != read { return blobSha, "", nil, fmt.Errorf("expected %d bytes, read %d bytes", size, read) } var pointer *WrappedPointer var contentsSha string if size <= blobSizeCutoff { if p, err := DecodePointer(bytes.NewReader(buf.Bytes())); err != nil { contentsSha = fmt.Sprintf("%x", sha.Sum(nil)) } else { pointer = &WrappedPointer{ Sha1: blobSha, Pointer: p, } contentsSha = p.Oid } } else { contentsSha = fmt.Sprintf("%x", sha.Sum(nil)) } return blobSha, contentsSha, pointer, err } git-lfs-2.3.4/lfs/gitscanner_catfilebatchcheck.go000066400000000000000000000050531317167762300220440ustar00rootroot00000000000000package lfs import ( "bufio" "fmt" "io/ioutil" "strconv" "github.com/git-lfs/git-lfs/git" ) // runCatFileBatchCheck uses 'git cat-file --batch-check' to get the type and // size of a git object. Any object that isn't of type blob and under the // blobSizeCutoff will be ignored, unless it's a locked file. revs is a channel // over which strings containing git sha1s will be sent. It returns a channel // from which sha1 strings can be read. 
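// Editor's note: `git cat-file --batch-check` answers each SHA1 with a single
// "<sha1> <type> <size>" line. Because a SHA1 is always 40 hex characters,
// the scanner below slices at fixed offsets instead of splitting the line:
//
//	0000000000000000000000000000000000000002 blob 123
//	sha1 = line[0:40], type = line[41:45], size = line[46:]
//
// Blobs smaller than blobSizeCutoff are candidate LFS pointers; larger blobs
// are surfaced separately so locked files can still be detected.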
func runCatFileBatchCheck(smallRevCh chan string, lockableCh chan string, lockableSet *lockableNameSet, revs *StringChannelWrapper, errCh chan error) error { cmd, err := git.CatFile() if err != nil { return err } go func() { scanner := &catFileBatchCheckScanner{s: bufio.NewScanner(cmd.Stdout), limit: blobSizeCutoff} for r := range revs.Results { cmd.Stdin.Write([]byte(r + "\n")) hasNext := scanner.Scan() if err := scanner.Err(); err != nil { errCh <- err } else if b := scanner.LFSBlobOID(); len(b) > 0 { smallRevCh <- b } else if b := scanner.GitBlobOID(); len(b) > 0 { if name, ok := lockableSet.Check(b); ok { lockableCh <- name } } if !hasNext { break } } if err := revs.Wait(); err != nil { errCh <- err } cmd.Stdin.Close() stderr, _ := ioutil.ReadAll(cmd.Stderr) err := cmd.Wait() if err != nil { errCh <- fmt.Errorf("Error in git cat-file --batch-check: %v %v", err, string(stderr)) } close(smallRevCh) close(errCh) }() return nil } type catFileBatchCheckScanner struct { s *bufio.Scanner limit int lfsBlobOID string gitBlobOID string } func (s *catFileBatchCheckScanner) LFSBlobOID() string { return s.lfsBlobOID } func (s *catFileBatchCheckScanner) GitBlobOID() string { return s.gitBlobOID } func (s *catFileBatchCheckScanner) Err() error { return s.s.Err() } func (s *catFileBatchCheckScanner) Scan() bool { lfsBlobSha, gitBlobSha, hasNext := s.next() s.lfsBlobOID = lfsBlobSha s.gitBlobOID = gitBlobSha return hasNext } func (s *catFileBatchCheckScanner) next() (string, string, bool) { hasNext := s.s.Scan() line := s.s.Text() lineLen := len(line) // Format is: // // type is at a fixed spot, if we see that it's "blob", we can avoid // splitting the line just to get the size. if lineLen < 46 { return "", "", hasNext } if line[41:45] != "blob" { return "", "", hasNext } size, err := strconv.Atoi(line[46:lineLen]) if err != nil { return "", "", hasNext } blobSha := line[0:40] if size >= s.limit { return "", blobSha, hasNext } return blobSha, "", hasNext } git-lfs-2.3.4/lfs/gitscanner_catfilebatchcheckscanner_test.go000066400000000000000000000031261317167762300244540ustar00rootroot00000000000000package lfs import ( "bufio" "strings" "testing" "github.com/stretchr/testify/assert" ) func TestCatFileBatchCheckScannerWithValidOutput(t *testing.T) { lines := []string{ "short line", "0000000000000000000000000000000000000000 BLOB capitalized", "0000000000000000000000000000000000000001 blob not-a-size", "0000000000000000000000000000000000000002 blob 123", "0000000000000000000000000000000000000003 blob 1 0", "0000000000000000000000000000000000000004 blob 123456789", } r := strings.NewReader(strings.Join(lines, "\n")) s := &catFileBatchCheckScanner{ s: bufio.NewScanner(r), limit: 1024, } assertNextOID(t, s, "", "") assertNextOID(t, s, "", "") assertNextOID(t, s, "", "") assertNextOID(t, s, "0000000000000000000000000000000000000002", "") assertNextOID(t, s, "", "") assertNextOID(t, s, "", "0000000000000000000000000000000000000004") assertScannerDone(t, s) assert.Equal(t, "", s.LFSBlobOID()) assert.Equal(t, "", s.GitBlobOID()) } type stringScanner interface { Next() (string, bool, error) Err() error Scan() bool } type genericScanner interface { Err() error Scan() bool } func assertNextScan(t *testing.T, scanner genericScanner) { assert.True(t, scanner.Scan()) assert.Nil(t, scanner.Err()) } func assertNextOID(t *testing.T, scanner *catFileBatchCheckScanner, lfsBlobOID, gitBlobOID string) { assertNextScan(t, scanner) assert.Equal(t, lfsBlobOID, scanner.LFSBlobOID()) assert.Equal(t, gitBlobOID, scanner.GitBlobOID()) 
}

func assertScannerDone(t *testing.T, scanner genericScanner) {
	assert.False(t, scanner.Scan())
	assert.Nil(t, scanner.Err())
}
git-lfs-2.3.4/lfs/gitscanner_index.go000066400000000000000000000104221317167762300175400ustar00rootroot00000000000000package lfs

import (
	"strings"
	"sync"
)

// scanIndex scans the index for all Git LFS pointers it finds there, yielding
// each (or any error encountered) to the given GitScannerFoundPointer
// callback.
//
// Ref is the ref at which to scan, which may be "HEAD" if there is at least
// one commit.
func scanIndex(cb GitScannerFoundPointer, ref string) error {
	indexMap := &indexFileMap{
		nameMap:      make(map[string][]*indexFile),
		nameShaPairs: make(map[string]bool),
		mutex:        &sync.Mutex{},
	}

	revs, err := revListIndex(ref, false, indexMap)
	if err != nil {
		return err
	}

	cachedRevs, err := revListIndex(ref, true, indexMap)
	if err != nil {
		return err
	}

	allRevsErr := make(chan error, 5) // can be multiple errors below
	allRevsChan := make(chan string, 1)
	allRevs := NewStringChannelWrapper(allRevsChan, allRevsErr)
	go func() {
		seenRevs := make(map[string]bool, 0)

		for rev := range revs.Results {
			if !seenRevs[rev] {
				allRevsChan <- rev
				seenRevs[rev] = true
			}
		}
		err := revs.Wait()
		if err != nil {
			allRevsErr <- err
		}

		for rev := range cachedRevs.Results {
			if !seenRevs[rev] {
				allRevsChan <- rev
				seenRevs[rev] = true
			}
		}
		err = cachedRevs.Wait()
		if err != nil {
			allRevsErr <- err
		}
		close(allRevsChan)
		close(allRevsErr)
	}()

	smallShas, _, err := catFileBatchCheck(allRevs, nil)
	if err != nil {
		return err
	}

	ch := make(chan gitscannerResult, chanBufSize)

	barePointerCh, _, err := catFileBatch(smallShas, nil)
	if err != nil {
		return err
	}

	go func() {
		for p := range barePointerCh.Results {
			for _, file := range indexMap.FilesFor(p.Sha1) {
				// Append a new *WrappedPointer that combines the data
				// from the index file, and the pointer "p".
				ch <- gitscannerResult{
					Pointer: &WrappedPointer{
						Sha1:    p.Sha1,
						Name:    file.Name,
						SrcName: file.SrcName,
						Status:  file.Status,
						Pointer: p.Pointer,
					},
				}
			}
		}

		if err := barePointerCh.Wait(); err != nil {
			ch <- gitscannerResult{Err: err}
		}

		close(ch)
	}()

	for result := range ch {
		cb(result.Pointer, result.Err)
	}

	return nil
}

// revListIndex uses git diff-index to return the list of object sha1s
// for files in the index. It returns a channel from which sha1 strings can be
// read. The indexMap's nameMap will be filled with indexFile pointers mapping
// sha1s to indexFiles.
func revListIndex(atRef string, cache bool, indexMap *indexFileMap) (*StringChannelWrapper, error) {
	scanner, err := NewDiffIndexScanner(atRef, cache)
	if err != nil {
		return nil, err
	}

	revs := make(chan string, chanBufSize)
	errs := make(chan error, 1)

	go func() {
		for scanner.Scan() {
			var name string = scanner.Entry().DstName
			if len(name) == 0 {
				name = scanner.Entry().SrcName
			}

			var sha string = scanner.Entry().DstSha
			if scanner.Entry().Status == StatusModification {
				sha = scanner.Entry().SrcSha
			}

			indexMap.Add(sha, &indexFile{
				Name:    name,
				SrcName: scanner.Entry().SrcName,
				Status:  string(scanner.Entry().Status),
			})

			revs <- sha
		}

		if err := scanner.Err(); err != nil {
			errs <- err
		}

		close(revs)
		close(errs)
	}()

	return NewStringChannelWrapper(revs, errs), nil
}

// indexFile is used when scanning the index. It stores the name of
// the file, the status of the file in the index, and, in the case of
// a moved or copied file, the original name of the file.
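// Editor's note: with rename detection enabled, git diff-index produces a
// line such as (hypothetical SHAs):
//
//	:100644 100644 aaaa... bbbb... R100	old.dat	new.dat
//
// for which revListIndex above records an indexFile with Name "new.dat",
// SrcName "old.dat", and Status "R", keyed under the destination sha
// "bbbb...".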
type indexFile struct { Name string SrcName string Status string } type indexFileMap struct { // mutex guards nameMap and nameShaPairs mutex *sync.Mutex // nameMap maps SHA1s to a slice of `*indexFile`s nameMap map[string][]*indexFile // nameShaPairs maps "sha1:name" -> bool nameShaPairs map[string]bool } // FilesFor returns all `*indexFile`s that match the given `sha`. func (m *indexFileMap) FilesFor(sha string) []*indexFile { m.mutex.Lock() defer m.mutex.Unlock() return m.nameMap[sha] } // Add appends unique index files to the given SHA, "sha". A file is considered // unique if its combination of SHA and current filename have not yet been seen // by this instance "m" of *indexFileMap. func (m *indexFileMap) Add(sha string, index *indexFile) { m.mutex.Lock() defer m.mutex.Unlock() pairKey := strings.Join([]string{sha, index.Name}, ":") if m.nameShaPairs[pairKey] { return } m.nameMap[sha] = append(m.nameMap[sha], index) m.nameShaPairs[pairKey] = true } git-lfs-2.3.4/lfs/gitscanner_log.go000066400000000000000000000173001317167762300172140ustar00rootroot00000000000000package lfs import ( "bufio" "bytes" "fmt" "io" "io/ioutil" "regexp" "time" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/subprocess" "github.com/rubyist/tracerx" ) // When scanning diffs e.g. parseLogOutputToPointers, which direction of diff to include // data from, i.e. '+' or '-'. Depending on what you're scanning for either might be useful type LogDiffDirection byte const ( LogDiffAdditions = LogDiffDirection('+') // include '+' diffs LogDiffDeletions = LogDiffDirection('-') // include '-' diffs ) var ( // Arguments to append to a git log call which will limit the output to // lfs changes and format the output suitable for parseLogOutput.. method(s) logLfsSearchArgs = []string{ "-G", "oid sha256:", // only diffs which include an lfs file SHA change "-p", // include diff so we can read the SHA "-U12", // Make sure diff context is always big enough to support 10 extension lines to get whole pointer `--format=lfs-commit-sha: %H %P`, // just a predictable commit header we can detect } ) type gitscannerResult struct { Pointer *WrappedPointer Err error } func scanUnpushed(cb GitScannerFoundPointer, remote string) error { logArgs := []string{ "--branches", "--tags", // include all locally referenced commits "--not"} // but exclude everything that comes after if len(remote) == 0 { logArgs = append(logArgs, "--remotes") } else { logArgs = append(logArgs, fmt.Sprintf("--remotes=%v", remote)) } // Add standard search args to find lfs references logArgs = append(logArgs, logLfsSearchArgs...) cmd, err := git.Log(logArgs...) 
if err != nil { return err } parseScannerLogOutput(cb, LogDiffAdditions, cmd) return nil } func parseScannerLogOutput(cb GitScannerFoundPointer, direction LogDiffDirection, cmd *subprocess.BufferedCmd) { ch := make(chan gitscannerResult, chanBufSize) go func() { scanner := newLogScanner(direction, cmd.Stdout) for scanner.Scan() { if p := scanner.Pointer(); p != nil { ch <- gitscannerResult{Pointer: p} } } stderr, _ := ioutil.ReadAll(cmd.Stderr) err := cmd.Wait() if err != nil { ch <- gitscannerResult{Err: fmt.Errorf("Error in git log: %v %v", err, string(stderr))} } close(ch) }() cmd.Stdin.Close() for result := range ch { cb(result.Pointer, result.Err) } } // logPreviousVersions scans history for all previous versions of LFS pointers // from 'since' up to (but not including) the final state at ref func logPreviousSHAs(cb GitScannerFoundPointer, ref string, since time.Time) error { logArgs := []string{ fmt.Sprintf("--since=%v", git.FormatGitDate(since)), } // Add standard search args to find lfs references logArgs = append(logArgs, logLfsSearchArgs...) // ending at ref logArgs = append(logArgs, ref) cmd, err := git.Log(logArgs...) if err != nil { return err } parseScannerLogOutput(cb, LogDiffDeletions, cmd) return nil } func parseLogOutputToPointers(log io.Reader, dir LogDiffDirection, includePaths, excludePaths []string, results chan *WrappedPointer) { scanner := newLogScanner(dir, log) if len(includePaths)+len(excludePaths) > 0 { scanner.Filter = filepathfilter.New(includePaths, excludePaths) } for scanner.Scan() { if p := scanner.Pointer(); p != nil { results <- p } } } // logScanner parses log output formatted as per logLfsSearchArgs & returns // pointers. type logScanner struct { // Filter will ensure file paths matching the include patterns, or not matchin // the exclude patterns are skipped. Filter *filepathfilter.Filter s *bufio.Scanner dir LogDiffDirection pointer *WrappedPointer pointerData *bytes.Buffer currentFilename string currentFileIncluded bool commitHeaderRegex *regexp.Regexp fileHeaderRegex *regexp.Regexp fileMergeHeaderRegex *regexp.Regexp pointerDataRegex *regexp.Regexp } // dir: whether to include results from + or - diffs // r: a stream of output from git log with at least logLfsSearchArgs specified func newLogScanner(dir LogDiffDirection, r io.Reader) *logScanner { return &logScanner{ s: bufio.NewScanner(r), dir: dir, pointerData: &bytes.Buffer{}, currentFileIncluded: true, // no need to compile these regexes on every `git-lfs` call, just ones that // use the scanner. 
commitHeaderRegex: regexp.MustCompile(`^lfs-commit-sha: ([A-Fa-f0-9]{40})(?: ([A-Fa-f0-9]{40}))*`), fileHeaderRegex: regexp.MustCompile(`diff --git a\/(.+?)\s+b\/(.+)`), fileMergeHeaderRegex: regexp.MustCompile(`diff --cc (.+)`), pointerDataRegex: regexp.MustCompile(`^([\+\- ])(version https://git-lfs|oid sha256|size|ext-).*$`), } } func (s *logScanner) Pointer() *WrappedPointer { return s.pointer } func (s *logScanner) Err() error { return s.s.Err() } func (s *logScanner) Scan() bool { s.pointer = nil p, canScan := s.scan() s.pointer = p return canScan } // Utility func used at several points below (keep in narrow scope) func (s *logScanner) finishLastPointer() *WrappedPointer { if s.pointerData.Len() == 0 || !s.currentFileIncluded { return nil } p, err := DecodePointer(s.pointerData) s.pointerData.Reset() if err == nil { return &WrappedPointer{Name: s.currentFilename, Pointer: p} } else { tracerx.Printf("Unable to parse pointer from log: %v", err) return nil } } // For each commit we'll get something like this: /* lfs-commit-sha: 60fde3d23553e10a55e2a32ed18c20f65edd91e7 e2eaf1c10b57da7b98eb5d722ec5912ddeb53ea1 diff --git a/1D_Noise.png b/1D_Noise.png new file mode 100644 index 0000000..2622b4a --- /dev/null +++ b/1D_Noise.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5d84da40ab1f6aa28df2b2bf1ade2cdcd4397133f903c12b4106641b10e1ed6 +size 1289 */ // There can be multiple diffs per commit (multiple binaries) // Also when a binary is changed the diff will include a '-' line for the old SHA func (s *logScanner) scan() (*WrappedPointer, bool) { for s.s.Scan() { line := s.s.Text() if match := s.commitHeaderRegex.FindStringSubmatch(line); match != nil { // Currently we're not pulling out commit groupings, but could if we wanted // This just acts as a delimiter for finishing a multiline pointer if p := s.finishLastPointer(); p != nil { return p, true } } else if match := s.fileHeaderRegex.FindStringSubmatch(line); match != nil { // Finding a regular file header p := s.finishLastPointer() // Pertinent file name depends on whether we're listening to additions or removals if s.dir == LogDiffAdditions { s.setFilename(match[2]) } else { s.setFilename(match[1]) } if p != nil { return p, true } } else if match := s.fileMergeHeaderRegex.FindStringSubmatch(line); match != nil { // Git merge file header is a little different, only one file p := s.finishLastPointer() s.setFilename(match[1]) if p != nil { return p, true } } else if s.currentFileIncluded { if match := s.pointerDataRegex.FindStringSubmatch(line); match != nil { // An LFS pointer data line // Include only the entirety of one side of the diff // -U12 will ensure we always get all of it, even if only // the SHA changed (version & size the same) changeType := match[1][0] // Always include unchanged context lines (normally just the version line) if LogDiffDirection(changeType) == s.dir || changeType == ' ' { // Must skip diff +/- marker s.pointerData.WriteString(line[1:]) s.pointerData.WriteString("\n") // newline was stripped off by scanner } } } } if p := s.finishLastPointer(); p != nil { return p, true } return nil, false } func (s *logScanner) setFilename(name string) { s.currentFilename = name s.currentFileIncluded = s.Filter.Allows(name) } git-lfs-2.3.4/lfs/gitscanner_pointerscanner_test.go000066400000000000000000000063331317167762300225300ustar00rootroot00000000000000package lfs import ( "bytes" "crypto/sha256" "fmt" "io" "math/rand" "testing" "github.com/git-lfs/git-lfs/git" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" ) func TestPointerScannerWithValidOutput(t *testing.T) { blobs := []*Pointer{ &Pointer{ Version: "https://git-lfs.github.com/spec/v1", Oid: "e71eefd918ea175b8f362611f981f648dbf9888ff74865077cb4c9077728f350", Size: 123, OidType: "sha256", }, &Pointer{ Version: "https://git-lfs.github.com/spec/v1", Oid: "0eb69b651be65d5a61d6bebf2c53c811a5bf8031951111000e2077f4d7fe43b1", Size: 132, OidType: "sha256", }, } reader := fakeReaderWithRandoData(t, blobs) if reader == nil { return } scanner := &PointerScanner{ scanner: git.NewObjectScannerFrom(reader), } for i := 0; i < 5; i++ { assertNextEmptyPointer(t, scanner) } assertNextPointer(t, scanner, "e71eefd918ea175b8f362611f981f648dbf9888ff74865077cb4c9077728f350") for i := 0; i < 5; i++ { assertNextEmptyPointer(t, scanner) } assertNextPointer(t, scanner, "0eb69b651be65d5a61d6bebf2c53c811a5bf8031951111000e2077f4d7fe43b1") for i := 0; i < 5; i++ { assertNextEmptyPointer(t, scanner) } assert.False(t, scanner.Scan("")) assert.Nil(t, scanner.Err()) assert.Nil(t, scanner.Pointer()) } func TestPointerScannerWithLargeBlobs(t *testing.T) { buf := bytes.NewBuffer(make([]byte, 0, 1025)) sha := sha256.New() rng := rand.New(rand.NewSource(0)) _, err := io.CopyN(io.MultiWriter(sha, buf), rng, 1025) require.Nil(t, err) fake := bytes.NewBuffer(nil) writeFakeBuffer(t, fake, buf.Bytes(), buf.Len()) scanner := &PointerScanner{ scanner: git.NewObjectScannerFrom(fake), } require.True(t, scanner.Scan("")) assert.Nil(t, scanner.Pointer()) assert.Equal(t, fmt.Sprintf("%x", sha.Sum(nil)), scanner.ContentsSha()) assert.False(t, scanner.Scan("")) assert.Nil(t, scanner.Err()) assert.Nil(t, scanner.Pointer()) } func assertNextPointer(t *testing.T, scanner *PointerScanner, oid string) { assert.True(t, scanner.Scan("")) assert.Nil(t, scanner.Err()) p := scanner.Pointer() assert.NotNil(t, p) assert.Equal(t, oid, p.Oid) } func assertNextEmptyPointer(t *testing.T, scanner *PointerScanner) { assert.True(t, scanner.Scan("")) assert.Nil(t, scanner.Err()) assert.Nil(t, scanner.Pointer()) } func fakeReaderWithRandoData(t *testing.T, blobs []*Pointer) io.Reader { buf := &bytes.Buffer{} rngbuf := make([]byte, 1000) // just under blob size cutoff rng := rand.New(rand.NewSource(0)) for i := 0; i < 5; i++ { n, err := io.ReadFull(rng, rngbuf) if err != nil { t.Fatalf("error reading from rng: %+v", err) } writeFakeBuffer(t, buf, rngbuf, n) } for _, b := range blobs { ptrtext := b.Encoded() writeFakeBuffer(t, buf, []byte(ptrtext), len(ptrtext)) for i := 0; i < 5; i++ { n, err := io.ReadFull(rng, rngbuf) if err != nil { t.Fatalf("error reading from rng: %+v", err) } writeFakeBuffer(t, buf, rngbuf, n) } } return bytes.NewBuffer(buf.Bytes()) } func writeFakeBuffer(t *testing.T, buf *bytes.Buffer, by []byte, size int) { header := fmt.Sprintf("0000000000000000000000000000000000000000 blob %d", size) t.Log(header) buf.WriteString(header + "\n") buf.Write(by) buf.Write([]byte("\n")) } git-lfs-2.3.4/lfs/gitscanner_refs.go000066400000000000000000000056521317167762300174010ustar00rootroot00000000000000package lfs import ( "encoding/hex" "regexp" "github.com/git-lfs/git-lfs/git" ) var z40 = regexp.MustCompile(`\^?0{40}`) type lockableNameSet struct { opt *ScanRefsOptions set GitScannerSet } // Determines if the given blob sha matches a locked file. 
func (s *lockableNameSet) Check(blobSha string) (string, bool) { if s == nil || s.opt == nil || s.set == nil { return "", false } name, ok := s.opt.GetName(blobSha) if !ok { return name, ok } if s.set.Contains(name) { return name, true } return name, false } func noopFoundLockable(name string) {} // scanRefsToChan takes a ref and returns a channel of WrappedPointer objects // for all Git LFS pointers it finds for that ref. // Reports unique oids once only, not multiple times if >1 file uses the same content func scanRefsToChan(scanner *GitScanner, pointerCb GitScannerFoundPointer, refLeft, refRight string, opt *ScanRefsOptions) error { if opt == nil { panic("no scan ref options") } revs, err := revListShas([]string{refLeft, refRight}, nil, opt) if err != nil { return err } lockableSet := &lockableNameSet{opt: opt, set: scanner.PotentialLockables} smallShas, batchLockableCh, err := catFileBatchCheck(revs, lockableSet) if err != nil { return err } lockableCb := scanner.FoundLockable if lockableCb == nil { lockableCb = noopFoundLockable } go func(cb GitScannerFoundLockable, ch chan string) { for name := range ch { cb(name) } }(lockableCb, batchLockableCh) pointers, checkLockableCh, err := catFileBatch(smallShas, lockableSet) if err != nil { return err } for p := range pointers.Results { if name, ok := opt.GetName(p.Sha1); ok { p.Name = name } pointerCb(p, nil) } for lockableName := range checkLockableCh { lockableCb(lockableName) } if err := pointers.Wait(); err != nil { pointerCb(nil, err) } return nil } // revListShas uses git rev-list to return the list of object sha1s // for the given refs. It returns a // channel from which sha1 strings can be read. func revListShas(include, exclude []string, opt *ScanRefsOptions) (*StringChannelWrapper, error) { scanner, err := git.NewRevListScanner(include, exclude, &git.ScanRefsOptions{ Mode: git.ScanningMode(opt.ScanMode), Remote: opt.RemoteName, SkipDeletedBlobs: opt.SkipDeletedBlobs, SkippedRefs: opt.skippedRefs, Mutex: opt.mutex, Names: opt.nameMap, }) if err != nil { return nil, err } revs := make(chan string, chanBufSize) errs := make(chan error, 5) // may be multiple errors go func() { for scanner.Scan() { sha := hex.EncodeToString(scanner.OID()) if name := scanner.Name(); len(name) > 0 { opt.SetName(sha, name) } revs <- sha } if err = scanner.Err(); err != nil { errs <- err } if err = scanner.Close(); err != nil { errs <- err } close(revs) close(errs) }() return NewStringChannelWrapper(revs, errs), nil } git-lfs-2.3.4/lfs/gitscanner_remotes.go000066400000000000000000000030211317167762300201060ustar00rootroot00000000000000package lfs import ( "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/tools" ) // calcSkippedRefs checks that locally cached versions of remote refs are still // present on the remote before they are used as a 'from' point. If the server // implements garbage collection and a remote branch had been deleted since we // last did 'git fetch --prune', then the objects in that branch may have also // been deleted on the server if unreferenced. If some refs are missing on the // remote, use a more explicit diff command. func calcSkippedRefs(remote string) []string { cachedRemoteRefs, _ := git.CachedRemoteRefs(remote) actualRemoteRefs, _ := git.RemoteRefs(remote) // Only check for missing refs on remote; if the ref is different it has probably // moved forward, and if not and the ref has changed to a non-descendant // (force push) then that will cause a re-evaluation in a subsequent command.
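// Each cached ref that is still present on the remote is emitted below as a // "^<sha>" entry, i.e. rev-list-style exclusion syntax, so that subsequent // scans skip history already known to be on the remote.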
missingRefs := tools.NewStringSet() for _, cachedRef := range cachedRemoteRefs { found := false for _, realRemoteRef := range actualRemoteRefs { if cachedRef.Type == realRemoteRef.Type && cachedRef.Name == realRemoteRef.Name { found = true break } } if !found { missingRefs.Add(cachedRef.Name) } } if len(missingRefs) == 0 { return nil } skippedRefs := make([]string, 0, len(cachedRemoteRefs)-missingRefs.Cardinality()) for _, cachedRef := range cachedRemoteRefs { if !missingRefs.Contains(cachedRef.Name) { skippedRefs = append(skippedRefs, "^"+cachedRef.Sha) } } return skippedRefs } git-lfs-2.3.4/lfs/gitscanner_tree.go000066400000000000000000000102511317167762300173700ustar00rootroot00000000000000package lfs import ( "bufio" "bytes" "fmt" "io" "io/ioutil" "strconv" "strings" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" ) // An entry from ls-tree or rev-list including a blob sha and tree path type TreeBlob struct { Sha1 string Filename string } func runScanTree(cb GitScannerFoundPointer, ref string, filter *filepathfilter.Filter) error { // We don't use the nameMap approach here since that's imprecise when >1 file // can be using the same content treeShas, err := lsTreeBlobs(ref, filter) if err != nil { return err } pcw, err := catFileBatchTree(treeShas) if err != nil { return err } for p := range pcw.Results { cb(p, nil) } if err := pcw.Wait(); err != nil { cb(nil, err) } return nil } // catFileBatchTree uses git cat-file --batch to get the object contents // of a git object, given its sha1. The contents will be decoded into // a Git LFS pointer. treeblobs is a channel over which blob entries // will be sent. It returns a channel from which point.Pointers can be read. func catFileBatchTree(treeblobs *TreeBlobChannelWrapper) (*PointerChannelWrapper, error) { scanner, err := NewPointerScanner() if err != nil { scanner.Close() return nil, err } pointers := make(chan *WrappedPointer, chanBufSize) errchan := make(chan error, 10) // Multiple errors possible go func() { for t := range treeblobs.Results { hasNext := scanner.Scan(t.Sha1) if p := scanner.Pointer(); p != nil { p.Name = t.Filename pointers <- p } if err := scanner.Err(); err != nil { errchan <- err } if !hasNext { break } } // Deal with nested error from incoming treeblobs err := treeblobs.Wait() if err != nil { errchan <- err } if err = scanner.Close(); err != nil { errchan <- err } close(pointers) close(errchan) }() return NewPointerChannelWrapper(pointers, errchan), nil } // Use ls-tree at ref to find a list of candidate tree blobs which might be lfs files // The returned channel will be sent these blobs which should be sent to catFileBatchTree // for final check & conversion to Pointer func lsTreeBlobs(ref string, filter *filepathfilter.Filter) (*TreeBlobChannelWrapper, error) { cmd, err := git.LsTree(ref) if err != nil { return nil, err } cmd.Stdin.Close() blobs := make(chan TreeBlob, chanBufSize) errchan := make(chan error, 1) go func() { scanner := newLsTreeScanner(cmd.Stdout) for scanner.Scan() { if t := scanner.TreeBlob(); t != nil && filter.Allows(t.Filename) { blobs <- *t } } stderr, _ := ioutil.ReadAll(cmd.Stderr) err := cmd.Wait() if err != nil { errchan <- fmt.Errorf("Error in git ls-tree: %v %v", err, string(stderr)) } close(blobs) close(errchan) }() return NewTreeBlobChannelWrapper(blobs, errchan), nil } type lsTreeScanner struct { s *bufio.Scanner tree *TreeBlob } func newLsTreeScanner(r io.Reader) *lsTreeScanner { s := bufio.NewScanner(r) s.Split(scanNullLines) return &lsTreeScanner{s: s} 
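// Records handed to this scanner are NUL-separated (see scanNullLines below) // and are expected to look like // // <mode> SP <type> SP <sha1> SP <size>\t<path> // // i.e. `git ls-tree -l -z` style output (presumably; inferred from next() // below, whose TrimSpace handles the space-padded size field).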
} func (s *lsTreeScanner) TreeBlob() *TreeBlob { return s.tree } func (s *lsTreeScanner) Err() error { return nil } func (s *lsTreeScanner) Scan() bool { t, hasNext := s.next() s.tree = t return hasNext } func (s *lsTreeScanner) next() (*TreeBlob, bool) { hasNext := s.s.Scan() line := s.s.Text() parts := strings.SplitN(line, "\t", 2) if len(parts) < 2 { return nil, hasNext } attrs := strings.SplitN(parts[0], " ", 4) if len(attrs) < 4 { return nil, hasNext } if attrs[1] != "blob" { return nil, hasNext } sz, err := strconv.ParseInt(strings.TrimSpace(attrs[3]), 10, 64) if err != nil { return nil, hasNext } if sz < blobSizeCutoff { sha1 := attrs[2] filename := parts[1] return &TreeBlob{Sha1: sha1, Filename: filename}, hasNext } return nil, hasNext } func scanNullLines(data []byte, atEOF bool) (advance int, token []byte, err error) { if atEOF && len(data) == 0 { return 0, nil, nil } if i := bytes.IndexByte(data, '\000'); i >= 0 { // We have a full null-terminated line. return i + 1, data[0:i], nil } // If we're at EOF, we have a final, non-terminated line. Return it. if atEOF { return len(data), data, nil } // Request more data. return 0, nil, nil } git-lfs-2.3.4/lfs/hook.go000066400000000000000000000110601317167762300151560ustar00rootroot00000000000000package lfs import ( "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/tools" "github.com/rubyist/tracerx" ) var ( // The basic hook which just calls 'git lfs TYPE' hookBaseContent = "#!/bin/sh\ncommand -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/{{Command}}.\\n\"; exit 2; }\ngit lfs {{Command}} \"$@\"" ) // A Hook represents a githook as described in http://git-scm.com/docs/githooks. // Hooks have a type, which is the type of hook that they are, and a body, which // represents the thing they will execute when invoked by Git. type Hook struct { Type string Contents string Upgradeables []string } // NewStandardHook creates a new hook using the template script calling 'git lfs theType' func NewStandardHook(theType string, upgradeables []string) *Hook { return &Hook{ Type: theType, Contents: strings.Replace(hookBaseContent, "{{Command}}", theType, -1), Upgradeables: upgradeables, } } func (h *Hook) Exists() bool { _, err := os.Stat(h.Path()) return !os.IsNotExist(err) } // Path returns the desired (or actual, if installed) location where this hook // should be installed. It returns an absolute path in all cases. func (h *Hook) Path() string { return filepath.Join(h.Dir(), h.Type) } // Dir returns the directory used by LFS for storing Git hooks. By default, it // will return the hooks/ sub-directory of the local repository's .git // directory. If `core.hooksPath` is configured and supported (Git version is // greater than "2.9.0"), it will return that instead. func (h *Hook) Dir() string { customHooksSupported := git.Config.IsGitVersionAtLeast("2.9.0") if hp, ok := config.Config.Git.Get("core.hooksPath"); ok && customHooksSupported { return hp } return filepath.Join(config.LocalGitDir, "hooks") } // Install installs this Git hook on disk, or upgrades it if it does exist, and // is upgradeable. It will create a hooks directory relative to the local Git // directory. It returns and halts at any errors, and returns nil if the // operation was a success.
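// When force is false and a hook file already exists, Install attempts an // in-place Upgrade rather than overwriting it; unrecognized existing hook // contents surface as an error from Upgrade.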
func (h *Hook) Install(force bool) error { msg := fmt.Sprintf("Install hook: %s, force=%t, path=%s", h.Type, force, h.Path()) if err := os.MkdirAll(h.Dir(), 0755); err != nil { return err } if h.Exists() && !force { tracerx.Printf(msg + ", upgrading...") return h.Upgrade() } tracerx.Printf(msg) return h.write() } // write writes the contents of this Hook to disk, appending a newline at the // end, and sets the mode to octal 0755. It writes to disk unconditionally, and // returns at any error. func (h *Hook) write() error { return ioutil.WriteFile(h.Path(), []byte(h.Contents+"\n"), 0755) } // Upgrade upgrades the (assumed to be) existing git hook to the current // contents. A hook is considered "upgrade-able" if its contents are matched in // the member variable `Upgradeables`. It halts and returns any errors as they // arise. func (h *Hook) Upgrade() error { match, err := h.matchesCurrent() if err != nil { return err } if !match { return nil } return h.write() } // Uninstall removes the hook on disk so long as it matches the current version, // or any of the past versions of this hook. func (h *Hook) Uninstall() error { if !InRepo() { return errors.New("Not in a git repository") } msg := fmt.Sprintf("Uninstall hook: %s, path=%s", h.Type, h.Path()) match, err := h.matchesCurrent() if err != nil { return err } if !match { tracerx.Printf(msg + ", doesn't match...") return nil } tracerx.Printf(msg) return os.RemoveAll(h.Path()) } // matchesCurrent returns whether or not an existing git hook is able to be // written to or upgraded. A git hook matches those conditions if and only if // its contents match the current contents, or any past "upgrade-able" contents // of this hook. func (h *Hook) matchesCurrent() (bool, error) { file, err := os.Open(h.Path()) if err != nil { return false, err } by, err := ioutil.ReadAll(io.LimitReader(file, 1024)) file.Close() if err != nil { return false, err } contents := strings.TrimSpace(tools.Undent(string(by))) if contents == h.Contents || len(contents) == 0 { return true, nil } for _, u := range h.Upgradeables { if u == contents { return true, nil } } return false, fmt.Errorf("Hook already exists: %s\n\n%s\n", string(h.Type), tools.Indent(contents)) } git-lfs-2.3.4/lfs/lfs.go000066400000000000000000000120651317167762300150050ustar00rootroot00000000000000// Package lfs brings together the core LFS functionality // NOTE: Subject to change, do not rely on this package from outside git-lfs source package lfs import ( "fmt" "os" "path/filepath" "sort" "strings" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/localstorage" "github.com/git-lfs/git-lfs/tools" "github.com/git-lfs/git-lfs/tq" "github.com/rubyist/tracerx" ) // LocalMediaDir returns the root of lfs objects func LocalMediaDir() string { if localstorage.Objects() != nil { return localstorage.Objects().RootDir } return "" } func LocalObjectTempDir() string { if localstorage.Objects() != nil { return localstorage.Objects().TempDir } return "" } func TempDir() string { return localstorage.TempDir } func TempFile(prefix string) (*os.File, error) { return localstorage.TempFile(prefix) } func LocalMediaPath(oid string) (string, error) { return localstorage.Objects().BuildObjectPath(oid) } func LocalMediaPathReadOnly(oid string) string { return localstorage.Objects().ObjectPath(oid) } func LocalReferencePath(sha string) string { if config.LocalReferenceDir == "" { return "" } return filepath.Join(config.LocalReferenceDir, sha[0:2], sha[2:4], sha) } func 
ObjectExistsOfSize(oid string, size int64) bool { path := localstorage.Objects().ObjectPath(oid) return tools.FileExistsOfSize(path, size) } func Environ(cfg *config.Configuration, manifest *tq.Manifest) []string { osEnviron := os.Environ() env := make([]string, 0, len(osEnviron)+7) api, err := lfsapi.NewClient(cfg.Os, cfg.Git) if err != nil { // TODO(@ttaylorr): don't panic panic(err.Error()) } download := api.Endpoints.AccessFor(api.Endpoints.Endpoint("download", cfg.CurrentRemote).Url) upload := api.Endpoints.AccessFor(api.Endpoints.Endpoint("upload", cfg.CurrentRemote).Url) dltransfers := manifest.GetDownloadAdapterNames() sort.Strings(dltransfers) ultransfers := manifest.GetUploadAdapterNames() sort.Strings(ultransfers) fetchPruneConfig := cfg.FetchPruneConfig() storageConfig := cfg.StorageConfig() env = append(env, fmt.Sprintf("LocalWorkingDir=%s", config.LocalWorkingDir), fmt.Sprintf("LocalGitDir=%s", config.LocalGitDir), fmt.Sprintf("LocalGitStorageDir=%s", config.LocalGitStorageDir), fmt.Sprintf("LocalMediaDir=%s", LocalMediaDir()), fmt.Sprintf("LocalReferenceDir=%s", config.LocalReferenceDir), fmt.Sprintf("TempDir=%s", TempDir()), fmt.Sprintf("ConcurrentTransfers=%d", api.ConcurrentTransfers), fmt.Sprintf("TusTransfers=%v", cfg.TusTransfersAllowed()), fmt.Sprintf("BasicTransfersOnly=%v", cfg.BasicTransfersOnly()), fmt.Sprintf("SkipDownloadErrors=%v", cfg.SkipDownloadErrors()), fmt.Sprintf("FetchRecentAlways=%v", fetchPruneConfig.FetchRecentAlways), fmt.Sprintf("FetchRecentRefsDays=%d", fetchPruneConfig.FetchRecentRefsDays), fmt.Sprintf("FetchRecentCommitsDays=%d", fetchPruneConfig.FetchRecentCommitsDays), fmt.Sprintf("FetchRecentRefsIncludeRemotes=%v", fetchPruneConfig.FetchRecentRefsIncludeRemotes), fmt.Sprintf("PruneOffsetDays=%d", fetchPruneConfig.PruneOffsetDays), fmt.Sprintf("PruneVerifyRemoteAlways=%v", fetchPruneConfig.PruneVerifyRemoteAlways), fmt.Sprintf("PruneRemoteName=%s", fetchPruneConfig.PruneRemoteName), fmt.Sprintf("LfsStorageDir=%s", storageConfig.LfsStorageDir), fmt.Sprintf("AccessDownload=%s", download), fmt.Sprintf("AccessUpload=%s", upload), fmt.Sprintf("DownloadTransfers=%s", strings.Join(dltransfers, ",")), fmt.Sprintf("UploadTransfers=%s", strings.Join(ultransfers, ",")), ) if len(cfg.FetchExcludePaths()) > 0 { env = append(env, fmt.Sprintf("FetchExclude=%s", strings.Join(cfg.FetchExcludePaths(), ", "))) } if len(cfg.FetchIncludePaths()) > 0 { env = append(env, fmt.Sprintf("FetchInclude=%s", strings.Join(cfg.FetchIncludePaths(), ", "))) } for _, ext := range cfg.Extensions() { env = append(env, fmt.Sprintf("Extension[%d]=%s", ext.Priority, ext.Name)) } for _, e := range osEnviron { if !strings.Contains(strings.SplitN(e, "=", 2)[0], "GIT_") { continue } env = append(env, e) } return env } func InRepo() bool { return config.LocalGitDir != "" } func ClearTempObjects() error { if localstorage.Objects() == nil { return nil } return localstorage.Objects().ClearTempObjects() } func ScanObjectsChan() <-chan localstorage.Object { return localstorage.Objects().ScanObjectsChan() } func init() { tracerx.DefaultKey = "GIT" tracerx.Prefix = "trace git-lfs: " if len(os.Getenv("GIT_TRACE")) < 1 { if tt := os.Getenv("GIT_TRANSFER_TRACE"); len(tt) > 0 { os.Setenv("GIT_TRACE", tt) } } } const ( gitExt = ".git" gitPtrPrefix = "gitdir: " ) // only used in tests func AllObjects() []localstorage.Object { return localstorage.Objects().AllObjects() } func LinkOrCopyFromReference(oid string, size int64) error { if ObjectExistsOfSize(oid, size) { return nil } altMediafile := 
LocalReferencePath(oid) mediafile, err := LocalMediaPath(oid) if err != nil { return err } if altMediafile != "" && tools.FileExistsOfSize(altMediafile, size) { return LinkOrCopy(altMediafile, mediafile) } return nil } git-lfs-2.3.4/lfs/lfs_test.go000066400000000000000000000030711317167762300160410ustar00rootroot00000000000000package lfs_test // avoid import cycle import ( "fmt" "sort" "testing" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/test" "github.com/stretchr/testify/assert" ) func TestAllCurrentObjectsNone(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() actual := lfs.AllObjects() if len(actual) > 0 { for _, file := range actual { t.Logf("Found: %v", file) } t.Error("Should be no objects") } } func TestAllCurrentObjectsSome(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() // We're not testing commits here, just storage, so just create a single // commit input with lots of files to generate many oids numFiles := 20 files := make([]*test.FileInput, 0, numFiles) for i := 0; i < numFiles; i++ { // Must be >=16 bytes for each file to be unique files = append(files, &test.FileInput{Filename: fmt.Sprintf("file%d.txt", i), Size: 30}) } inputs := []*test.CommitInput{ {Files: files}, } outputs := repo.AddCommits(inputs) expected := make([]*lfs.Pointer, 0, numFiles) for _, f := range outputs[0].Files { expected = append(expected, f) } actualObjects := lfs.AllObjects() actual := make([]*lfs.Pointer, len(actualObjects)) for idx, f := range actualObjects { actual[idx] = lfs.NewPointer(f.Oid, f.Size, nil) } // sort to ensure comparison is equal sort.Sort(test.PointersByOid(expected)) sort.Sort(test.PointersByOid(actual)) assert.Equal(t, expected, actual, "Oids from disk should be the same as in commits") } git-lfs-2.3.4/lfs/pointer.go000066400000000000000000000156411317167762300157040ustar00rootroot00000000000000package lfs import ( "bufio" "bytes" "fmt" "io" "os" "regexp" "sort" "strconv" "strings" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/progress" "github.com/git-lfs/git-lfs/tq" ) var ( v1Aliases = []string{ "http://git-media.io/v/2", // alpha "https://hawser.github.com/spec/v1", // pre-release "https://git-lfs.github.com/spec/v1", // public launch } latest = "https://git-lfs.github.com/spec/v1" oidType = "sha256" oidRE = regexp.MustCompile(`\A[[:alnum:]]{64}`) matcherRE = regexp.MustCompile("git-media|hawser|git-lfs") extRE = regexp.MustCompile(`\Aext-\d{1}-\w+`) pointerKeys = []string{"version", "oid", "size"} ) type Pointer struct { Version string Oid string Size int64 OidType string Extensions []*PointerExtension } // A PointerExtension is parsed from the Git LFS Pointer file. 
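// Within the pointer text each extension occupies its own key/value line of // the form "ext-<priority>-<name> <oid-type>:<oid>", e.g. (illustrative // values) "ext-0-foo sha256:<64-hex-oid>"; see Encoded below for the exact // layout.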
type PointerExtension struct { Name string Priority int Oid string OidType string } type ByPriority []*PointerExtension func (p ByPriority) Len() int { return len(p) } func (p ByPriority) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p ByPriority) Less(i, j int) bool { return p[i].Priority < p[j].Priority } func NewPointer(oid string, size int64, exts []*PointerExtension) *Pointer { return &Pointer{latest, oid, size, oidType, exts} } func NewPointerExtension(name string, priority int, oid string) *PointerExtension { return &PointerExtension{name, priority, oid, oidType} } func (p *Pointer) Smudge(writer io.Writer, workingfile string, download bool, manifest *tq.Manifest, cb progress.CopyCallback) (int64, error) { return PointerSmudge(writer, p, workingfile, download, manifest, cb) } func (p *Pointer) Encode(writer io.Writer) (int, error) { return EncodePointer(writer, p) } func (p *Pointer) Encoded() string { if p.Size == 0 { return "" } var buffer bytes.Buffer buffer.WriteString(fmt.Sprintf("version %s\n", latest)) for _, ext := range p.Extensions { buffer.WriteString(fmt.Sprintf("ext-%d-%s %s:%s\n", ext.Priority, ext.Name, ext.OidType, ext.Oid)) } buffer.WriteString(fmt.Sprintf("oid %s:%s\n", p.OidType, p.Oid)) buffer.WriteString(fmt.Sprintf("size %d\n", p.Size)) return buffer.String() } func EncodePointer(writer io.Writer, pointer *Pointer) (int, error) { return writer.Write([]byte(pointer.Encoded())) } func DecodePointerFromFile(file string) (*Pointer, error) { // Check size before reading stat, err := os.Stat(file) if err != nil { return nil, err } if stat.Size() > blobSizeCutoff { return nil, errors.NewNotAPointerError(errors.New("file size exceeds lfs pointer size cutoff")) } f, err := os.OpenFile(file, os.O_RDONLY, 0644) if err != nil { return nil, err } defer f.Close() return DecodePointer(f) } func DecodePointer(reader io.Reader) (*Pointer, error) { p, _, err := DecodeFrom(reader) return p, err } // DecodeFrom decodes an *lfs.Pointer from the given io.Reader, "reader". // If the pointer encoded in the reader could successfully be read and decoded, // it will be returned with a nil error. // // If the pointer could not be decoded, an io.Reader containing the entire // blob's data will be returned, along with a parse error. 
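// An illustrative call site (not part of the original source) that falls back // to streaming the raw blob when decoding fails: // // p, r, err := DecodeFrom(f) // if err != nil { // io.Copy(dst, r) // r still yields the entire blob contents // }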
func DecodeFrom(reader io.Reader) (*Pointer, io.Reader, error) { buf := make([]byte, blobSizeCutoff) n, err := reader.Read(buf) buf = buf[:n] var contents io.Reader = bytes.NewReader(buf) if err != io.EOF { contents = io.MultiReader(contents, reader) } if err != nil && err != io.EOF { return nil, contents, err } p, err := decodeKV(bytes.TrimSpace(buf)) return p, contents, err } func verifyVersion(version string) error { if len(version) == 0 { return errors.NewNotAPointerError(errors.New("Missing version")) } for _, v := range v1Aliases { if v == version { return nil } } return errors.New("Invalid version: " + version) } func decodeKV(data []byte) (*Pointer, error) { kvps, exts, err := decodeKVData(data) if err != nil { if errors.IsBadPointerKeyError(err) { return nil, errors.StandardizeBadPointerError(err) } return nil, err } if err := verifyVersion(kvps["version"]); err != nil { return nil, err } value, ok := kvps["oid"] if !ok { return nil, errors.New("Invalid Oid") } oid, err := parseOid(value) if err != nil { return nil, err } value, ok = kvps["size"] size, err := strconv.ParseInt(value, 10, 0) if err != nil || size < 0 { return nil, fmt.Errorf("Invalid size: %q", value) } var extensions []*PointerExtension if exts != nil { for key, value := range exts { ext, err := parsePointerExtension(key, value) if err != nil { return nil, err } extensions = append(extensions, ext) } if err = validatePointerExtensions(extensions); err != nil { return nil, err } sort.Sort(ByPriority(extensions)) } return NewPointer(oid, size, extensions), nil } func parseOid(value string) (string, error) { parts := strings.SplitN(value, ":", 2) if len(parts) != 2 { return "", errors.New("Invalid Oid value: " + value) } if parts[0] != oidType { return "", errors.New("Invalid Oid type: " + parts[0]) } oid := parts[1] if !oidRE.Match([]byte(oid)) { return "", errors.New("Invalid Oid: " + oid) } return oid, nil } func parsePointerExtension(key string, value string) (*PointerExtension, error) { keyParts := strings.SplitN(key, "-", 3) if len(keyParts) != 3 || keyParts[0] != "ext" { return nil, errors.New("Invalid extension value: " + value) } p, err := strconv.Atoi(keyParts[1]) if err != nil || p < 0 { return nil, errors.New("Invalid priority: " + keyParts[1]) } name := keyParts[2] oid, err := parseOid(value) if err != nil { return nil, err } return NewPointerExtension(name, p, oid), nil } func validatePointerExtensions(exts []*PointerExtension) error { m := make(map[int]struct{}) for _, ext := range exts { if _, exist := m[ext.Priority]; exist { return fmt.Errorf("Duplicate priority found: %d", ext.Priority) } m[ext.Priority] = struct{}{} } return nil } func decodeKVData(data []byte) (kvps map[string]string, exts map[string]string, err error) { kvps = make(map[string]string) if !matcherRE.Match(data) { err = errors.NewNotAPointerError(errors.New("invalid header")) return } scanner := bufio.NewScanner(bytes.NewBuffer(data)) line := 0 numKeys := len(pointerKeys) for scanner.Scan() { text := scanner.Text() if len(text) == 0 { continue } parts := strings.SplitN(text, " ", 2) if len(parts) < 2 { err = fmt.Errorf("Error reading line %d: %s", line, text) return } key := parts[0] value := parts[1] if numKeys <= line { err = fmt.Errorf("Extra line: %s", text) return } if expected := pointerKeys[line]; key != expected { if !extRE.Match([]byte(key)) { err = errors.NewBadPointerKeyError(expected, key) return } if exts == nil { exts = make(map[string]string) } exts[key] = value continue } line += 1 kvps[key] = value } err = 
scanner.Err() return } git-lfs-2.3.4/lfs/pointer_clean.go000066400000000000000000000045131317167762300170420ustar00rootroot00000000000000package lfs import ( "bytes" "crypto/sha256" "encoding/hex" "io" "os" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/progress" "github.com/git-lfs/git-lfs/tools" ) type cleanedAsset struct { Filename string *Pointer } func PointerClean(reader io.Reader, fileName string, fileSize int64, cb progress.CopyCallback) (*cleanedAsset, error) { extensions, err := config.Config.SortedExtensions() if err != nil { return nil, err } var oid string var size int64 var tmp *os.File var exts []*PointerExtension if len(extensions) > 0 { request := &pipeRequest{"clean", reader, fileName, extensions} var response pipeResponse if response, err = pipeExtensions(request); err != nil { return nil, err } oid = response.results[len(response.results)-1].oidOut tmp = response.file var stat os.FileInfo if stat, err = os.Stat(tmp.Name()); err != nil { return nil, err } size = stat.Size() for _, result := range response.results { if result.oidIn != result.oidOut { ext := NewPointerExtension(result.name, len(exts), result.oidIn) exts = append(exts, ext) } } } else { oid, size, tmp, err = copyToTemp(reader, fileSize, cb) if err != nil { return nil, err } } pointer := NewPointer(oid, size, exts) return &cleanedAsset{tmp.Name(), pointer}, err } func copyToTemp(reader io.Reader, fileSize int64, cb progress.CopyCallback) (oid string, size int64, tmp *os.File, err error) { tmp, err = TempFile("") if err != nil { return } defer tmp.Close() oidHash := sha256.New() writer := io.MultiWriter(oidHash, tmp) if fileSize == 0 { cb = nil } ptr, buf, err := DecodeFrom(reader) by := make([]byte, blobSizeCutoff) n, rerr := buf.Read(by) by = by[:n] if rerr != nil || (err == nil && len(by) < 512) { err = errors.NewCleanPointerError(ptr, by) return } var from io.Reader = bytes.NewReader(by) if fileSize < 0 || int64(len(by)) < fileSize { // If there is still more data to be read from the file, tack on // the original reader and continue the read from there. 
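// (A negative fileSize means the caller did not know the size up front, so // more data must be assumed to remain beyond the prefix just read.)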
from = io.MultiReader(from, reader) } size, err = tools.CopyWithCallback(writer, from, fileSize, cb) if err != nil { return } oid = hex.EncodeToString(oidHash.Sum(nil)) return } func (a *cleanedAsset) Teardown() error { return os.Remove(a.Filename) } git-lfs-2.3.4/lfs/pointer_smudge.go000066400000000000000000000123321317167762300172420ustar00rootroot00000000000000package lfs import ( "fmt" "io" "os" "path/filepath" "github.com/git-lfs/git-lfs/tools" "github.com/git-lfs/git-lfs/tools/humanize" "github.com/git-lfs/git-lfs/tq" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/progress" "github.com/rubyist/tracerx" ) func PointerSmudgeToFile(filename string, ptr *Pointer, download bool, manifest *tq.Manifest, cb progress.CopyCallback) error { os.MkdirAll(filepath.Dir(filename), 0755) file, err := os.Create(filename) if err != nil { return fmt.Errorf("Could not create working directory file: %v", err) } defer file.Close() if _, err := PointerSmudge(file, ptr, filename, download, manifest, cb); err != nil { if errors.IsDownloadDeclinedError(err) { // write placeholder data instead file.Seek(0, os.SEEK_SET) ptr.Encode(file) return err } else { return fmt.Errorf("Could not write working directory file: %v", err) } } return nil } func PointerSmudge(writer io.Writer, ptr *Pointer, workingfile string, download bool, manifest *tq.Manifest, cb progress.CopyCallback) (int64, error) { mediafile, err := LocalMediaPath(ptr.Oid) if err != nil { return 0, err } LinkOrCopyFromReference(ptr.Oid, ptr.Size) stat, statErr := os.Stat(mediafile) if statErr == nil && stat != nil { fileSize := stat.Size() if fileSize == 0 || fileSize != ptr.Size { tracerx.Printf("Removing %s, size %d is invalid", mediafile, fileSize) os.RemoveAll(mediafile) stat = nil } } var n int64 if statErr != nil || stat == nil { if download { n, err = downloadFile(writer, ptr, workingfile, mediafile, manifest, cb) } else { return 0, errors.NewDownloadDeclinedError(statErr, "smudge") } } else { n, err = readLocalFile(writer, ptr, mediafile, workingfile, cb) } if err != nil { return 0, errors.NewSmudgeError(err, ptr.Oid, mediafile) } return n, nil } func downloadFile(writer io.Writer, ptr *Pointer, workingfile, mediafile string, manifest *tq.Manifest, cb progress.CopyCallback) (int64, error) { fmt.Fprintf(os.Stderr, "Downloading %s (%s)\n", workingfile, humanize.FormatBytes(uint64(ptr.Size))) // NOTE: if given, "cb" is a progress.CopyCallback which writes updates // to the logpath specified by GIT_LFS_PROGRESS. // // Either way, forward it into the *tq.TransferQueue so that updates are // sent over correctly. 
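// The queue is used one-shot below: Add the single object, Wait for the // queue to drain, then collect any accumulated errors before reading the // result back from disk.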
q := tq.NewTransferQueue(tq.Download, manifest, "", tq.WithProgressCallback(cb)) q.Add(filepath.Base(workingfile), mediafile, ptr.Oid, ptr.Size) q.Wait() if errs := q.Errors(); len(errs) > 0 { var multiErr error for _, e := range errs { if multiErr != nil { multiErr = fmt.Errorf("%v\n%v", multiErr, e) } else { multiErr = e } } // return the aggregated error only once every failure has been collected return 0, errors.Wrapf(multiErr, "Error downloading %s (%s)", workingfile, ptr.Oid) } return readLocalFile(writer, ptr, mediafile, workingfile, nil) } func readLocalFile(writer io.Writer, ptr *Pointer, mediafile string, workingfile string, cb progress.CopyCallback) (int64, error) { reader, err := os.Open(mediafile) if err != nil { return 0, errors.Wrapf(err, "Error opening media file.") } defer reader.Close() if ptr.Size == 0 { if stat, _ := os.Stat(mediafile); stat != nil { ptr.Size = stat.Size() } } if len(ptr.Extensions) > 0 { registeredExts := config.Config.Extensions() extensions := make(map[string]config.Extension) for _, ptrExt := range ptr.Extensions { ext, ok := registeredExts[ptrExt.Name] if !ok { err := fmt.Errorf("Extension '%s' is not configured.", ptrExt.Name) return 0, errors.Wrap(err, "smudge") } ext.Priority = ptrExt.Priority extensions[ext.Name] = ext } exts, err := config.SortExtensions(extensions) if err != nil { return 0, errors.Wrap(err, "smudge") } // pipe extensions in reverse order var extsR []config.Extension for i := range exts { ext := exts[len(exts)-1-i] extsR = append(extsR, ext) } request := &pipeRequest{"smudge", reader, workingfile, extsR} response, err := pipeExtensions(request) if err != nil { return 0, errors.Wrap(err, "smudge") } actualExts := make(map[string]*pipeExtResult) for _, result := range response.results { actualExts[result.name] = result } // verify name, order, and oids oid := response.results[0].oidIn if ptr.Oid != oid { err = fmt.Errorf("Actual oid %s during smudge does not match expected %s", oid, ptr.Oid) return 0, errors.Wrap(err, "smudge") } for _, expected := range ptr.Extensions { actual := actualExts[expected.Name] if actual.name != expected.Name { err = fmt.Errorf("Actual extension name '%s' does not match expected '%s'", actual.name, expected.Name) return 0, errors.Wrap(err, "smudge") } if actual.oidOut != expected.Oid { err = fmt.Errorf("Actual oid %s for extension '%s' does not match expected %s", actual.oidOut, expected.Name, expected.Oid) return 0, errors.Wrap(err, "smudge") } } // setup reader reader, err = os.Open(response.file.Name()) if err != nil { return 0, errors.Wrapf(err, "Error opening smudged file: %s", err) } defer reader.Close() } n, err := tools.CopyWithCallback(writer, reader, ptr.Size, cb) if err != nil { return n, errors.Wrapf(err, "Error reading from media file: %s", err) } return n, nil } git-lfs-2.3.4/lfs/pointer_test.go000066400000000000000000000243011317167762300167340ustar00rootroot00000000000000package lfs import ( "bufio" "bytes" "io/ioutil" "reflect" "strings" "testing" "github.com/git-lfs/git-lfs/errors" "github.com/stretchr/testify/assert" ) func TestEncode(t *testing.T) { var buf bytes.Buffer pointer := NewPointer("booya", 12345, nil) _, err := EncodePointer(&buf, pointer) assert.Nil(t, err) bufReader := bufio.NewReader(&buf) assertLine(t, bufReader, "version https://git-lfs.github.com/spec/v1\n") assertLine(t, bufReader, "oid sha256:booya\n") assertLine(t, bufReader, "size 12345\n") line, err := bufReader.ReadString('\n') if err == nil { t.Fatalf("More to read: %s", line) } assert.Equal(t, "EOF", err.Error()) } func TestEncodeEmpty(t *testing.T) { var buf bytes.Buffer
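// Encoded returns "" for a zero-size pointer, so encoding an empty file is // expected to write nothing; the assertions below check that the buffer // yields only EOF.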
pointer := NewPointer("", 0, nil) _, err := EncodePointer(&buf, pointer) assert.Equal(t, nil, err) bufReader := bufio.NewReader(&buf) val, err := bufReader.ReadString('\n') assert.Equal(t, "", val) assert.Equal(t, "EOF", err.Error()) } func TestEncodeExtensions(t *testing.T) { var buf bytes.Buffer exts := []*PointerExtension{ NewPointerExtension("foo", 0, "foo_oid"), NewPointerExtension("bar", 1, "bar_oid"), NewPointerExtension("baz", 2, "baz_oid"), } pointer := NewPointer("main_oid", 12345, exts) _, err := EncodePointer(&buf, pointer) assert.Nil(t, err) bufReader := bufio.NewReader(&buf) assertLine(t, bufReader, "version https://git-lfs.github.com/spec/v1\n") assertLine(t, bufReader, "ext-0-foo sha256:foo_oid\n") assertLine(t, bufReader, "ext-1-bar sha256:bar_oid\n") assertLine(t, bufReader, "ext-2-baz sha256:baz_oid\n") assertLine(t, bufReader, "oid sha256:main_oid\n") assertLine(t, bufReader, "size 12345\n") line, err := bufReader.ReadString('\n') if err == nil { t.Fatalf("More to read: %s", line) } assert.Equal(t, "EOF", err.Error()) } func assertLine(t *testing.T, r *bufio.Reader, expected string) { actual, err := r.ReadString('\n') assert.Nil(t, err) assert.Equal(t, expected, actual) } func TestDecodeTinyFile(t *testing.T) { ex := "this is not a git-lfs file!" p, err := DecodePointer(bytes.NewBufferString(ex)) if p != nil { t.Errorf("pointer was decoded: %v", p) } if !errors.IsNotAPointerError(err) { t.Errorf("error is not a NotAPointerError: %s: '%v'", reflect.TypeOf(err), err) } } func TestDecode(t *testing.T) { ex := `version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345` p, err := DecodePointer(bytes.NewBufferString(ex)) assertEqualWithExample(t, ex, nil, err) assertEqualWithExample(t, ex, latest, p.Version) assertEqualWithExample(t, ex, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", p.Oid) assertEqualWithExample(t, ex, "sha256", p.OidType) assertEqualWithExample(t, ex, int64(12345), p.Size) } func TestDecodeExtensions(t *testing.T) { ex := `version https://git-lfs.github.com/spec/v1 ext-0-foo sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ext-1-bar sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb ext-2-baz sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345` p, err := DecodePointer(bytes.NewBufferString(ex)) assertEqualWithExample(t, ex, nil, err) assertEqualWithExample(t, ex, latest, p.Version) assertEqualWithExample(t, ex, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", p.Oid) assertEqualWithExample(t, ex, int64(12345), p.Size) assertEqualWithExample(t, ex, "sha256", p.OidType) assertEqualWithExample(t, ex, "foo", p.Extensions[0].Name) assertEqualWithExample(t, ex, 0, p.Extensions[0].Priority) assertEqualWithExample(t, ex, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", p.Extensions[0].Oid) assertEqualWithExample(t, ex, "sha256", p.Extensions[0].OidType) assertEqualWithExample(t, ex, "bar", p.Extensions[1].Name) assertEqualWithExample(t, ex, 1, p.Extensions[1].Priority) assertEqualWithExample(t, ex, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", p.Extensions[1].Oid) assertEqualWithExample(t, ex, "sha256", p.Extensions[1].OidType) assertEqualWithExample(t, ex, "baz", p.Extensions[2].Name) assertEqualWithExample(t, ex, 2, p.Extensions[2].Priority) 
assertEqualWithExample(t, ex, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", p.Extensions[2].Oid) assertEqualWithExample(t, ex, "sha256", p.Extensions[2].OidType) } func TestDecodeExtensionsSort(t *testing.T) { ex := `version https://git-lfs.github.com/spec/v1 ext-2-baz sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ext-0-foo sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ext-1-bar sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345` p, err := DecodePointer(bytes.NewBufferString(ex)) assertEqualWithExample(t, ex, nil, err) assertEqualWithExample(t, ex, latest, p.Version) assertEqualWithExample(t, ex, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", p.Oid) assertEqualWithExample(t, ex, int64(12345), p.Size) assertEqualWithExample(t, ex, "sha256", p.OidType) assertEqualWithExample(t, ex, "foo", p.Extensions[0].Name) assertEqualWithExample(t, ex, 0, p.Extensions[0].Priority) assertEqualWithExample(t, ex, "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", p.Extensions[0].Oid) assertEqualWithExample(t, ex, "sha256", p.Extensions[0].OidType) assertEqualWithExample(t, ex, "bar", p.Extensions[1].Name) assertEqualWithExample(t, ex, 1, p.Extensions[1].Priority) assertEqualWithExample(t, ex, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", p.Extensions[1].Oid) assertEqualWithExample(t, ex, "sha256", p.Extensions[1].OidType) assertEqualWithExample(t, ex, "baz", p.Extensions[2].Name) assertEqualWithExample(t, ex, 2, p.Extensions[2].Priority) assertEqualWithExample(t, ex, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", p.Extensions[2].Oid) assertEqualWithExample(t, ex, "sha256", p.Extensions[2].OidType) } func TestDecodePreRelease(t *testing.T) { ex := `version https://hawser.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345` p, err := DecodePointer(bytes.NewBufferString(ex)) assertEqualWithExample(t, ex, nil, err) assertEqualWithExample(t, ex, latest, p.Version) assertEqualWithExample(t, ex, "4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393", p.Oid) assertEqualWithExample(t, ex, "sha256", p.OidType) assertEqualWithExample(t, ex, int64(12345), p.Size) } func TestDecodeFromEmptyReader(t *testing.T) { p, buf, err := DecodeFrom(strings.NewReader("")) by, rerr := ioutil.ReadAll(buf) assert.Nil(t, rerr) assert.EqualError(t, err, "Pointer file error: invalid header") assert.Nil(t, p) assert.Empty(t, by) } func TestDecodeInvalid(t *testing.T) { examples := []string{ "invalid stuff", // no sha "# git-media", // bad oid `version https://git-lfs.github.com/spec/v1 oid sha256:boom size 12345`, // bad oid type `version https://git-lfs.github.com/spec/v1 oid shazam:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // no oid `version https://git-lfs.github.com/spec/v1 size 12345`, // bad version `version http://git-media.io/v/whatever oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // no version `oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // bad size `version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size fif`, // no size `version https://git-lfs.github.com/spec/v1 oid 
sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393`, // bad `key value` format `version=https://git-lfs.github.com/spec/v1 oid=sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size=fif`, // no git-media `version=http://wat.io/v/2 oid=sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size=fif`, // extra key `version https://git-lfs.github.com/spec/v1 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345 wat wat`, // keys out of order `version https://git-lfs.github.com/spec/v1 size 12345 oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393`, // bad ext name `version https://git-lfs.github.com/spec/v1 ext-0-$$$$ sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // bad ext priority `version https://git-lfs.github.com/spec/v1 ext-#-foo sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // duplicate ext priority `version https://git-lfs.github.com/spec/v1 ext-0-foo sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ext-0-bar sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // ext priority over 9 `version https://git-lfs.github.com/spec/v1 ext-10-foo sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // bad ext oid `version https://git-lfs.github.com/spec/v1 ext-0-foo sha256:boom oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, // bad ext oid type `version https://git-lfs.github.com/spec/v1 ext-0-foo boom:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393 size 12345`, } for _, ex := range examples { p, err := DecodePointer(bytes.NewBufferString(ex)) if err == nil { t.Errorf("No error decoding: %v\nFrom:\n%s", p, strings.TrimSpace(ex)) } } } func assertEqualWithExample(t *testing.T, example string, expected, actual interface{}) { assert.Equal(t, expected, actual, "Example:\n%s", strings.TrimSpace(example)) } git-lfs-2.3.4/lfs/scanner.go000066400000000000000000000103051317167762300156450ustar00rootroot00000000000000package lfs import "github.com/git-lfs/git-lfs/tools" const ( // blobSizeCutoff is used to determine which files to scan for Git LFS // pointers. Any file with a size below this cutoff will be scanned. blobSizeCutoff = 1024 // stdoutBufSize is the size of the buffers given to a sub-process stdout stdoutBufSize = 16384 // chanBufSize is the size of the channels used to pass data from one // sub-process to another. chanBufSize = 100 ) // WrappedPointer wraps a pointer.Pointer and provides the git sha1 // and the file name associated with the object, taken from the // rev-list output. type WrappedPointer struct { Sha1 string Name string SrcName string Status string *Pointer } // catFileBatchCheck uses git cat-file --batch-check to get the type // and size of a git object. Any object that isn't of type blob and // under the blobSizeCutoff will be ignored. revs is a channel over // which strings containing git sha1s will be sent. It returns a channel // from which sha1 strings can be read. 
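// For reference, `git cat-file --batch-check` emits one line per object of // the form "<sha1> <type> <size>", which is how type and size are learned // without reading blob contents; only blobs smaller than blobSizeCutoff are // passed through.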
func catFileBatchCheck(revs *StringChannelWrapper, lockableSet *lockableNameSet) (*StringChannelWrapper, chan string, error) { smallRevCh := make(chan string, chanBufSize) lockableCh := make(chan string, chanBufSize) errCh := make(chan error, 2) // up to 2 errors, one from each goroutine if err := runCatFileBatchCheck(smallRevCh, lockableCh, lockableSet, revs, errCh); err != nil { return nil, nil, err } return NewStringChannelWrapper(smallRevCh, errCh), lockableCh, nil } // catFileBatch uses git cat-file --batch to get the object contents // of a git object, given its sha1. The contents will be decoded into // a Git LFS pointer. revs is a channel over which strings containing Git SHA1s // will be sent. It returns a channel from which point.Pointers can be read. func catFileBatch(revs *StringChannelWrapper, lockableSet *lockableNameSet) (*PointerChannelWrapper, chan string, error) { pointerCh := make(chan *WrappedPointer, chanBufSize) lockableCh := make(chan string, chanBufSize) errCh := make(chan error, 5) // shared by 2 goroutines & may add more detail errors? if err := runCatFileBatch(pointerCh, lockableCh, lockableSet, revs, errCh); err != nil { return nil, nil, err } return NewPointerChannelWrapper(pointerCh, errCh), lockableCh, nil } // ChannelWrapper for pointer Scan* functions to more easily return async error data via Wait() // See NewPointerChannelWrapper for construction / use type PointerChannelWrapper struct { *tools.BaseChannelWrapper Results <-chan *WrappedPointer } // Construct a new channel wrapper for WrappedPointer // Caller can use s.Results directly for normal processing then call Wait() to finish & check for errors // Scan function is required to create error channel large enough not to block (usually 1 is ok) func NewPointerChannelWrapper(pointerChan <-chan *WrappedPointer, errorChan <-chan error) *PointerChannelWrapper { return &PointerChannelWrapper{tools.NewBaseChannelWrapper(errorChan), pointerChan} } // ChannelWrapper for string channel functions to more easily return async error data via Wait() // Caller can use s.Results directly for normal processing then call Wait() to finish & check for errors // See NewStringChannelWrapper for construction / use type StringChannelWrapper struct { *tools.BaseChannelWrapper Results <-chan string } // Construct a new channel wrapper for string // Caller can use s.Results directly for normal processing then call Wait() to finish & check for errors func NewStringChannelWrapper(stringChan <-chan string, errorChan <-chan error) *StringChannelWrapper { return &StringChannelWrapper{tools.NewBaseChannelWrapper(errorChan), stringChan} } // ChannelWrapper for TreeBlob channel functions to more easily return async error data via Wait() // See NewTreeBlobChannelWrapper for construction / use type TreeBlobChannelWrapper struct { *tools.BaseChannelWrapper Results <-chan TreeBlob } // Construct a new channel wrapper for TreeBlob // Caller can use s.Results directly for normal processing then call Wait() to finish & check for errors func NewTreeBlobChannelWrapper(treeBlobChan <-chan TreeBlob, errorChan <-chan error) *TreeBlobChannelWrapper { return &TreeBlobChannelWrapper{tools.NewBaseChannelWrapper(errorChan), treeBlobChan} } git-lfs-2.3.4/lfs/scanner_git_test.go000066400000000000000000000131651317167762300175560ustar00rootroot00000000000000package lfs_test // to avoid import cycles // This is for doing complete git-level tests using test utils // Needs to be a separate file from scanner_test so that we can use a diff package // which 
avoids import cycles with testutils import ( "fmt" "sort" "testing" "time" . "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/test" "github.com/stretchr/testify/assert" ) func TestScanUnpushed(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() inputs := []*test.CommitInput{ { // 0 Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, }, }, { // 1 NewBranch: "branch2", Files: []*test.FileInput{ {Filename: "file1.txt", Size: 25}, }, }, { // 2 ParentBranches: []string{"master"}, // back on master Files: []*test.FileInput{ {Filename: "file1.txt", Size: 30}, }, }, { // 3 NewBranch: "branch3", Files: []*test.FileInput{ {Filename: "file1.txt", Size: 32}, }, }, } repo.AddCommits(inputs) // Add a couple of remotes and test state depending on what's pushed repo.AddRemote("origin") repo.AddRemote("upstream") pointers, err := scanUnpushed("") assert.Nil(t, err, "Should be no error calling ScanUnpushed") assert.Len(t, pointers, 4, "Should be 4 pointers because none pushed") test.RunGitCommand(t, true, "push", "origin", "branch2") // Branch2 will have pushed 2 commits pointers, err = scanUnpushed("") assert.Nil(t, err, "Should be no error calling ScanUnpushed") assert.Len(t, pointers, 2, "Should be 2 pointers") test.RunGitCommand(t, true, "push", "upstream", "master") // Master pushes 1 more commit pointers, err = scanUnpushed("") assert.Nil(t, err, "Should be no error calling ScanUnpushed") assert.Len(t, pointers, 1, "Should be 1 pointer") test.RunGitCommand(t, true, "push", "origin", "branch3") // All pushed (somewhere) pointers, err = scanUnpushed("") assert.Nil(t, err, "Should be no error calling ScanUnpushed") assert.Empty(t, pointers, "Should be 0 pointers unpushed") // Check origin pointers, err = scanUnpushed("origin") assert.Nil(t, err, "Should be no error calling ScanUnpushed") assert.Empty(t, pointers, "Should be 0 pointers unpushed to origin") // Check upstream pointers, err = scanUnpushed("upstream") assert.Nil(t, err, "Should be no error calling ScanUnpushed") assert.Len(t, pointers, 2, "Should be 2 pointers unpushed to upstream") } func scanUnpushed(remoteName string) ([]*WrappedPointer, error) { pointers := make([]*WrappedPointer, 0, 10) var multiErr error gitscanner := NewGitScanner(func(p *WrappedPointer, err error) { if err != nil { if multiErr != nil { multiErr = fmt.Errorf("%v\n%v", multiErr, err) } else { multiErr = err } return } pointers = append(pointers, p) }) if err := gitscanner.ScanUnpushed(remoteName, nil); err != nil { return nil, err } gitscanner.Close() return pointers, multiErr } func TestScanPreviousVersions(t *testing.T) { repo := test.NewRepo(t) repo.Pushd() defer func() { repo.Popd() repo.Cleanup() }() now := time.Now() inputs := []*test.CommitInput{ { // 0 CommitDate: now.AddDate(0, 0, -20), Files: []*test.FileInput{ {Filename: "file1.txt", Size: 20}, {Filename: "file2.txt", Size: 30}, {Filename: "folder/nested.txt", Size: 40}, {Filename: "folder/nested2.txt", Size: 31}, }, }, { // 1 CommitDate: now.AddDate(0, 0, -10), Files: []*test.FileInput{ {Filename: "file2.txt", Size: 22}, }, }, { // 2 NewBranch: "excluded", CommitDate: now.AddDate(0, 0, -6), Files: []*test.FileInput{ {Filename: "file2.txt", Size: 12}, {Filename: "folder/nested2.txt", Size: 16}, }, }, { // 3 ParentBranches: []string{"master"}, CommitDate: now.AddDate(0, 0, -4), Files: []*test.FileInput{ {Filename: "folder/nested.txt", Size: 42}, {Filename: "folder/nested2.txt", Size: 6}, }, }, { // 4 Files: []*test.FileInput{ {Filename: 
"folder/nested.txt", Size: 22}, }, }, } outputs := repo.AddCommits(inputs) // Previous commits excludes final state of each file, which is: // file1.txt [0] (unchanged since first commit so excluded) // file2.txt [1] (because [2] is on another branch so excluded) // folder/nested.txt [4] (updated at last commit) // folder/nested2.txt [3] // The only changes which will be included are changes prior to final state // where the '-' side of the diff is inside the date range // 7 day limit excludes [0] commit, but includes state from that if there // was a subsequent chang pointers, err := scanPreviousVersions(t, "master", now.AddDate(0, 0, -7)) assert.Equal(t, nil, err) // Includes the following 'before' state at commits: // folder/nested.txt [-diff at 4, ie 3, -diff at 3 ie 0] // folder/nested2.txt [-diff at 3 ie 0] // others are either on diff branches, before this window, or unchanged expected := []*WrappedPointer{ {Name: "folder/nested.txt", Pointer: outputs[3].Files[0]}, {Name: "folder/nested.txt", Pointer: outputs[0].Files[2]}, {Name: "folder/nested2.txt", Pointer: outputs[0].Files[3]}, } // Need to sort to compare equality sort.Sort(test.WrappedPointersByOid(expected)) sort.Sort(test.WrappedPointersByOid(pointers)) assert.Equal(t, expected, pointers) } func scanPreviousVersions(t *testing.T, ref string, since time.Time) ([]*WrappedPointer, error) { pointers := make([]*WrappedPointer, 0, 10) gitscanner := NewGitScanner(func(p *WrappedPointer, err error) { if err != nil { t.Error(err) return } pointers = append(pointers, p) }) err := gitscanner.ScanPreviousVersions(ref, since, nil) return pointers, err } git-lfs-2.3.4/lfs/scanner_test.go000066400000000000000000000260151317167762300167110ustar00rootroot00000000000000package lfs import ( "strings" "testing" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/stretchr/testify/assert" ) var ( pointerParseLogOutput = `lfs-commit-sha: 637908bf28b38ab238e1b5e6a5bfbfb2e513a0df 07d571b413957508679042e45508af5945b3f1e5 diff --git a/smoke_1.png b/smoke_1.png deleted file mode 100644 index 2fe5451..0000000 --- a/smoke_1.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8eb65d66303acc60062f44b44ef1f7360d7189db8acf3d066e59e2528f39514e -size 35022 lfs-commit-sha: 07d571b413957508679042e45508af5945b3f1e5 8e5bd456b754f7d61c7157e82edc5ed124be4da6 diff --git a/flare_1.png b/flare_1.png deleted file mode 100644 index 1cfc5a1..0000000 --- a/flare_1.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -ext-0-foo sha256:36485434f4f8a55150282ae7c78619a89de52721c00f48159f2562463df9c043 -ext-1-bar sha256:382a2a13e705bbd8de7e2e13857c26551db17121ac57edca5dec9b5bd753e9c8 -ext-2-ray sha256:423ee9e5988fb4670bf815990e9307c3b23296210c31581dec4d4ae89dabae46 -oid sha256:ea61c67cc5e8b3504d46de77212364045f31d9a023ad4448a1ace2a2fb4eed28 -size 72982 diff --git a/radial_1.png b/radial_1.png index 9daa2e5..c648385 100644 --- a/radial_1.png +++ b/radial_1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:334c8a0a520cf9f58189dba5a9a26c7bff2769b4a3cc199650c00618bde5b9dd -size 16849 +oid sha256:3301b3da173d231f0f6b1f9bf075e573758cd79b3cfeff7623a953d708d6688b +size 3152388 diff --git a/radial_2.png b/radial_2.png index 9daa2e5..c648385 100644 --- a/radial_2.png +++ b/radial_2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -ext-0-foo sha256:36485434f4f8a55150282ae7c78619a89de52721c00f48159f2562463df9c043 -ext-1-bar 
sha256:382a2a13e705bbd8de7e2e13857c26551db17121ac57edca5dec9b5bd753e9c8 -ext-2-ray sha256:423ee9e5988fb4670bf815990e9307c3b23296210c31581dec4d4ae89dabae46 -oid sha256:334c8a0a520cf9f58189dba5a9a26c7bff2769b4a3cc199650c00618bde5b9dd -size 16849 +ext-0-foo sha256:95d8260e8365a9dfd39842bdeee9b20e0e3fe3daf9bb4a8c0a1acb31008ed7b4 +ext-1-bar sha256:674bf4995720a43e03e174bcc1132ca95de6a8e4155fe3b2c482dceb42cbc0a5 +ext-2-ray sha256:0d323c95ae4b0a9c195ddc437c470678bddd2ee0906fb2f7b8166cd2474e22d9 +oid sha256:4b666195c133d8d0541ad0bc0e77399b9dc81861577a98314ac1ff1e9877893a +size 3152388 lfs-commit-sha: 60fde3d23553e10a55e2a32ed18c20f65edd91e7 e2eaf1c10b57da7b98eb5d722ec5912ddeb53ea1 diff --git a/1D_Noise.png b/1D_Noise.png new file mode 100644 index 0000000..2622b4a --- /dev/null +++ b/1D_Noise.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5d84da40ab1f6aa28df2b2bf1ade2cdcd4397133f903c12b4106641b10e1ed6 +size 1289 diff --git a/waveNM.png b/waveNM.png new file mode 100644 index 0000000..8519883 --- /dev/null +++ b/waveNM.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe2c2f236b97bba4585d9909a227a8fa64897d9bbe297fa272f714302d86c908 +size 125873 lfs-commit-sha: 64b3372e108daaa593412d5e1d9df8169a9547ea e99c9cac7ff3f3cf1b2e670a64a5a381c44ffceb diff --git a/hobbit_5armies_2.mov b/hobbit_5armies_2.mov new file mode 100644 index 0000000..92a88f8 --- /dev/null +++ b/hobbit_5armies_2.mov @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +ext-0-foo sha256:b37197ac149950d057521bcb7e00806f0528e19352bd72767165bc390d4f055e +ext-1-bar sha256:c71772e5ea8e8c6f053f0f1dc89f8c01243975b1a040acbcf732fe2dbc0bcb61 +oid sha256:ebff26d6b557b1416a6fded097fd9b9102e2d8195532c377ac365c736c87d4bc +size 127142413 ` ) func TestLogScannerAdditionsNoFiltering(t *testing.T) { r := strings.NewReader(pointerParseLogOutput) scanner := newLogScanner(LogDiffAdditions, r) // modification, + side assertNextScan(t, scanner) p := scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "radial_1.png", p.Name) assert.Equal(t, "3301b3da173d231f0f6b1f9bf075e573758cd79b3cfeff7623a953d708d6688b", p.Oid) assert.Equal(t, int64(3152388), p.Size) } // modification, + side with extensions assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "radial_2.png", p.Name) assert.Equal(t, "4b666195c133d8d0541ad0bc0e77399b9dc81861577a98314ac1ff1e9877893a", p.Oid) assert.Equal(t, int64(3152388), p.Size) } // addition, + side assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "1D_Noise.png", p.Name) assert.Equal(t, "f5d84da40ab1f6aa28df2b2bf1ade2cdcd4397133f903c12b4106641b10e1ed6", p.Oid) assert.Equal(t, int64(1289), p.Size) } // addition, + side assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "waveNM.png", p.Name) assert.Equal(t, "fe2c2f236b97bba4585d9909a227a8fa64897d9bbe297fa272f714302d86c908", p.Oid) assert.Equal(t, int64(125873), p.Size) } // addition, + side with extensions assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "hobbit_5armies_2.mov", p.Name) assert.Equal(t, "ebff26d6b557b1416a6fded097fd9b9102e2d8195532c377ac365c736c87d4bc", p.Oid) assert.Equal(t, int64(127142413), p.Size) } assertScannerDone(t, scanner) assert.Nil(t, scanner.Pointer()) } func TestLogScannerAdditionsFilterInclude(t *testing.T) { r := strings.NewReader(pointerParseLogOutput) scanner := newLogScanner(LogDiffAdditions, r) scanner.Filter = 
filepathfilter.New([]string{"wave*"}, nil) // addition, + side assertNextScan(t, scanner) p := scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "waveNM.png", p.Name) assert.Equal(t, "fe2c2f236b97bba4585d9909a227a8fa64897d9bbe297fa272f714302d86c908", p.Oid) assert.Equal(t, int64(125873), p.Size) } assertScannerDone(t, scanner) assert.Nil(t, scanner.Pointer()) } func TestLogScannerAdditionsFilterExclude(t *testing.T) { r := strings.NewReader(pointerParseLogOutput) scanner := newLogScanner(LogDiffAdditions, r) scanner.Filter = filepathfilter.New(nil, []string{"wave*"}) // modification, + side assertNextScan(t, scanner) p := scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "radial_1.png", p.Name) assert.Equal(t, "3301b3da173d231f0f6b1f9bf075e573758cd79b3cfeff7623a953d708d6688b", p.Oid) assert.Equal(t, int64(3152388), p.Size) } // modification, + side with extensions assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "radial_2.png", p.Name) assert.Equal(t, "4b666195c133d8d0541ad0bc0e77399b9dc81861577a98314ac1ff1e9877893a", p.Oid) assert.Equal(t, int64(3152388), p.Size) } // addition, + side assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "1D_Noise.png", p.Name) assert.Equal(t, "f5d84da40ab1f6aa28df2b2bf1ade2cdcd4397133f903c12b4106641b10e1ed6", p.Oid) assert.Equal(t, int64(1289), p.Size) } // addition, + side with extensions assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "hobbit_5armies_2.mov", p.Name) assert.Equal(t, "ebff26d6b557b1416a6fded097fd9b9102e2d8195532c377ac365c736c87d4bc", p.Oid) assert.Equal(t, int64(127142413), p.Size) } assertScannerDone(t, scanner) assert.Nil(t, scanner.Pointer()) } func TestLogScannerDeletionsNoFiltering(t *testing.T) { r := strings.NewReader(pointerParseLogOutput) scanner := newLogScanner(LogDiffDeletions, r) // deletion, - side assertNextScan(t, scanner) p := scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "smoke_1.png", p.Name) assert.Equal(t, "8eb65d66303acc60062f44b44ef1f7360d7189db8acf3d066e59e2528f39514e", p.Oid) assert.Equal(t, int64(35022), p.Size) } // deletion, - side with extensions assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "flare_1.png", p.Name) assert.Equal(t, "ea61c67cc5e8b3504d46de77212364045f31d9a023ad4448a1ace2a2fb4eed28", p.Oid) assert.Equal(t, int64(72982), p.Size) } // modification, - side assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "radial_1.png", p.Name) assert.Equal(t, "334c8a0a520cf9f58189dba5a9a26c7bff2769b4a3cc199650c00618bde5b9dd", p.Oid) assert.Equal(t, int64(16849), p.Size) } // modification, - side with extensions assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "radial_2.png", p.Name) assert.Equal(t, "334c8a0a520cf9f58189dba5a9a26c7bff2769b4a3cc199650c00618bde5b9dd", p.Oid) assert.Equal(t, int64(16849), p.Size) } assertScannerDone(t, scanner) } func TestLogScannerDeletionsFilterInclude(t *testing.T) { r := strings.NewReader(pointerParseLogOutput) scanner := newLogScanner(LogDiffDeletions, r) scanner.Filter = filepathfilter.New([]string{"flare*"}, nil) // deletion, - side with extensions assertNextScan(t, scanner) p := scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "flare_1.png", p.Name) assert.Equal(t, "ea61c67cc5e8b3504d46de77212364045f31d9a023ad4448a1ace2a2fb4eed28", p.Oid) assert.Equal(t, int64(72982), p.Size) } 
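// Note: with the "flare*" include filter above, flare_1.png is the only
// pointer that can match; the smoke_1.png deletion and both radial_*
// modifications are filtered out, so the scanner should now be exhausted.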
assertScannerDone(t, scanner) } func TestLogScannerDeletionsFilterExclude(t *testing.T) { r := strings.NewReader(pointerParseLogOutput) scanner := newLogScanner(LogDiffDeletions, r) scanner.Filter = filepathfilter.New(nil, []string{"flare*"}) // deletion, - side assertNextScan(t, scanner) p := scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "smoke_1.png", p.Name) assert.Equal(t, "8eb65d66303acc60062f44b44ef1f7360d7189db8acf3d066e59e2528f39514e", p.Oid) assert.Equal(t, int64(35022), p.Size) } // modification, - side assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "radial_1.png", p.Name) assert.Equal(t, "334c8a0a520cf9f58189dba5a9a26c7bff2769b4a3cc199650c00618bde5b9dd", p.Oid) assert.Equal(t, int64(16849), p.Size) } // modification, - side with extensions assertNextScan(t, scanner) p = scanner.Pointer() if assert.NotNil(t, p) { assert.Equal(t, "radial_2.png", p.Name) assert.Equal(t, "334c8a0a520cf9f58189dba5a9a26c7bff2769b4a3cc199650c00618bde5b9dd", p.Oid) assert.Equal(t, int64(16849), p.Size) } assertScannerDone(t, scanner) } func TestLsTreeParser(t *testing.T) { stdout := "100644 blob d899f6551a51cf19763c5955c7a06a2726f018e9 42 .gitattributes\000100644 blob 4d343e022e11a8618db494dc3c501e80c7e18197 126 PB SCN 16 Odhrán.wav" scanner := newLsTreeScanner(strings.NewReader(stdout)) assertNextTreeBlob(t, scanner, "d899f6551a51cf19763c5955c7a06a2726f018e9", ".gitattributes") assertNextTreeBlob(t, scanner, "4d343e022e11a8618db494dc3c501e80c7e18197", "PB SCN 16 Odhrán.wav") assertScannerDone(t, scanner) } func assertNextTreeBlob(t *testing.T, scanner *lsTreeScanner, oid, filename string) { assertNextScan(t, scanner) b := scanner.TreeBlob() assert.NotNil(t, b) assert.Equal(t, oid, b.Sha1) assert.Equal(t, filename, b.Filename) } func BenchmarkLsTreeParser(b *testing.B) { stdout := "100644 blob d899f6551a51cf19763c5955c7a06a2726f018e9 42 .gitattributes\000100644 blob 4d343e022e11a8618db494dc3c501e80c7e18197 126 PB SCN 16 Odhrán.wav" // run the Fib function b.N times for n := 0; n < b.N; n++ { scanner := newLsTreeScanner(strings.NewReader(stdout)) for scanner.Scan() { } } } git-lfs-2.3.4/lfs/setup.go000066400000000000000000000063441317167762300153640ustar00rootroot00000000000000package lfs import ( "fmt" "strings" "github.com/git-lfs/git-lfs/tools" ) var ( // prePushHook invokes `git lfs pre-push` at the pre-push phase. 
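// Note: the string slice handed to NewStandardHook below appears to hold
// earlier generations of this hook's contents; a .git/hooks/pre-push file
// whose body matches one of them is presumably considered safe to overwrite
// on upgrade, much as the Upgradeables map further down serves the filter
// entries. (Inferred from usage here, not stated elsewhere in this file.)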
prePushHook = NewStandardHook("pre-push", []string{ "#!/bin/sh\ngit lfs push --stdin $*", "#!/bin/sh\ngit lfs push --stdin \"$@\"", "#!/bin/sh\ngit lfs pre-push \"$@\"", "#!/bin/sh\ncommand -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository has been set up with Git LFS but Git LFS is not installed.\\n\"; exit 0; }\ngit lfs pre-push \"$@\"", "#!/bin/sh\ncommand -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository has been set up with Git LFS but Git LFS is not installed.\\n\"; exit 2; }\ngit lfs pre-push \"$@\"", }) // postCheckoutHook invokes `git lfs post-checkout` postCheckoutHook = NewStandardHook("post-checkout", []string{}) postCommitHook = NewStandardHook("post-commit", []string{}) postMergeHook = NewStandardHook("post-merge", []string{}) hooks = []*Hook{ prePushHook, postCheckoutHook, postCommitHook, postMergeHook, } upgradeables = map[string][]string{ "clean": []string{"git-lfs clean %f"}, "smudge": []string{ "git-lfs smudge %f", "git-lfs smudge --skip %f", "git-lfs smudge -- %f", "git-lfs smudge --skip -- %f", }, "process": []string{ "git-lfs filter", "git-lfs filter --skip", "git-lfs filter-process", "git-lfs filter-process --skip", }, } filters = &Attribute{ Section: "filter.lfs", Properties: map[string]string{ "clean": "git-lfs clean -- %f", "smudge": "git-lfs smudge -- %f", "process": "git-lfs filter-process", "required": "true", }, Upgradeables: upgradeables, } passFilters = &Attribute{ Section: "filter.lfs", Properties: map[string]string{ "clean": "git-lfs clean -- %f", "smudge": "git-lfs smudge --skip -- %f", "process": "git-lfs filter-process --skip", "required": "true", }, Upgradeables: upgradeables, } ) // Get user-readable manual install steps for hooks func GetHookInstallSteps() string { steps := make([]string, 0, len(hooks)) for _, h := range hooks { steps = append(steps, fmt.Sprintf( "Add the following to .git/hooks/%s:\n\n%s", h.Type, tools.Indent(h.Contents))) } return strings.Join(steps, "\n\n") } // InstallHooks installs all hooks in the `hooks` var. func InstallHooks(force bool) error { for _, h := range hooks { if err := h.Install(force); err != nil { return err } } return nil } // UninstallHooks removes all hooks in range of the `hooks` var. func UninstallHooks() error { for _, h := range hooks { if err := h.Uninstall(); err != nil { return err } } return nil } // InstallFilters installs filters necessary for git-lfs to process normal git // operations. Currently, that list includes: // - smudge filter // - clean filter // // An error will be returned if a filter is unable to be set, or if the required // filters were not present. func InstallFilters(opt InstallOptions, passThrough bool) error { if passThrough { return passFilters.Install(opt) } return filters.Install(opt) } // UninstallFilters proxies into the Uninstall method on the Filters type to // remove all installed filters. func UninstallFilters(opt InstallOptions) error { filters.Uninstall(opt) return nil } git-lfs-2.3.4/lfs/util.go000066400000000000000000000123641317167762300152000ustar00rootroot00000000000000package lfs import ( "fmt" "io" "io/ioutil" "os" "path/filepath" "runtime" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/progress" "github.com/git-lfs/git-lfs/tools" ) type Platform int const ( PlatformWindows = Platform(iota) PlatformLinux = Platform(iota) PlatformOSX = Platform(iota) PlatformOther = Platform(iota) // most likely a *nix variant e.g. 
freebsd PlatformUndetermined = Platform(iota) ) var currentPlatform = PlatformUndetermined func CopyCallbackFile(event, filename string, index, totalFiles int) (progress.CopyCallback, *os.File, error) { logPath, _ := config.Config.Os.Get("GIT_LFS_PROGRESS") if len(logPath) == 0 || len(filename) == 0 || len(event) == 0 { return nil, nil, nil } if !filepath.IsAbs(logPath) { return nil, nil, fmt.Errorf("GIT_LFS_PROGRESS must be an absolute path") } cbDir := filepath.Dir(logPath) if err := os.MkdirAll(cbDir, 0755); err != nil { return nil, nil, wrapProgressError(err, event, logPath) } file, err := os.OpenFile(logPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) if err != nil { return nil, file, wrapProgressError(err, event, logPath) } var prevWritten int64 cb := progress.CopyCallback(func(total int64, written int64, current int) error { if written != prevWritten { _, err := file.Write([]byte(fmt.Sprintf("%s %d/%d %d/%d %s\n", event, index, totalFiles, written, total, filename))) file.Sync() prevWritten = written return wrapProgressError(err, event, logPath) } return nil }) return cb, file, nil } func wrapProgressError(err error, event, filename string) error { if err != nil { return fmt.Errorf("Error writing Git LFS %s progress to %s: %s", event, filename, err.Error()) } return nil } var localDirSet = tools.NewStringSetFromSlice([]string{".", "./", ".\\"}) func GetPlatform() Platform { if currentPlatform == PlatformUndetermined { switch runtime.GOOS { case "windows": currentPlatform = PlatformWindows case "linux": currentPlatform = PlatformLinux case "darwin": currentPlatform = PlatformOSX default: currentPlatform = PlatformOther } } return currentPlatform } type PathConverter interface { Convert(string) string } // Convert filenames expressed relative to the root of the repo to be relative to the // current working dir. Useful when needing to call git with results from a rooted command, // but the user is in a subdir of their repo. // Call the returned PathConverter's Convert on each filename to map it. func NewRepoToCurrentPathConverter() (PathConverter, error) { r, c, p, err := pathConverterArgs() if err != nil { return nil, err } return &repoToCurrentPathConverter{ repoDir: r, currDir: c, passthrough: p, }, nil } type repoToCurrentPathConverter struct { repoDir string currDir string passthrough bool } func (p *repoToCurrentPathConverter) Convert(filename string) string { if p.passthrough { return filename } abs := filepath.Join(p.repoDir, filename) rel, err := filepath.Rel(p.currDir, abs) if err != nil { // Use absolute file instead return abs } else { return rel } } // Convert filenames expressed relative to the current directory to be // relative to the repo root. Useful when calling git with arguments that require them // to be rooted, but the user is in a subdir of their repo & expects to use relative args. // Call the returned PathConverter's Convert on each filename to map it. func NewCurrentToRepoPathConverter() (PathConverter, error) { r, c, p, err := pathConverterArgs() if err != nil { return nil, err } return &currentToRepoPathConverter{ repoDir: r, currDir: c, passthrough: p, }, nil } type currentToRepoPathConverter struct { repoDir string currDir string passthrough bool } func (p *currentToRepoPathConverter) Convert(filename string) string { if p.passthrough { return filename } var abs string if filepath.IsAbs(filename) { abs = tools.ResolveSymlinks(filename) } else { abs = filepath.Join(p.currDir, filename) } reltoroot, err := filepath.Rel(p.repoDir, abs) if err != nil { // Can't do this, use absolute as best fallback return abs } else { return reltoroot } } func pathConverterArgs() (string, string, bool, error) { currDir, err := os.Getwd() if err != nil { return "", "", false, fmt.Errorf("Unable to get working dir: %v", err) } currDir = tools.ResolveSymlinks(currDir) return config.LocalWorkingDir, currDir, config.LocalWorkingDir == currDir, nil } // Are we running on Windows? Need to handle some extra path shenanigans func IsWindows() bool { return GetPlatform() == PlatformWindows } func CopyFileContents(src string, dst string) error { tmp, err := ioutil.TempFile(TempDir(), filepath.Base(dst)) if err != nil { return err } defer func() { tmp.Close() os.Remove(tmp.Name()) }() in, err := os.Open(src) if err != nil { return err } defer in.Close() _, err = io.Copy(tmp, in) if err != nil { return err } err = tmp.Close() if err != nil { return err } return os.Rename(tmp.Name(), dst) } func LinkOrCopy(src string, dst string) error { if src == dst { return nil } err := os.Link(src, dst) if err == nil { return err } return CopyFileContents(src, dst) } git-lfs-2.3.4/lfs/util_generic.go000066400000000000000000000002161317167762300166650ustar00rootroot00000000000000// +build !linux !cgo package lfs import ( "io" ) func CloneFile(writer io.Writer, reader io.Reader) (bool, error) { return false, nil } git-lfs-2.3.4/lfs/util_test.go000066400000000000000000000027731317167762300162400ustar00rootroot00000000000000package lfs import ( "bytes" "testing" "github.com/git-lfs/git-lfs/progress" "github.com/stretchr/testify/assert" ) func TestBodyWithCallback(t *testing.T) { called := 0 calledRead := make([]int64, 0, 2) cb := func(total int64, read int64, current int) error { called += 1 calledRead = append(calledRead, read) assert.Equal(t, 5, int(total)) return nil } reader := progress.NewByteBodyWithCallback([]byte("BOOYA"), 5, cb) readBuf := make([]byte, 3) n, err := reader.Read(readBuf) assert.Nil(t, err) assert.Equal(t, "BOO", string(readBuf[0:n])) n, err = reader.Read(readBuf) assert.Nil(t, err) assert.Equal(t, "YA", string(readBuf[0:n])) assert.Equal(t, 2, called) assert.Len(t, calledRead, 2) assert.Equal(t, 3, int(calledRead[0])) assert.Equal(t, 5, int(calledRead[1])) } func TestReadWithCallback(t *testing.T) { called := 0 calledRead := make([]int64, 0, 2) reader := &progress.CallbackReader{ TotalSize: 5, Reader: bytes.NewBufferString("BOOYA"), C: func(total int64, read int64, current int) error { called += 1 calledRead = append(calledRead, read) assert.Equal(t, 5, int(total)) return nil }, } readBuf := make([]byte, 3) n, err := reader.Read(readBuf) assert.Nil(t, err) assert.Equal(t, "BOO", string(readBuf[0:n])) n, err = reader.Read(readBuf) assert.Nil(t, err) assert.Equal(t, "YA", string(readBuf[0:n])) assert.Equal(t, 2, called) assert.Len(t, calledRead, 2) assert.Equal(t, 3, int(calledRead[0])) assert.Equal(t, 5, int(calledRead[1])) } git-lfs-2.3.4/lfsapi/000077500000000000000000000000001317167762300143605ustar00rootroot00000000000000git-lfs-2.3.4/lfsapi/auth.go000066400000000000000000000225731317167762300156570ustar00rootroot00000000000000package lfsapi import ( "encoding/base64" "fmt" "net" "net/http" "net/url" "os" "strings" "github.com/bgentry/go-netrc/netrc" "github.com/git-lfs/git-lfs/errors" "github.com/rubyist/tracerx" ) var ( defaultCredentialHelper = &commandCredentialHelper{} defaultNetrcFinder = &noFinder{} defaultEndpointFinder = NewEndpointFinder(nil) ) func (c *Client) DoWithAuth(remote string, req *http.Request) (*http.Response, error) { credHelper := c.Credentials if credHelper == nil { credHelper = defaultCredentialHelper } netrcFinder := c.Netrc if netrcFinder == nil { netrcFinder = defaultNetrcFinder } ef := c.Endpoints if ef == nil { ef = defaultEndpointFinder } apiEndpoint, access, creds, credsURL, err := getCreds(credHelper, netrcFinder, ef, remote, req) if err != nil { return nil, err } res, err := c.doWithCreds(req, credHelper, creds, credsURL, access) if err != nil { if errors.IsAuthError(err) { newAccess := getAuthAccess(res) if newAccess != access { // use ef, not c.Endpoints, so the nil fallback above still applies ef.SetAccess(apiEndpoint.Url, newAccess) } if access == NoneAccess || creds != nil { tracerx.Printf("api: http response indicates %q authentication. Resubmitting...", newAccess) req.Header.Del("Authorization") if creds != nil { credHelper.Reject(creds) } return c.DoWithAuth(remote, req) } } } if res != nil && res.StatusCode < 300 && res.StatusCode > 199 { credHelper.Approve(creds) } return res, err } func (c *Client) doWithCreds(req *http.Request, credHelper CredentialHelper, creds Creds, credsURL *url.URL, access Access) (*http.Response, error) { if access == NTLMAccess { return c.doWithNTLM(req, credHelper, creds, credsURL) } return c.Do(req) } // getCreds fills the authorization header for the given request if possible, // from the following sources: // // 1. NTLM access is handled elsewhere. // 2. An existing Authorization header or ?token query tells LFS that the request is ready. // 3. Netrc, based on the hostname. // 4. URL authentication on the Endpoint URL or the Git Remote URL. // 5. The Git Credential Helper, potentially prompting the user. // // There are three URLs in play, which makes this a little confusing. // // 1. The request URL, which should be something like "https://git.com/repo.git/info/lfs/objects/batch" // 2. The LFS API URL, which should be something like "https://git.com/repo.git/info/lfs" // This URL is used for the "lfs.URL.access" git config key, which determines // what kind of auth the LFS server expects. Could be BasicAccess, NTLMAccess, // or NoneAccess, in which case the Git Credential Helper step is skipped. We do // not want to prompt the user for a password to fetch public repository data. // 3. The Git Remote URL, which should be something like "https://git.com/repo.git" // This URL is used for the Git Credential Helper. This way existing https // Git remote credentials can be re-used for LFS. 
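//
// As a rough illustration (hypothetical endpoint, mirroring the "basic
// access" case in auth_test.go's TestGetCreds):
//
//	req, _ := http.NewRequest("GET", "https://git-server.com/repo/lfs/locks", nil)
//	ep, access, creds, credsURL, err := getCreds(credHelper, netrcFinder, ef, "origin", req)
//	// With lfs.url = https://git-server.com/repo/lfs configured for "basic"
//	// access and no Authorization header, netrc entry, or URL userinfo, the
//	// call falls through to the credential helper: creds carries protocol
//	// "https", host "git-server.com", and path "repo/lfs", credsURL is the
//	// LFS API URL, and the request gains a Basic Authorization header.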
func getCreds(credHelper CredentialHelper, netrcFinder NetrcFinder, ef EndpointFinder, remote string, req *http.Request) (Endpoint, Access, Creds, *url.URL, error) { operation := getReqOperation(req) apiEndpoint := ef.Endpoint(operation, remote) access := ef.AccessFor(apiEndpoint.Url) if access != NTLMAccess { if requestHasAuth(req) || setAuthFromNetrc(netrcFinder, req) || access == NoneAccess { return apiEndpoint, access, nil, nil, nil } credsURL, err := getCredURLForAPI(ef, operation, remote, apiEndpoint, req) if err != nil { return apiEndpoint, access, nil, nil, errors.Wrap(err, "creds") } if credsURL == nil { return apiEndpoint, access, nil, nil, nil } creds, err := fillGitCreds(credHelper, ef, req, credsURL) return apiEndpoint, access, creds, credsURL, err } credsURL, err := url.Parse(apiEndpoint.Url) if err != nil { return apiEndpoint, access, nil, nil, errors.Wrap(err, "creds") } if netrcMachine := getAuthFromNetrc(netrcFinder, req); netrcMachine != nil { creds := Creds{ "protocol": credsURL.Scheme, "host": credsURL.Host, "username": netrcMachine.Login, "password": netrcMachine.Password, "source": "netrc", } return apiEndpoint, access, creds, credsURL, nil } creds, err := getGitCreds(credHelper, ef, req, credsURL) return apiEndpoint, access, creds, credsURL, err } func getGitCreds(credHelper CredentialHelper, ef EndpointFinder, req *http.Request, u *url.URL) (Creds, error) { path := strings.TrimPrefix(u.Path, "/") input := Creds{"protocol": u.Scheme, "host": u.Host, "path": path} if u.User != nil && u.User.Username() != "" { input["username"] = u.User.Username() } creds, err := credHelper.Fill(input) if creds == nil || len(creds) < 1 { errmsg := fmt.Sprintf("Git credentials for %s not found", u) if err != nil { errmsg = errmsg + ":\n" + err.Error() } else { errmsg = errmsg + "." } err = errors.New(errmsg) } return creds, err } func fillGitCreds(credHelper CredentialHelper, ef EndpointFinder, req *http.Request, u *url.URL) (Creds, error) { creds, err := getGitCreds(credHelper, ef, req, u) if err == nil { tracerx.Printf("Filled credentials for %s", u) setRequestAuth(req, creds["username"], creds["password"]) } return creds, err } func getAuthFromNetrc(netrcFinder NetrcFinder, req *http.Request) *netrc.Machine { hostname := req.URL.Host var host string if strings.Contains(hostname, ":") { var err error host, _, err = net.SplitHostPort(hostname) if err != nil { tracerx.Printf("netrc: error parsing %q: %s", hostname, err) return nil } } else { host = hostname } return netrcFinder.FindMachine(host) } func setAuthFromNetrc(netrcFinder NetrcFinder, req *http.Request) bool { if machine := getAuthFromNetrc(netrcFinder, req); machine != nil { setRequestAuth(req, machine.Login, machine.Password) return true } return false } func getCredURLForAPI(ef EndpointFinder, operation, remote string, apiEndpoint Endpoint, req *http.Request) (*url.URL, error) { apiURL, err := url.Parse(apiEndpoint.Url) if err != nil { return nil, err } // if the LFS request doesn't match the current LFS url, don't bother // attempting to set the Authorization header from the LFS or Git remote URLs. 
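	// For example (mirroring the "host mismatch" case in auth_test.go): with
	// lfs.url = https://git-server.com/repo/lfs, a lock request aimed at
	// https://lfs-server.com/repo/lfs/locks returns the request URL itself,
	// so credentials end up keyed to host "lfs-server.com", not the API host.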
if req.URL.Scheme != apiURL.Scheme || req.URL.Host != apiURL.Host { return req.URL, nil } if setRequestAuthFromURL(req, apiURL) { return nil, nil } if len(remote) > 0 { if u := ef.GitRemoteURL(remote, operation == "upload"); u != "" { schemedUrl, _ := prependEmptySchemeIfAbsent(u) gitRemoteURL, err := url.Parse(schemedUrl) if err != nil { return nil, err } if gitRemoteURL.Scheme == apiURL.Scheme && gitRemoteURL.Host == apiURL.Host { if setRequestAuthFromURL(req, gitRemoteURL) { return nil, nil } return gitRemoteURL, nil } } } return apiURL, nil } // prependEmptySchemeIfAbsent prepends an empty scheme "//" if none was found in // the URL in order to satisfy RFC 3986 §3.3, and `net/url.Parse()`. // // It returns a string parse-able with `net/url.Parse()` and a boolean whether // or not an empty scheme was added. func prependEmptySchemeIfAbsent(u string) (string, bool) { if hasScheme(u) { return u, false } colon := strings.Index(u, ":") slash := strings.Index(u, "/") if colon >= 0 && (slash < 0 || colon < slash) { // First path segment has a colon, so assume that it's a // scheme-less URL. Append an empty scheme on top to // satisfy RFC 3986 §3.3, and `net/url.Parse()`. return fmt.Sprintf("//%s", u), true } return u, true } var ( // supportedSchemes is the list of URL schemes the `lfsapi` package // supports. supportedSchemes = []string{"ssh", "http", "https"} ) // hasScheme returns whether or not a given string (taken to represent a RFC // 3986 URL) has a scheme that is supported by the `lfsapi` package. func hasScheme(what string) bool { for _, scheme := range supportedSchemes { if strings.HasPrefix(what, fmt.Sprintf("%s://", scheme)) { return true } } return false } func requestHasAuth(req *http.Request) bool { if len(req.Header.Get("Authorization")) > 0 { return true } return len(req.URL.Query().Get("token")) > 0 } func setRequestAuthFromURL(req *http.Request, u *url.URL) bool { if u.User == nil { return false } if pass, ok := u.User.Password(); ok { fmt.Fprintln(os.Stderr, "warning: current Git remote contains credentials") setRequestAuth(req, u.User.Username(), pass) return true } return false } func setRequestAuth(req *http.Request, user, pass string) { // better not be NTLM! if len(user) == 0 && len(pass) == 0 { return } token := fmt.Sprintf("%s:%s", user, pass) auth := "Basic " + strings.TrimSpace(base64.StdEncoding.EncodeToString([]byte(token))) req.Header.Set("Authorization", auth) } func getReqOperation(req *http.Request) string { operation := "download" if req.Method == "POST" || req.Method == "PUT" { operation = "upload" } return operation } var ( authenticateHeaders = []string{"Lfs-Authenticate", "Www-Authenticate"} ) func getAuthAccess(res *http.Response) Access { for _, headerName := range authenticateHeaders { for _, auth := range res.Header[headerName] { pieces := strings.SplitN(strings.ToLower(auth), " ", 2) if len(pieces) == 0 { continue } switch Access(pieces[0]) { case NegotiateAccess, NTLMAccess: // When the server sends Www-Authenticate: Negotiate, it supports both Kerberos and NTLM. // Since git-lfs currently does not support Kerberos, we will return NTLM in this case. 
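				// (See TestAuthenticateHeaderAccess in auth_test.go: both
				// "ntlm" and "negotiate" prefixes, in any letter case, take
				// this branch; any other value falls through to BasicAccess.)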
return NTLMAccess } } } return BasicAccess } git-lfs-2.3.4/lfsapi/auth_test.go000066400000000000000000000375031317167762300167170ustar00rootroot00000000000000package lfsapi import ( "encoding/base64" "encoding/json" "fmt" "net/http" "net/http/httptest" "strings" "sync/atomic" "testing" "github.com/git-lfs/git-lfs/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type authRequest struct { Test string } func TestAuthenticateHeaderAccess(t *testing.T) { tests := map[string]Access{ "": BasicAccess, "basic 123": BasicAccess, "basic": BasicAccess, "unknown": BasicAccess, "NTLM": NTLMAccess, "ntlm": NTLMAccess, "NTLM 1 2 3": NTLMAccess, "ntlm 1 2 3": NTLMAccess, "NEGOTIATE": NTLMAccess, "negotiate": NTLMAccess, "NEGOTIATE 1 2 3": NTLMAccess, "negotiate 1 2 3": NTLMAccess, } for _, key := range authenticateHeaders { for value, expected := range tests { res := &http.Response{Header: make(http.Header)} res.Header.Set(key, value) t.Logf("%s: %s", key, value) assert.Equal(t, expected, getAuthAccess(res)) } } } func TestDoWithAuthApprove(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { atomic.AddUint32(&called, 1) assert.Equal(t, "POST", req.Method) body := &authRequest{} err := json.NewDecoder(req.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Approve", body.Test) w.Header().Set("Lfs-Authenticate", "Basic") actual := req.Header.Get("Authorization") if len(actual) == 0 { w.WriteHeader(http.StatusUnauthorized) return } expected := "Basic " + strings.TrimSpace( base64.StdEncoding.EncodeToString([]byte("user:pass")), ) assert.Equal(t, expected, actual) })) defer srv.Close() creds := newMockCredentialHelper() c, err := NewClient(nil, UniqTestEnv(map[string]string{ "lfs.url": srv.URL + "/repo/lfs", })) require.Nil(t, err) c.Credentials = creds assert.Equal(t, NoneAccess, c.Endpoints.AccessFor(srv.URL+"/repo/lfs")) req, err := http.NewRequest("POST", srv.URL+"/repo/lfs/foo", nil) require.Nil(t, err) err = MarshalToRequest(req, &authRequest{Test: "Approve"}) require.Nil(t, err) res, err := c.DoWithAuth("", req) require.Nil(t, err) assert.Equal(t, http.StatusOK, res.StatusCode) assert.True(t, creds.IsApproved(Creds(map[string]string{ "username": "user", "password": "pass", "path": "repo/lfs", "protocol": "http", "host": srv.Listener.Addr().String(), }))) assert.Equal(t, BasicAccess, c.Endpoints.AccessFor(srv.URL+"/repo/lfs")) assert.EqualValues(t, 2, called) } func TestDoWithAuthReject(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { atomic.AddUint32(&called, 1) assert.Equal(t, "POST", req.Method) body := &authRequest{} err := json.NewDecoder(req.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Reject", body.Test) actual := req.Header.Get("Authorization") expected := "Basic " + strings.TrimSpace( base64.StdEncoding.EncodeToString([]byte("user:pass")), ) w.Header().Set("Lfs-Authenticate", "Basic") if actual != expected { // Write http.StatusUnauthorized to force the credential // helper to reject the credentials w.WriteHeader(http.StatusUnauthorized) } else { w.WriteHeader(http.StatusOK) } })) defer srv.Close() invalidCreds := Creds(map[string]string{ "username": "user", "password": "wrong_pass", "path": "", "protocol": "http", "host": srv.Listener.Addr().String(), }) creds := newMockCredentialHelper() creds.Approve(invalidCreds) assert.True(t, creds.IsApproved(invalidCreds)) c := &Client{ Credentials: creds, 
Endpoints: NewEndpointFinder(UniqTestEnv(map[string]string{ "lfs.url": srv.URL, })), } req, err := http.NewRequest("POST", srv.URL, nil) require.Nil(t, err) err = MarshalToRequest(req, &authRequest{Test: "Reject"}) require.Nil(t, err) res, err := c.DoWithAuth("", req) require.Nil(t, err) assert.Equal(t, http.StatusOK, res.StatusCode) assert.False(t, creds.IsApproved(invalidCreds)) assert.True(t, creds.IsApproved(Creds(map[string]string{ "username": "user", "password": "pass", "path": "", "protocol": "http", "host": srv.Listener.Addr().String(), }))) assert.EqualValues(t, 3, called) } type mockCredentialHelper struct { Approved map[string]Creds } func newMockCredentialHelper() *mockCredentialHelper { return &mockCredentialHelper{ Approved: make(map[string]Creds), } } func (m *mockCredentialHelper) Fill(input Creds) (Creds, error) { if found, ok := m.Approved[credsToKey(input)]; ok { return found, nil } output := make(Creds) for key, value := range input { output[key] = value } if _, ok := output["username"]; !ok { output["username"] = "user" } output["password"] = "pass" return output, nil } func (m *mockCredentialHelper) Approve(creds Creds) error { m.Approved[credsToKey(creds)] = creds return nil } func (m *mockCredentialHelper) Reject(creds Creds) error { delete(m.Approved, credsToKey(creds)) return nil } func (m *mockCredentialHelper) IsApproved(creds Creds) bool { if found, ok := m.Approved[credsToKey(creds)]; ok { return found["password"] == creds["password"] } return false } func credsToKey(creds Creds) string { var kvs []string for _, k := range []string{"protocol", "host", "path"} { kvs = append(kvs, fmt.Sprintf("%s:%s", k, creds[k])) } return strings.Join(kvs, " ") } func basicAuth(user, pass string) string { value := fmt.Sprintf("%s:%s", user, pass) return fmt.Sprintf("Basic %s", strings.TrimSpace(base64.StdEncoding.EncodeToString([]byte(value)))) } type getCredsExpected struct { Endpoint string Access Access Creds Creds CredsURL string Authorization string } type getCredsTest struct { Remote string Method string Href string Header map[string]string Config map[string]string Expected getCredsExpected } func TestGetCreds(t *testing.T) { tests := map[string]getCredsTest{ "no access": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/lfs/locks", Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", }, Expected: getCredsExpected{ Access: NoneAccess, Endpoint: "https://git-server.com/repo/lfs", }, }, "basic access": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/lfs/locks", Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", }, Expected: getCredsExpected{ Access: BasicAccess, Endpoint: "https://git-server.com/repo/lfs", Authorization: basicAuth("git-server.com", "monkey"), CredsURL: "https://git-server.com/repo/lfs", Creds: map[string]string{ "protocol": "https", "host": "git-server.com", "username": "git-server.com", "password": "monkey", "path": "repo/lfs", }, }, }, "ntlm": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/lfs/locks", Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "ntlm", }, Expected: getCredsExpected{ Access: NTLMAccess, Endpoint: "https://git-server.com/repo/lfs", CredsURL: "https://git-server.com/repo/lfs", Creds: map[string]string{ "protocol": "https", "host": "git-server.com", "username": 
"git-server.com", "password": "monkey", "path": "repo/lfs", }, }, }, "ntlm with netrc": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://netrc-host.com/repo/lfs/locks", Config: map[string]string{ "lfs.url": "https://netrc-host.com/repo/lfs", "lfs.https://netrc-host.com/repo/lfs.access": "ntlm", }, Expected: getCredsExpected{ Access: NTLMAccess, Endpoint: "https://netrc-host.com/repo/lfs", CredsURL: "https://netrc-host.com/repo/lfs", Creds: map[string]string{ "protocol": "https", "host": "netrc-host.com", "username": "abc", "password": "def", "source": "netrc", }, }, }, "custom auth": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/lfs/locks", Header: map[string]string{ "Authorization": "custom", }, Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", }, Expected: getCredsExpected{ Access: BasicAccess, Endpoint: "https://git-server.com/repo/lfs", Authorization: "custom", }, }, "netrc": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://netrc-host.com/repo/lfs/locks", Config: map[string]string{ "lfs.url": "https://netrc-host.com/repo/lfs", "lfs.https://netrc-host.com/repo/lfs.access": "basic", }, Expected: getCredsExpected{ Access: BasicAccess, Endpoint: "https://netrc-host.com/repo/lfs", Authorization: basicAuth("abc", "def"), }, }, "username in url": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/lfs/locks", Config: map[string]string{ "lfs.url": "https://user@git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", }, Expected: getCredsExpected{ Access: BasicAccess, Endpoint: "https://user@git-server.com/repo/lfs", Authorization: basicAuth("user", "monkey"), CredsURL: "https://user@git-server.com/repo/lfs", Creds: map[string]string{ "protocol": "https", "host": "git-server.com", "username": "user", "password": "monkey", "path": "repo/lfs", }, }, }, "different remote url, basic access": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/lfs/locks", Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", "remote.origin.url": "https://git-server.com/repo", }, Expected: getCredsExpected{ Access: BasicAccess, Endpoint: "https://git-server.com/repo/lfs", Authorization: basicAuth("git-server.com", "monkey"), CredsURL: "https://git-server.com/repo", Creds: map[string]string{ "protocol": "https", "host": "git-server.com", "username": "git-server.com", "password": "monkey", "path": "repo", }, }, }, "api url auth": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/locks", Config: map[string]string{ "lfs.url": "https://user:pass@git-server.com/repo", "lfs.https://git-server.com/repo.access": "basic", }, Expected: getCredsExpected{ Access: BasicAccess, Endpoint: "https://user:pass@git-server.com/repo", Authorization: basicAuth("user", "pass"), }, }, "git url auth": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com/repo/locks", Config: map[string]string{ "lfs.url": "https://git-server.com/repo", "lfs.https://git-server.com/repo.access": "basic", "remote.origin.url": "https://user:pass@git-server.com/repo", }, Expected: getCredsExpected{ Access: BasicAccess, Endpoint: "https://git-server.com/repo", Authorization: basicAuth("user", "pass"), }, }, "scheme mismatch": getCredsTest{ Remote: "origin", Method: "GET", Href: "http://git-server.com/repo/lfs/locks", 
Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", }, Expected: getCredsExpected{ Access: BasicAccess, Endpoint: "https://git-server.com/repo/lfs", Authorization: basicAuth("git-server.com", "monkey"), CredsURL: "http://git-server.com/repo/lfs/locks", Creds: map[string]string{ "protocol": "http", "host": "git-server.com", "username": "git-server.com", "password": "monkey", "path": "repo/lfs/locks", }, }, }, "host mismatch": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://lfs-server.com/repo/lfs/locks", Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", }, Expected: getCredsExpected{ Access: BasicAccess, Endpoint: "https://git-server.com/repo/lfs", Authorization: basicAuth("lfs-server.com", "monkey"), CredsURL: "https://lfs-server.com/repo/lfs/locks", Creds: map[string]string{ "protocol": "https", "host": "lfs-server.com", "username": "lfs-server.com", "password": "monkey", "path": "repo/lfs/locks", }, }, }, "port mismatch": getCredsTest{ Remote: "origin", Method: "GET", Href: "https://git-server.com:8080/repo/lfs/locks", Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", }, Expected: getCredsExpected{ Access: BasicAccess, Endpoint: "https://git-server.com/repo/lfs", Authorization: basicAuth("git-server.com:8080", "monkey"), CredsURL: "https://git-server.com:8080/repo/lfs/locks", Creds: map[string]string{ "protocol": "https", "host": "git-server.com:8080", "username": "git-server.com:8080", "password": "monkey", "path": "repo/lfs/locks", }, }, }, "bare ssh URI": getCredsTest{ Remote: "origin", Method: "POST", Href: "https://git-server.com/repo/lfs/objects/batch", Config: map[string]string{ "lfs.url": "https://git-server.com/repo/lfs", "lfs.https://git-server.com/repo/lfs.access": "basic", "remote.origin.url": "git@git-server.com:repo.git", }, Expected: getCredsExpected{ Access: BasicAccess, Endpoint: "https://git-server.com/repo/lfs", Authorization: basicAuth("git-server.com", "monkey"), CredsURL: "https://git-server.com/repo/lfs", Creds: map[string]string{ "host": "git-server.com", "password": "monkey", "path": "repo/lfs", "protocol": "https", "username": "git-server.com", }, }, }, } credHelper := &fakeCredentialFiller{} netrcFinder := &fakeNetrc{} for desc, test := range tests { t.Log(desc) req, err := http.NewRequest(test.Method, test.Href, nil) if err != nil { t.Errorf("[%s] %s", desc, err) continue } for key, value := range test.Header { req.Header.Set(key, value) } ef := NewEndpointFinder(UniqTestEnv(test.Config)) endpoint, access, creds, credsURL, err := getCreds(credHelper, netrcFinder, ef, test.Remote, req) if !assert.Nil(t, err) { continue } assert.Equal(t, test.Expected.Endpoint, endpoint.Url, "endpoint") assert.Equal(t, test.Expected.Access, access, "access") assert.Equal(t, test.Expected.Authorization, req.Header.Get("Authorization"), "authorization") if test.Expected.Creds != nil { assert.EqualValues(t, test.Expected.Creds, creds) } else { assert.Nil(t, creds, "creds") } if len(test.Expected.CredsURL) > 0 { if assert.NotNil(t, credsURL, "credURL") { assert.Equal(t, test.Expected.CredsURL, credsURL.String(), "credURL") } } else { assert.Nil(t, credsURL) } } } type fakeCredentialFiller struct{} func (f *fakeCredentialFiller) Fill(input Creds) (Creds, error) { output := make(Creds) for key, value := range input { output[key] = value } if 
_, ok := output["username"]; !ok { output["username"] = input["host"] } output["password"] = "monkey" return output, nil } func (f *fakeCredentialFiller) Approve(creds Creds) error { return errors.New("Not implemented") } func (f *fakeCredentialFiller) Reject(creds Creds) error { return errors.New("Not implemented") } git-lfs-2.3.4/lfsapi/body.go000066400000000000000000000011711317167762300156440ustar00rootroot00000000000000package lfsapi import ( "bytes" "encoding/json" "io" "net/http" "strconv" ) type ReadSeekCloser interface { io.Seeker io.ReadCloser } func MarshalToRequest(req *http.Request, obj interface{}) error { by, err := json.Marshal(obj) if err != nil { return err } clen := len(by) req.Header.Set("Content-Length", strconv.Itoa(clen)) req.ContentLength = int64(clen) req.Body = NewByteBody(by) return nil } func NewByteBody(by []byte) ReadSeekCloser { return &closingByteReader{Reader: bytes.NewReader(by)} } type closingByteReader struct { *bytes.Reader } func (r *closingByteReader) Close() error { return nil } git-lfs-2.3.4/lfsapi/certs.go000066400000000000000000000106061317167762300160320ustar00rootroot00000000000000package lfsapi import ( "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "path/filepath" "github.com/git-lfs/git-lfs/config" "github.com/rubyist/tracerx" ) // isCertVerificationDisabledForHost returns whether SSL certificate verification // has been disabled for the given host, or globally func isCertVerificationDisabledForHost(c *Client, host string) bool { hostSslVerify, _ := c.uc.Get("http", fmt.Sprintf("https://%v", host), "sslverify") if hostSslVerify == "false" { return true } return c.SkipSSLVerify } // isClientCertEnabledForHost returns whether client certificate // are configured for the given host func isClientCertEnabledForHost(c *Client, host string) bool { _, hostSslKeyOk := c.uc.Get("http", fmt.Sprintf("https://%v/", host), "sslKey") _, hostSslCertOk := c.uc.Get("http", fmt.Sprintf("https://%v/", host), "sslCert") return hostSslKeyOk && hostSslCertOk } // getClientCertForHost returns a client certificate for a specific host (which may // be "host:port" loaded from the gitconfig func getClientCertForHost(c *Client, host string) tls.Certificate { hostSslKey, _ := c.uc.Get("http", fmt.Sprintf("https://%v/", host), "sslKey") hostSslCert, _ := c.uc.Get("http", fmt.Sprintf("https://%v/", host), "sslCert") cert, err := tls.LoadX509KeyPair(hostSslCert, hostSslKey) if err != nil { tracerx.Printf("Error reading client cert/key %v", err) } return cert } // getRootCAsForHost returns a certificate pool for that specific host (which may // be "host:port" loaded from either the gitconfig or from a platform-specific // source which is not included by default in the golang certificate search) // May return nil if it doesn't have anything to add, in which case the default // RootCAs will be used if passed to TLSClientConfig.RootCAs func getRootCAsForHost(c *Client, host string) *x509.CertPool { // don't init pool, want to return nil not empty if none found; init only on successful add cert var pool *x509.CertPool // gitconfig first pool = appendRootCAsForHostFromGitconfig(c.osEnv, c.gitEnv, pool, host) // Platform specific return appendRootCAsForHostFromPlatform(pool, host) } func appendRootCAsForHostFromGitconfig(osEnv Env, gitEnv Env, pool *x509.CertPool, host string) *x509.CertPool { // Accumulate certs from all these locations: // GIT_SSL_CAINFO first if cafile, _ := osEnv.Get("GIT_SSL_CAINFO"); len(cafile) > 0 { return appendCertsFromFile(pool, cafile) } // 
http./.sslcainfo or http..sslcainfo uc := config.NewURLConfig(gitEnv) if cafile, ok := uc.Get("http", fmt.Sprintf("https://%v/", host), "sslcainfo"); ok { return appendCertsFromFile(pool, cafile) } // GIT_SSL_CAPATH if cadir, _ := osEnv.Get("GIT_SSL_CAPATH"); len(cadir) > 0 { return appendCertsFromFilesInDir(pool, cadir) } // http.sslcapath if cadir, ok := gitEnv.Get("http.sslcapath"); ok { return appendCertsFromFilesInDir(pool, cadir) } return pool } func appendCertsFromFilesInDir(pool *x509.CertPool, dir string) *x509.CertPool { files, err := ioutil.ReadDir(dir) if err != nil { tracerx.Printf("Error reading cert dir %q: %v", dir, err) return pool } for _, f := range files { pool = appendCertsFromFile(pool, filepath.Join(dir, f.Name())) } return pool } func appendCertsFromFile(pool *x509.CertPool, filename string) *x509.CertPool { data, err := ioutil.ReadFile(filename) if err != nil { tracerx.Printf("Error reading cert file %q: %v", filename, err) return pool } // Firstly, try parsing as binary certificate if certs, err := x509.ParseCertificates(data); err == nil { return appendCerts(pool, certs) } // If not binary certs, try PEM data return appendCertsFromPEMData(pool, data) } func appendCerts(pool *x509.CertPool, certs []*x509.Certificate) *x509.CertPool { if len(certs) == 0 { // important to return unmodified (may be nil) return pool } if pool == nil { pool = x509.NewCertPool() } for _, cert := range certs { pool.AddCert(cert) } return pool } func appendCertsFromPEMData(pool *x509.CertPool, data []byte) *x509.CertPool { if len(data) == 0 { return pool } // Bit of a dance, need to ensure if AppendCertsFromPEM fails we still return // nil and not an empty pool, so system roots still get used var ret *x509.CertPool if pool == nil { ret = x509.NewCertPool() } else { ret = pool } if !ret.AppendCertsFromPEM(data) { // Return unmodified input pool (may be nil, do not replace with empty) return pool } return ret } git-lfs-2.3.4/lfsapi/certs_darwin.go000066400000000000000000000036661317167762300174060ustar00rootroot00000000000000package lfsapi import ( "crypto/x509" "regexp" "strings" "github.com/git-lfs/git-lfs/subprocess" "github.com/rubyist/tracerx" ) func appendRootCAsForHostFromPlatform(pool *x509.CertPool, host string) *x509.CertPool { // Go loads only the system root certificates by default // see https://github.com/golang/go/blob/master/src/crypto/x509/root_darwin.go // We want to load certs configured in the System keychain too, this is separate // from the system root certificates. It's also where other tools such as // browsers (e.g. Chrome) will load custom trusted certs from. They often // don't load certs from the login keychain so that's not included here // either, for consistency. 
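	// For reference, `security list-keychains` emits quoted paths, roughly
	// (illustrative output, not captured from a real run):
	//
	//	"/Users/me/Library/Keychains/login.keychain-db"
	//	"/Library/Keychains/System.keychain"
	//
	// hence the per-line trimming of spaces, tabs and quotes before matching
	// on "/system.keychain" below.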
// find system.keychain for user-added certs (don't assume location) cmd := subprocess.ExecCommand("/usr/bin/security", "list-keychains") kcout, err := cmd.Output() if err != nil { tracerx.Printf("Error listing keychains: %v", err) return nil } var systemKeychain string keychains := strings.Split(string(kcout), "\n") for _, keychain := range keychains { lc := strings.ToLower(keychain) if !strings.Contains(lc, "/system.keychain") { continue } systemKeychain = strings.Trim(keychain, " \t\"") break } if len(systemKeychain) == 0 { return nil } pool = appendRootCAsFromKeychain(pool, host, systemKeychain) // Also check host without port portreg := regexp.MustCompile(`([^:]+):\d+`) if match := portreg.FindStringSubmatch(host); match != nil { hostwithoutport := match[1] pool = appendRootCAsFromKeychain(pool, hostwithoutport, systemKeychain) } return pool } func appendRootCAsFromKeychain(pool *x509.CertPool, name, keychain string) *x509.CertPool { cmd := subprocess.ExecCommand("/usr/bin/security", "find-certificate", "-a", "-p", "-c", name, keychain) data, err := cmd.Output() if err != nil { tracerx.Printf("Error reading keychain %q: %v", keychain, err) return pool } return appendCertsFromPEMData(pool, data) } git-lfs-2.3.4/lfsapi/certs_freebsd.go000066400000000000000000000002611317167762300175200ustar00rootroot00000000000000package lfsapi import "crypto/x509" func appendRootCAsForHostFromPlatform(pool *x509.CertPool, host string) *x509.CertPool { // Do nothing, use golang default return pool } git-lfs-2.3.4/lfsapi/certs_linux.go000066400000000000000000000002611317167762300172450ustar00rootroot00000000000000package lfsapi import "crypto/x509" func appendRootCAsForHostFromPlatform(pool *x509.CertPool, host string) *x509.CertPool { // Do nothing, use golang default return pool } git-lfs-2.3.4/lfsapi/certs_openbsd.go000066400000000000000000000002611317167762300175400ustar00rootroot00000000000000package lfsapi import "crypto/x509" func appendRootCAsForHostFromPlatform(pool *x509.CertPool, host string) *x509.CertPool { // Do nothing, use golang default return pool } git-lfs-2.3.4/lfsapi/certs_test.go000066400000000000000000000154721317167762300170770ustar00rootroot00000000000000package lfsapi import ( "fmt" "io/ioutil" "net/http" "os" "path/filepath" "testing" "github.com/stretchr/testify/assert" ) var testCert = `-----BEGIN CERTIFICATE----- MIIDyjCCArKgAwIBAgIJAMi9TouXnW+ZMA0GCSqGSIb3DQEBBQUAMEwxCzAJBgNV BAYTAlVTMRMwEQYDVQQIEwpTb21lLVN0YXRlMRAwDgYDVQQKEwdnaXQtbGZzMRYw FAYDVQQDEw1naXQtbGZzLmxvY2FsMB4XDTE2MDMwOTEwNTk1NFoXDTI2MDMwNzEw NTk1NFowTDELMAkGA1UEBhMCVVMxEzARBgNVBAgTClNvbWUtU3RhdGUxEDAOBgNV BAoTB2dpdC1sZnMxFjAUBgNVBAMTDWdpdC1sZnMubG9jYWwwggEiMA0GCSqGSIb3 DQEBAQUAA4IBDwAwggEKAoIBAQCXmsI2w44nOsP7n3kL1Lz04U5FMZRErBSXLOE+ dpd4tMpgrjOncJPD9NapHabsVIOnuVvMDuBbWYwU9PwbN4tjQzch8DRxBju6fCp/ Pm+QF6p2Ga+NuSHWoVfNFuF2776aF9gSLC0rFnBekD3HCz+h6I5HFgHBvRjeVyAs PRw471Y28Je609SoYugxaQNzRvahP0Qf43tE74/WN3FTGXy1+iU+uXpfp8KxnsuB gfj+Wi6mPt8Q2utcA1j82dJ0K8ZbHSbllzmI+N/UuRLsbTUEdeFWYdZ0AlZNd/Vc PlOSeoExwvOHIuUasT/cLIrEkdXNud2QLg2GpsB6fJi3NEUhAgMBAAGjga4wgasw HQYDVR0OBBYEFC8oVPRQbekTwfkntgdL7PADXNDbMHwGA1UdIwR1MHOAFC8oVPRQ bekTwfkntgdL7PADXNDboVCkTjBMMQswCQYDVQQGEwJVUzETMBEGA1UECBMKU29t ZS1TdGF0ZTEQMA4GA1UEChMHZ2l0LWxmczEWMBQGA1UEAxMNZ2l0LWxmcy5sb2Nh bIIJAMi9TouXnW+ZMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBACIl /CBLIhC3drrYme4cGArhWyXIyRpMoy9Z+9Dru8rSuOr/RXR6sbYhlE1iMGg4GsP8 4Cj7aIct6Vb9NFv5bGNyFJAmDesm3SZlEcWxU3YBzNPiJXGiUpQHCkp0BH+gvsXc tb58XoiDZPVqrl0jNfX/nHpHR9c3DaI3Tjx0F/No0ZM6mLQ1cNMikFyEWQ4U0zmW 
LvV+vvKuOixRqbcVnB5iTxqMwFG0X3tUql0cftGBgoCoR1+FSBOs0EXLODCck6ql aW6vZwkA+ccj/pDTx8LBe2lnpatrFeIt6znAUJW3G8r6SFHKVBWHwmESZS4kxhjx NpW5Hh0w4/5iIetCkJ0= -----END CERTIFICATE-----` var sslCAInfoConfigHostNames = []string{ "git-lfs.local", "git-lfs.local/", } var sslCAInfoMatchedHostTests = []struct { hostName string shouldMatch bool }{ {"git-lfs.local", true}, {"git-lfs.local:8443", false}, {"wronghost.com", false}, } func TestCertFromSSLCAInfoConfig(t *testing.T) { tempfile, err := ioutil.TempFile("", "testcert") assert.Nil(t, err, "Error creating temp cert file") defer os.Remove(tempfile.Name()) _, err = tempfile.WriteString(testCert) assert.Nil(t, err, "Error writing temp cert file") tempfile.Close() // Test http..sslcainfo for _, hostName := range sslCAInfoConfigHostNames { hostKey := fmt.Sprintf("http.https://%v.sslcainfo", hostName) c, err := NewClient(nil, UniqTestEnv(map[string]string{ hostKey: tempfile.Name(), })) assert.Nil(t, err) for _, matchedHostTest := range sslCAInfoMatchedHostTests { pool := getRootCAsForHost(c, matchedHostTest.hostName) var shouldOrShouldnt string if matchedHostTest.shouldMatch { shouldOrShouldnt = "should" } else { shouldOrShouldnt = "should not" } assert.Equal(t, matchedHostTest.shouldMatch, pool != nil, "Cert lookup for \"%v\" %v have succeeded with \"%v\"", matchedHostTest.hostName, shouldOrShouldnt, hostKey) } } // Test http.sslcainfo c, err := NewClient(nil, UniqTestEnv(map[string]string{ "http.sslcainfo": tempfile.Name(), })) assert.Nil(t, err) // Should match any host at all for _, matchedHostTest := range sslCAInfoMatchedHostTests { pool := getRootCAsForHost(c, matchedHostTest.hostName) assert.NotNil(t, pool) } } func TestCertFromSSLCAInfoEnv(t *testing.T) { tempfile, err := ioutil.TempFile("", "testcert") assert.Nil(t, err, "Error creating temp cert file") defer os.Remove(tempfile.Name()) _, err = tempfile.WriteString(testCert) assert.Nil(t, err, "Error writing temp cert file") tempfile.Close() c, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSL_CAINFO": tempfile.Name(), }), nil) assert.Nil(t, err) // Should match any host at all for _, matchedHostTest := range sslCAInfoMatchedHostTests { pool := getRootCAsForHost(c, matchedHostTest.hostName) assert.NotNil(t, pool) } } func TestCertFromSSLCAPathConfig(t *testing.T) { tempdir, err := ioutil.TempDir("", "testcertdir") assert.Nil(t, err, "Error creating temp cert dir") defer os.RemoveAll(tempdir) err = ioutil.WriteFile(filepath.Join(tempdir, "cert1.pem"), []byte(testCert), 0644) assert.Nil(t, err, "Error creating cert file") c, err := NewClient(nil, UniqTestEnv(map[string]string{ "http.sslcapath": tempdir, })) assert.Nil(t, err) // Should match any host at all for _, matchedHostTest := range sslCAInfoMatchedHostTests { pool := getRootCAsForHost(c, matchedHostTest.hostName) assert.NotNil(t, pool) } } func TestCertFromSSLCAPathEnv(t *testing.T) { tempdir, err := ioutil.TempDir("", "testcertdir") assert.Nil(t, err, "Error creating temp cert dir") defer os.RemoveAll(tempdir) err = ioutil.WriteFile(filepath.Join(tempdir, "cert1.pem"), []byte(testCert), 0644) assert.Nil(t, err, "Error creating cert file") c, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSL_CAPATH": tempdir, }), nil) assert.Nil(t, err) // Should match any host at all for _, matchedHostTest := range sslCAInfoMatchedHostTests { pool := getRootCAsForHost(c, matchedHostTest.hostName) assert.NotNil(t, pool) } } func TestCertVerifyDisabledGlobalEnv(t *testing.T) { empty := &Client{} httpClient := empty.httpClient("anyhost.com") tr, 
ok := httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.False(t, tr.TLSClientConfig.InsecureSkipVerify) } c, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSL_NO_VERIFY": "1", }), nil) assert.Nil(t, err) httpClient = c.httpClient("anyhost.com") tr, ok = httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.True(t, tr.TLSClientConfig.InsecureSkipVerify) } } func TestCertVerifyDisabledGlobalConfig(t *testing.T) { def := &Client{} httpClient := def.httpClient("anyhost.com") tr, ok := httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.False(t, tr.TLSClientConfig.InsecureSkipVerify) } c, err := NewClient(nil, UniqTestEnv(map[string]string{ "http.sslverify": "false", })) assert.Nil(t, err) httpClient = c.httpClient("anyhost.com") tr, ok = httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.True(t, tr.TLSClientConfig.InsecureSkipVerify) } } func TestCertVerifyDisabledHostConfig(t *testing.T) { def := &Client{} httpClient := def.httpClient("specifichost.com") tr, ok := httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.False(t, tr.TLSClientConfig.InsecureSkipVerify) } httpClient = def.httpClient("otherhost.com") tr, ok = httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.False(t, tr.TLSClientConfig.InsecureSkipVerify) } c, err := NewClient(nil, UniqTestEnv(map[string]string{ "http.https://specifichost.com/.sslverify": "false", })) assert.Nil(t, err) httpClient = c.httpClient("specifichost.com") tr, ok = httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.True(t, tr.TLSClientConfig.InsecureSkipVerify) } httpClient = c.httpClient("otherhost.com") tr, ok = httpClient.Transport.(*http.Transport) if assert.True(t, ok) { assert.False(t, tr.TLSClientConfig.InsecureSkipVerify) } } git-lfs-2.3.4/lfsapi/certs_windows.go000066400000000000000000000003321317167762300175770ustar00rootroot00000000000000package lfsapi import "crypto/x509" func appendRootCAsForHostFromPlatform(pool *x509.CertPool, host string) *x509.CertPool { // golang already supports Windows Certificate Store for self-signed certs return pool } git-lfs-2.3.4/lfsapi/client.go000066400000000000000000000202241317167762300161650ustar00rootroot00000000000000package lfsapi import ( "context" "crypto/tls" "fmt" "io" "net" "net/http" "net/url" "os" "regexp" "strconv" "strings" "time" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/tools" "github.com/rubyist/tracerx" ) const MediaType = "application/vnd.git-lfs+json; charset=utf-8" var ( UserAgent = "git-lfs" httpRE = regexp.MustCompile(`\Ahttps?://`) ) func (c *Client) NewRequest(method string, e Endpoint, suffix string, body interface{}) (*http.Request, error) { sshRes, err := c.SSH.Resolve(e, method) if err != nil { tracerx.Printf("ssh: %s failed, error: %s, message: %s", e.SshUserAndHost, err.Error(), sshRes.Message, ) if len(sshRes.Message) > 0 { return nil, errors.Wrap(err, sshRes.Message) } return nil, err } prefix := e.Url if len(sshRes.Href) > 0 { prefix = sshRes.Href } if !httpRE.MatchString(prefix) { urlfragment := strings.SplitN(prefix, "?", 2)[0] return nil, fmt.Errorf("missing protocol: %q", urlfragment) } req, err := http.NewRequest(method, joinURL(prefix, suffix), nil) if err != nil { return req, err } for key, value := range sshRes.Header { req.Header.Set(key, value) } req.Header.Set("Accept", MediaType) if body != nil { if merr := MarshalToRequest(req, body); merr != nil { return req, merr } 
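// MarshalToRequest (defined elsewhere in this package) JSON-encodes
// 'body' into req.Body and sets the Content-Length header to match.
// A rough sketch of the observable effect, mirroring
// TestNewRequestWithBody in client_test.go:
//
//	body := struct{ Test string }{Test: "test"}
//	req, _ := c.NewRequest("POST", endpoint, "body", body)
//	// req.ContentLength == 15 == len(`{"Test":"test"}`)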
req.Header.Set("Content-Type", MediaType) } return req, err } const slash = "/" func joinURL(prefix, suffix string) string { if strings.HasSuffix(prefix, slash) { return prefix + suffix } return prefix + slash + suffix } func (c *Client) Do(req *http.Request) (*http.Response, error) { req.Header = c.extraHeadersFor(req) req.Header.Set("User-Agent", UserAgent) res, err := c.doWithRedirects(c.httpClient(req.Host), req, nil) if err != nil { return res, err } return res, c.handleResponse(res) } // Close closes any resources that this client opened. func (c *Client) Close() error { return c.httpLogger.Close() } func (c *Client) extraHeadersFor(req *http.Request) http.Header { extraHeaders := c.extraHeaders(req.URL) if len(extraHeaders) == 0 { return req.Header } copy := make(http.Header, len(req.Header)) for k, vs := range req.Header { copy[k] = vs } for k, vs := range extraHeaders { for _, v := range vs { copy[k] = append(copy[k], v) } } return copy } func (c *Client) extraHeaders(u *url.URL) map[string][]string { hdrs := c.uc.GetAll("http", u.String(), "extraHeader") m := make(map[string][]string, len(hdrs)) for _, hdr := range hdrs { parts := strings.SplitN(hdr, ":", 2) if len(parts) < 2 { continue } k, v := parts[0], strings.TrimSpace(parts[1]) m[k] = append(m[k], v) } return m } func (c *Client) doWithRedirects(cli *http.Client, req *http.Request, via []*http.Request) (*http.Response, error) { tracedReq, err := c.traceRequest(req) if err != nil { return nil, err } var retries int if n, ok := Retries(req); ok { retries = n } else { retries = defaultRequestRetries } var res *http.Response requests := tools.MaxInt(0, retries) + 1 for i := 0; i < requests; i++ { res, err = cli.Do(req) if err == nil { break } if seek, ok := req.Body.(io.Seeker); ok { seek.Seek(0, io.SeekStart) } c.traceResponse(req, tracedReq, nil) } if err != nil { c.traceResponse(req, tracedReq, nil) return nil, err } if res == nil { return nil, nil } c.traceResponse(req, tracedReq, res) if res.StatusCode != 301 && res.StatusCode != 302 && res.StatusCode != 303 && res.StatusCode != 307 && res.StatusCode != 308 { // Above are the list of 3xx status codes that we know // how to handle below. If the status code contained in // the HTTP response was none of them, return the (res, // err) tuple as-is, otherwise handle the redirect. 
return res, err } redirectTo := res.Header.Get("Location") locurl, err := url.Parse(redirectTo) if err == nil && !locurl.IsAbs() { locurl = req.URL.ResolveReference(locurl) redirectTo = locurl.String() } via = append(via, req) if len(via) >= 3 { return res, errors.New("too many redirects") } redirectedReq, err := newRequestForRetry(req, redirectTo) if err != nil { return res, err } return c.doWithRedirects(cli, redirectedReq, via) } func (c *Client) httpClient(host string) *http.Client { c.clientMu.Lock() defer c.clientMu.Unlock() if c.gitEnv == nil { c.gitEnv = make(TestEnv) } if c.osEnv == nil { c.osEnv = make(TestEnv) } if c.hostClients == nil { c.hostClients = make(map[string]*http.Client) } if client, ok := c.hostClients[host]; ok { return client } concurrentTransfers := c.ConcurrentTransfers if concurrentTransfers < 1 { concurrentTransfers = 8 } dialtime := c.DialTimeout if dialtime < 1 { dialtime = 30 } keepalivetime := c.KeepaliveTimeout if keepalivetime < 1 { keepalivetime = 1800 } tlstime := c.TLSTimeout if tlstime < 1 { tlstime = 30 } tr := &http.Transport{ Proxy: proxyFromClient(c), TLSHandshakeTimeout: time.Duration(tlstime) * time.Second, MaxIdleConnsPerHost: concurrentTransfers, } activityTimeout := 30 if v, ok := c.uc.Get("lfs", fmt.Sprintf("https://%v", host), "activitytimeout"); ok { if i, err := strconv.Atoi(v); err == nil { activityTimeout = i } else { activityTimeout = 0 } } dialer := &net.Dialer{ Timeout: time.Duration(dialtime) * time.Second, KeepAlive: time.Duration(keepalivetime) * time.Second, DualStack: true, } if activityTimeout > 0 { activityDuration := time.Duration(activityTimeout) * time.Second tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { c, err := dialer.DialContext(ctx, network, addr) if c == nil { return c, err } if tc, ok := c.(*net.TCPConn); ok { tc.SetKeepAlive(true) tc.SetKeepAlivePeriod(dialer.KeepAlive) } return &deadlineConn{Timeout: activityDuration, Conn: c}, err } } else { tr.DialContext = dialer.DialContext } tr.TLSClientConfig = &tls.Config{} if isClientCertEnabledForHost(c, host) { tracerx.Printf("http: client cert for %s", host) tr.TLSClientConfig.Certificates = []tls.Certificate{getClientCertForHost(c, host)} tr.TLSClientConfig.BuildNameToCertificate() } if isCertVerificationDisabledForHost(c, host) { tr.TLSClientConfig.InsecureSkipVerify = true } else { tr.TLSClientConfig.RootCAs = getRootCAsForHost(c, host) } httpClient := &http.Client{ Transport: tr, CheckRedirect: func(*http.Request, []*http.Request) error { return http.ErrUseLastResponse }, } c.hostClients[host] = httpClient if c.VerboseOut == nil { c.VerboseOut = os.Stderr } return httpClient } func (c *Client) CurrentUser() (string, string) { userName, _ := c.gitEnv.Get("user.name") userEmail, _ := c.gitEnv.Get("user.email") return userName, userEmail } func newRequestForRetry(req *http.Request, location string) (*http.Request, error) { newReq, err := http.NewRequest(req.Method, location, nil) if err != nil { return nil, err } if req.URL.Scheme == "https" && newReq.URL.Scheme == "http" { return nil, errors.New("lfsapi/client: refusing insecure redirect, https->http") } sameHost := req.URL.Host == newReq.URL.Host for key := range req.Header { if key == "Authorization" { if !sameHost { continue } } newReq.Header.Set(key, req.Header.Get(key)) } oldestURL := strings.SplitN(req.URL.String(), "?", 2)[0] newURL := strings.SplitN(newReq.URL.String(), "?", 2)[0] tracerx.Printf("api: redirect %s %s to %s", req.Method, oldestURL, newURL) // This body will 
have already been rewound from a call to // lfsapi.Client.traceRequest(). newReq.Body = req.Body newReq.ContentLength = req.ContentLength // Copy the request's context.Context, if any. newReq = newReq.WithContext(req.Context()) return newReq, nil } type deadlineConn struct { Timeout time.Duration net.Conn } func (c *deadlineConn) Read(b []byte) (int, error) { if err := c.Conn.SetDeadline(time.Now().Add(c.Timeout)); err != nil { return 0, err } return c.Conn.Read(b) } func (c *deadlineConn) Write(b []byte) (int, error) { if err := c.Conn.SetDeadline(time.Now().Add(c.Timeout)); err != nil { return 0, err } return c.Conn.Write(b) } func init() { UserAgent = config.VersionDesc } git-lfs-2.3.4/lfsapi/client_test.go000066400000000000000000000202611317167762300172250ustar00rootroot00000000000000package lfsapi import ( "encoding/json" "fmt" "net" "net/http" "net/http/httptest" "sync/atomic" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type redirectTest struct { Test string } func TestClientRedirect(t *testing.T) { var srv3Https, srv3Http string var called1 uint32 var called2 uint32 var called3 uint32 srv3 := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called3, 1) t.Logf("srv3 req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) switch r.URL.Path { case "/upgrade": assert.Equal(t, "auth", r.Header.Get("Authorization")) assert.Equal(t, "1", r.Header.Get("A")) w.Header().Set("Location", srv3Https+"/upgraded") w.WriteHeader(301) case "/upgraded": // Since srv3 listens on both a TLS-enabled socket and a // TLS-disabled one, they are two different hosts. // Ensure that, even though this is a "secure" upgrade, // the authorization header is stripped. 
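// newRequestForRetry compares req.URL.Host values, and in Go those
// include the port, so the TLS and non-TLS listeners count as
// different hosts here even though they share one handler.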
assert.Equal(t, "", r.Header.Get("Authorization")) assert.Equal(t, "1", r.Header.Get("A")) case "/downgrade": assert.Equal(t, "auth", r.Header.Get("Authorization")) assert.Equal(t, "1", r.Header.Get("A")) w.Header().Set("Location", srv3Http+"/404") w.WriteHeader(301) default: w.WriteHeader(404) } })) srv2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called2, 1) t.Logf("srv2 req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) switch r.URL.Path { case "/ok": assert.Equal(t, "", r.Header.Get("Authorization")) assert.Equal(t, "1", r.Header.Get("A")) body := &redirectTest{} err := json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "External", body.Test) w.WriteHeader(200) default: w.WriteHeader(404) } })) srv1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called1, 1) t.Logf("srv1 req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) switch r.URL.Path { case "/local": w.Header().Set("Location", "/ok") w.WriteHeader(307) case "/external": w.Header().Set("Location", srv2.URL+"/ok") w.WriteHeader(307) case "/ok": assert.Equal(t, "auth", r.Header.Get("Authorization")) assert.Equal(t, "1", r.Header.Get("A")) body := &redirectTest{} err := json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Local", body.Test) w.WriteHeader(200) default: w.WriteHeader(404) } })) defer srv1.Close() defer srv2.Close() defer srv3.Close() srv3InsecureListener, err := net.Listen("tcp", "127.0.0.1:0") require.Nil(t, err) go http.Serve(srv3InsecureListener, srv3.Config.Handler) defer srv3InsecureListener.Close() srv3Https = srv3.URL srv3Http = fmt.Sprintf("http://%s", srv3InsecureListener.Addr().String()) c, err := NewClient(nil, UniqTestEnv(map[string]string{ fmt.Sprintf("http.%s.sslverify", srv3Https): "false", fmt.Sprintf("http.%s/.sslverify", srv3Https): "false", fmt.Sprintf("http.%s.sslverify", srv3Http): "false", fmt.Sprintf("http.%s/.sslverify", srv3Http): "false", fmt.Sprintf("http.sslverify"): "false", })) require.Nil(t, err) // local redirect req, err := http.NewRequest("POST", srv1.URL+"/local", nil) require.Nil(t, err) req.Header.Set("Authorization", "auth") req.Header.Set("A", "1") require.Nil(t, MarshalToRequest(req, &redirectTest{Test: "Local"})) res, err := c.Do(req) require.Nil(t, err) assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 2, called1) assert.EqualValues(t, 0, called2) // external redirect req, err = http.NewRequest("POST", srv1.URL+"/external", nil) require.Nil(t, err) req.Header.Set("Authorization", "auth") req.Header.Set("A", "1") require.Nil(t, MarshalToRequest(req, &redirectTest{Test: "External"})) res, err = c.Do(req) require.Nil(t, err) assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 3, called1) assert.EqualValues(t, 1, called2) // http -> https (secure upgrade) req, err = http.NewRequest("POST", srv3Http+"/upgrade", nil) require.Nil(t, err) req.Header.Set("Authorization", "auth") req.Header.Set("A", "1") require.Nil(t, MarshalToRequest(req, &redirectTest{Test: "http->https"})) res, err = c.Do(req) require.Nil(t, err) assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 2, atomic.LoadUint32(&called3)) // https -> http (insecure downgrade) req, err = http.NewRequest("POST", srv3Https+"/downgrade", nil) require.Nil(t, err) req.Header.Set("Authorization", "auth") req.Header.Set("A", "1") require.Nil(t, MarshalToRequest(req, &redirectTest{Test: "https->http"})) _, err = c.Do(req) 
assert.EqualError(t, err, "lfsapi/client: refusing insecure redirect, https->http") } func TestNewClient(t *testing.T) { c, err := NewClient(UniqTestEnv(map[string]string{}), UniqTestEnv(map[string]string{ "lfs.dialtimeout": "151", "lfs.keepalive": "152", "lfs.tlstimeout": "153", "lfs.concurrenttransfers": "154", })) require.Nil(t, err) assert.Equal(t, 151, c.DialTimeout) assert.Equal(t, 152, c.KeepaliveTimeout) assert.Equal(t, 153, c.TLSTimeout) assert.Equal(t, 154, c.ConcurrentTransfers) } func TestNewClientWithGitSSLVerify(t *testing.T) { c, err := NewClient(nil, nil) assert.Nil(t, err) assert.False(t, c.SkipSSLVerify) for _, value := range []string{"true", "1", "t"} { c, err = NewClient(UniqTestEnv(map[string]string{}), UniqTestEnv(map[string]string{ "http.sslverify": value, })) t.Logf("http.sslverify: %q", value) assert.Nil(t, err) assert.False(t, c.SkipSSLVerify) } for _, value := range []string{"false", "0", "f"} { c, err = NewClient(UniqTestEnv(map[string]string{}), UniqTestEnv(map[string]string{ "http.sslverify": value, })) t.Logf("http.sslverify: %q", value) assert.Nil(t, err) assert.True(t, c.SkipSSLVerify) } } func TestNewClientWithOSSSLVerify(t *testing.T) { c, err := NewClient(nil, nil) assert.Nil(t, err) assert.False(t, c.SkipSSLVerify) for _, value := range []string{"false", "0", "f"} { c, err = NewClient(UniqTestEnv(map[string]string{ "GIT_SSL_NO_VERIFY": value, }), UniqTestEnv(map[string]string{})) t.Logf("GIT_SSL_NO_VERIFY: %q", value) assert.Nil(t, err) assert.False(t, c.SkipSSLVerify) } for _, value := range []string{"true", "1", "t"} { c, err = NewClient(UniqTestEnv(map[string]string{ "GIT_SSL_NO_VERIFY": value, }), UniqTestEnv(map[string]string{})) t.Logf("GIT_SSL_NO_VERIFY: %q", value) assert.Nil(t, err) assert.True(t, c.SkipSSLVerify) } } func TestNewRequest(t *testing.T) { tests := [][]string{ {"https://example.com", "a", "https://example.com/a"}, {"https://example.com/", "a", "https://example.com/a"}, {"https://example.com/a", "b", "https://example.com/a/b"}, {"https://example.com/a/", "b", "https://example.com/a/b"}, } for _, test := range tests { c, err := NewClient(nil, UniqTestEnv(map[string]string{ "lfs.url": test[0], })) require.Nil(t, err) req, err := c.NewRequest("POST", c.Endpoints.Endpoint("", ""), test[1], nil) require.Nil(t, err) assert.Equal(t, "POST", req.Method) assert.Equal(t, test[2], req.URL.String(), fmt.Sprintf("endpoint: %s, suffix: %s, expected: %s", test[0], test[1], test[2])) } } func TestNewRequestWithBody(t *testing.T) { c, err := NewClient(nil, UniqTestEnv(map[string]string{ "lfs.url": "https://example.com", })) require.Nil(t, err) body := struct { Test string }{Test: "test"} req, err := c.NewRequest("POST", c.Endpoints.Endpoint("", ""), "body", body) require.Nil(t, err) assert.NotNil(t, req.Body) assert.Equal(t, "15", req.Header.Get("Content-Length")) assert.EqualValues(t, 15, req.ContentLength) } func TestMarshalToRequest(t *testing.T) { req, err := http.NewRequest("POST", "https://foo/bar", nil) require.Nil(t, err) assert.Nil(t, req.Body) assert.Equal(t, "", req.Header.Get("Content-Length")) assert.EqualValues(t, 0, req.ContentLength) body := struct { Test string }{Test: "test"} require.Nil(t, MarshalToRequest(req, body)) assert.NotNil(t, req.Body) assert.Equal(t, "15", req.Header.Get("Content-Length")) assert.EqualValues(t, 15, req.ContentLength) } git-lfs-2.3.4/lfsapi/creds.go000066400000000000000000000236111317167762300160120ustar00rootroot00000000000000package lfsapi import ( "bytes" "fmt" "net/url" "os/exec" "strings" "sync" 
"github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" "github.com/rubyist/tracerx" ) // credsConfig supplies configuration options pertaining to the authorization // process in package lfsapi. type credsConfig struct { // AskPass is a string containing an executable name as well as a // program arguments. // // See: https://git-scm.com/docs/gitcredentials#_requesting_credentials // for more. AskPass string `os:"GIT_ASKPASS" git:"core.askpass" os:"SSH_ASKPASS"` // Helper is a string defining the credential helper that Git should use. Helper string `git:"credential.helper"` // Cached is a boolean determining whether or not to enable the // credential cacher. Cached bool // SkipPrompt is a boolean determining whether or not to prompt the user // for a password. SkipPrompt bool `os:"GIT_TERMINAL_PROMPT"` } // getCredentialHelper parses a 'credsConfig' from the git and OS environments, // returning the appropriate CredentialHelper to authenticate requests with. // // It returns an error if any configuration was invalid, or otherwise // un-useable. func getCredentialHelper(cfg *config.Configuration) (CredentialHelper, error) { ccfg, err := getCredentialConfig(cfg) if err != nil { return nil, err } var hs []CredentialHelper if len(ccfg.Helper) == 0 && len(ccfg.AskPass) > 0 { hs = append(hs, &AskPassCredentialHelper{ Program: ccfg.AskPass, }) } var h CredentialHelper h = &commandCredentialHelper{ SkipPrompt: ccfg.SkipPrompt, } if ccfg.Cached { h = withCredentialCache(h) } hs = append(hs, h) switch len(hs) { case 0: return nil, nil case 1: return hs[0], nil } return CredentialHelpers(hs), nil } // getCredentialConfig parses a *credsConfig given the OS and Git // configurations. func getCredentialConfig(cfg *config.Configuration) (*credsConfig, error) { what := &credsConfig{ Cached: cfg.Git.Bool("lfs.cachecredentials", true), } if err := cfg.Unmarshal(what); err != nil { return nil, err } return what, nil } // CredentialHelpers is a []CredentialHelper that iterates through each // credential helper to fill, reject, or approve credentials. type CredentialHelpers []CredentialHelper // Fill implements CredentialHelper.Fill by asking each CredentialHelper in // order to fill the credentials. // // If a fill was successful, it is returned immediately, and no other // `CredentialHelper`s are consulted. If any CredentialHelper returns an error, // it is returned immediately. func (h CredentialHelpers) Fill(what Creds) (Creds, error) { for _, c := range h { creds, err := c.Fill(what) if err != nil { return nil, err } if creds != nil { return creds, nil } } return nil, nil } // Reject implements CredentialHelper.Reject and rejects the given Creds "what" // amongst all knonw CredentialHelpers. If any `CredentialHelper`s returned a // non-nil error, no further `CredentialHelper`s are notified, so as to prevent // inconsistent state. func (h CredentialHelpers) Reject(what Creds) error { for _, c := range h { if err := c.Reject(what); err != nil { return err } } return nil } // Approve implements CredentialHelper.Approve and approves the given Creds // "what" amongst all known CredentialHelpers. If any `CredentialHelper`s // returned a non-nil error, no further `CredentialHelper`s are notified, so as // to prevent inconsistent state. 
func (h CredentialHelpers) Approve(what Creds) error { for _, c := range h { if err := c.Approve(what); err != nil { return err } } return nil } // AskPassCredentialHelper implements the CredentialHelper type for GIT_ASKPASS // and 'core.askpass' configuration values. type AskPassCredentialHelper struct { // Program is the executable program's absolute or relative name. Program string } // Fill implements fill by running the ASKPASS program and returning its output // as a password encoded in the Creds type given the key "password". // // It reads the password from the program's stdout, as produced when the // program is invoked with the given arguments (see // (*AskPassCredentialHelper).args() below). // // If there was an error running the command, it is returned instead of a set of // filled credentials. func (a *AskPassCredentialHelper) Fill(what Creds) (Creds, error) { var user bytes.Buffer var pass bytes.Buffer var errbuf bytes.Buffer u := &url.URL{ Scheme: what["protocol"], Host: what["host"], Path: what["path"], } // 'ucmd' will run the GIT_ASKPASS (or core.askpass) command prompting // for a username. ucmd := exec.Command(a.Program, a.args(fmt.Sprintf("Username for %q", u))...) ucmd.Stderr = &errbuf ucmd.Stdout = &user tracerx.Printf("creds: filling with GIT_ASKPASS: %s", strings.Join(ucmd.Args, " ")) if err := ucmd.Run(); err != nil { return nil, err } if errbuf.Len() > 0 { return nil, errors.New(errbuf.String()) } if username := strings.TrimSpace(user.String()); len(username) > 0 { // If a non-empty username was given, add it to the URL via func // 'net/url.User()'. u.User = url.User(username) } // Regardless, create 'pcmd' to run the GIT_ASKPASS (or core.askpass) // command prompting for a password. pcmd := exec.Command(a.Program, a.args(fmt.Sprintf("Password for %q", u))...) pcmd.Stderr = &errbuf pcmd.Stdout = &pass tracerx.Printf("creds: filling with GIT_ASKPASS: %s", strings.Join(pcmd.Args, " ")) if err := pcmd.Run(); err != nil { return nil, err } if errbuf.Len() > 0 { return nil, errors.New(errbuf.String()) } // Finally, now that we have the username and password information, // store it in the creds instance that we will return to the caller. creds := make(Creds) creds["username"] = strings.TrimSpace(user.String()) creds["password"] = strings.TrimSpace(pass.String()) return creds, nil } // Approve implements CredentialHelper.Approve, and returns nil. The ASKPASS // credential helper does not implement credential approval. func (a *AskPassCredentialHelper) Approve(_ Creds) error { return nil } // Reject implements CredentialHelper.Reject, and returns nil. The ASKPASS // credential helper does not implement credential rejection. func (a *AskPassCredentialHelper) Reject(_ Creds) error { return nil } // args returns the arguments given to the ASKPASS program, if a prompt was // given. // See: https://git-scm.com/docs/gitcredentials#_requesting_credentials for // more.
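// For a URL such as https://example.com/foo, Fill above therefore runs
// the program twice, roughly as (shell quoting illustrative only):
//
//	$GIT_ASKPASS 'Username for "https://example.com/foo"'
//	$GIT_ASKPASS 'Password for "https://user@example.com/foo"'
//
// reading one value from the program's stdout each time.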
func (a *AskPassCredentialHelper) args(prompt string) []string { if len(prompt) == 0 { return nil } return []string{prompt} } type CredentialHelper interface { Fill(Creds) (Creds, error) Reject(Creds) error Approve(Creds) error } type Creds map[string]string func bufferCreds(c Creds) *bytes.Buffer { buf := new(bytes.Buffer) for k, v := range c { buf.Write([]byte(k)) buf.Write([]byte("=")) buf.Write([]byte(v)) buf.Write([]byte("\n")) } return buf } func withCredentialCache(helper CredentialHelper) CredentialHelper { return &credentialCacher{ cmu: new(sync.Mutex), creds: make(map[string]Creds), helper: helper, } } type credentialCacher struct { // cmu guards creds cmu *sync.Mutex creds map[string]Creds helper CredentialHelper } func credCacheKey(creds Creds) string { parts := []string{ creds["protocol"], creds["host"], creds["path"], } return strings.Join(parts, "//") } func (c *credentialCacher) Fill(creds Creds) (Creds, error) { key := credCacheKey(creds) c.cmu.Lock() defer c.cmu.Unlock() if cache, ok := c.creds[key]; ok { tracerx.Printf("creds: git credential cache (%q, %q, %q)", creds["protocol"], creds["host"], creds["path"]) return cache, nil } creds, err := c.helper.Fill(creds) if err == nil && len(creds["username"]) > 0 && len(creds["password"]) > 0 { c.creds[key] = creds } return creds, err } func (c *credentialCacher) Reject(creds Creds) error { c.cmu.Lock() defer c.cmu.Unlock() delete(c.creds, credCacheKey(creds)) return c.helper.Reject(creds) } func (c *credentialCacher) Approve(creds Creds) error { err := c.helper.Approve(creds) if err == nil { c.cmu.Lock() c.creds[credCacheKey(creds)] = creds c.cmu.Unlock() } return err } type commandCredentialHelper struct { SkipPrompt bool } func (h *commandCredentialHelper) Fill(creds Creds) (Creds, error) { tracerx.Printf("creds: git credential fill (%q, %q, %q)", creds["protocol"], creds["host"], creds["path"]) return h.exec("fill", creds) } func (h *commandCredentialHelper) Reject(creds Creds) error { _, err := h.exec("reject", creds) return err } func (h *commandCredentialHelper) Approve(creds Creds) error { _, err := h.exec("approve", creds) return err } func (h *commandCredentialHelper) exec(subcommand string, input Creds) (Creds, error) { output := new(bytes.Buffer) cmd := exec.Command("git", "credential", subcommand) cmd.Stdin = bufferCreds(input) cmd.Stdout = output /* There is a reason we don't hook up stderr here: Git's credential cache daemon helper does not close its stderr, so if this process is the process that fires up the daemon, it will wait forever (until the daemon exits, really) trying to read from stderr. See https://github.com/git-lfs/git-lfs/issues/117 for more details. */ err := cmd.Start() if err == nil { err = cmd.Wait() } if _, ok := err.(*exec.ExitError); ok { if h.SkipPrompt { return nil, fmt.Errorf("Change the GIT_TERMINAL_PROMPT env var to be prompted to enter your credentials for %s://%s.", input["protocol"], input["host"]) } // 'git credential' exits with 128 if the helper doesn't fill the username // and password values. 
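// For reference, the input streamed to 'git credential <subcommand>'
// is the standard newline-delimited key=value format produced by
// bufferCreds above, e.g.:
//
//	protocol=https
//	host=example.com
//	path=foo/bar.git
//
// and any response on stdout is parsed back into a Creds map the same
// way below.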
if subcommand == "fill" && err.Error() == "exit status 128" { return nil, nil } } if err != nil { return nil, fmt.Errorf("'git credential %s' error: %s\n", subcommand, err.Error()) } creds := make(Creds) for _, line := range strings.Split(output.String(), "\n") { pieces := strings.SplitN(line, "=", 2) if len(pieces) < 2 || len(pieces[1]) < 1 { continue } creds[pieces[0]] = pieces[1] } return creds, nil } git-lfs-2.3.4/lfsapi/creds_test.go000066400000000000000000000143031317167762300170470ustar00rootroot00000000000000package lfsapi import ( "errors" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // test that cache satisfies Fill() without looking at creds func TestCredsCacheFillFromCache(t *testing.T) { creds := newFakeCreds() cache := withCredentialCache(creds).(*credentialCacher) cache.creds["http//lfs.test//foo/bar"] = Creds{ "protocol": "http", "host": "lfs.test", "path": "foo/bar", "username": "u", "password": "p", } filled, err := cache.Fill(Creds{ "protocol": "http", "host": "lfs.test", "path": "foo/bar", }) assert.Nil(t, err) require.NotNil(t, filled) assert.Equal(t, "u", filled["username"]) assert.Equal(t, "p", filled["password"]) assert.Equal(t, 1, len(cache.creds)) cached, ok := cache.creds["http//lfs.test//foo/bar"] assert.True(t, ok) assert.Equal(t, "u", cached["username"]) assert.Equal(t, "p", cached["password"]) } // test that cache caches Fill() value from creds func TestCredsCacheFillFromValidHelperFill(t *testing.T) { creds := newFakeCreds() cache := withCredentialCache(creds).(*credentialCacher) creds.list = append(creds.list, Creds{ "protocol": "http", "host": "lfs.test", "path": "foo/bar", "username": "u", "password": "p", }) assert.Equal(t, 0, len(cache.creds)) filled, err := cache.Fill(Creds{ "protocol": "http", "host": "lfs.test", "path": "foo/bar", }) assert.Nil(t, err) require.NotNil(t, filled) assert.Equal(t, "u", filled["username"]) assert.Equal(t, "p", filled["password"]) assert.Equal(t, 1, len(cache.creds)) cached, ok := cache.creds["http//lfs.test//foo/bar"] assert.True(t, ok) assert.Equal(t, "u", cached["username"]) assert.Equal(t, "p", cached["password"]) creds.list = make([]Creds, 0) filled2, err := cache.Fill(Creds{ "protocol": "http", "host": "lfs.test", "path": "foo/bar", }) assert.Nil(t, err) require.NotNil(t, filled2) assert.Equal(t, "u", filled2["username"]) assert.Equal(t, "p", filled2["password"]) } // test that cache ignores Fill() value from creds with missing username+password func TestCredsCacheFillFromInvalidHelperFill(t *testing.T) { creds := newFakeCreds() cache := withCredentialCache(creds).(*credentialCacher) creds.list = append(creds.list, Creds{ "protocol": "http", "host": "lfs.test", "path": "foo/bar", "username": "no-password", }) assert.Equal(t, 0, len(cache.creds)) filled, err := cache.Fill(Creds{ "protocol": "http", "host": "lfs.test", "path": "foo/bar", "username": "u", "password": "p", }) assert.Nil(t, err) require.NotNil(t, filled) assert.Equal(t, "no-password", filled["username"]) assert.Equal(t, "", filled["password"]) assert.Equal(t, 0, len(cache.creds)) } // test that cache ignores Fill() value from creds with error func TestCredsCacheFillFromErroringHelperFill(t *testing.T) { creds := newFakeCreds() cache := withCredentialCache(&erroringCreds{creds}).(*credentialCacher) creds.list = append(creds.list, Creds{ "protocol": "http", "host": "lfs.test", "path": "foo/bar", "username": "u", "password": "p", }) assert.Equal(t, 0, len(cache.creds)) filled, err := cache.Fill(Creds{ "protocol": 
"http", "host": "lfs.test", "path": "foo/bar", }) assert.NotNil(t, err) require.NotNil(t, filled) assert.Equal(t, "u", filled["username"]) assert.Equal(t, "p", filled["password"]) assert.Equal(t, 0, len(cache.creds)) } func TestCredsCacheRejectWithoutError(t *testing.T) { creds := newFakeCreds() cache := withCredentialCache(creds).(*credentialCacher) cache.creds["http//lfs.test//foo/bar"] = Creds{ "protocol": "http", "host": "lfs.test", "path": "foo/bar", "username": "u", "password": "p", } err := cache.Reject(Creds{ "protocol": "http", "host": "lfs.test", "path": "foo/bar", }) assert.Nil(t, err) assert.Equal(t, 0, len(cache.creds)) } func TestCredsCacheRejectWithError(t *testing.T) { creds := newFakeCreds() cache := withCredentialCache(&erroringCreds{creds}).(*credentialCacher) cache.creds["http//lfs.test//foo/bar"] = Creds{ "protocol": "http", "host": "lfs.test", "path": "foo/bar", "username": "u", "password": "p", } err := cache.Reject(Creds{ "protocol": "http", "host": "lfs.test", "path": "foo/bar", }) assert.NotNil(t, err) assert.Equal(t, 0, len(cache.creds)) } func TestCredsCacheApproveWithoutError(t *testing.T) { creds := newFakeCreds() cache := withCredentialCache(creds).(*credentialCacher) assert.Equal(t, 0, len(cache.creds)) err := cache.Approve(Creds{ "protocol": "http", "host": "lfs.test", "path": "foo/bar", "username": "U", "password": "P", }) assert.Nil(t, err) assert.Equal(t, 1, len(cache.creds)) cached, ok := cache.creds["http//lfs.test//foo/bar"] assert.True(t, ok) assert.Equal(t, "U", cached["username"]) assert.Equal(t, "P", cached["password"]) } func TestCredsCacheApproveWithError(t *testing.T) { creds := newFakeCreds() cache := withCredentialCache(&erroringCreds{creds}).(*credentialCacher) assert.Equal(t, 0, len(cache.creds)) err := cache.Approve(Creds{ "protocol": "http", "host": "lfs.test", "path": "foo/bar", "username": "u", "password": "p", }) assert.NotNil(t, err) assert.Equal(t, 0, len(cache.creds)) } func newFakeCreds() *fakeCreds { return &fakeCreds{list: make([]Creds, 0)} } type erroringCreds struct { helper CredentialHelper } func (e *erroringCreds) Fill(creds Creds) (Creds, error) { c, _ := e.helper.Fill(creds) return c, errors.New("fill error") } func (e *erroringCreds) Reject(creds Creds) error { e.helper.Reject(creds) return errors.New("reject error") } func (e *erroringCreds) Approve(creds Creds) error { e.helper.Approve(creds) return errors.New("approve error") } type fakeCreds struct { list []Creds } func credsMatch(c1, c2 Creds) bool { return c1["protocol"] == c2["protocol"] && c1["host"] == c2["host"] && c1["path"] == c2["path"] } func (f *fakeCreds) Fill(creds Creds) (Creds, error) { for _, saved := range f.list { if credsMatch(creds, saved) { return saved, nil } } return creds, nil } func (f *fakeCreds) Reject(creds Creds) error { return nil } func (f *fakeCreds) Approve(creds Creds) error { return nil } git-lfs-2.3.4/lfsapi/endpoint.go000066400000000000000000000050331317167762300165300ustar00rootroot00000000000000package lfsapi import ( "fmt" "net/url" "regexp" "strings" ) const UrlUnknown = "" // An Endpoint describes how to access a Git LFS server. 
type Endpoint struct { Url string SshUserAndHost string SshPath string SshPort string Operation string } func endpointOperation(e Endpoint, method string) string { if len(e.Operation) > 0 { return e.Operation } switch method { case "GET", "HEAD": return "download" default: return "upload" } } // endpointFromBareSshUrl constructs a new endpoint from a bare SSH URL: // // user@host.com:path/to/repo.git // func endpointFromBareSshUrl(rawurl string) Endpoint { parts := strings.Split(rawurl, ":") partsLen := len(parts) if partsLen < 2 { return Endpoint{Url: rawurl} } // Treat presence of ':' as a bare URL var newPath string if len(parts) > 2 { // port included; really should only ever be 3 parts newPath = fmt.Sprintf("%v:%v", parts[0], strings.Join(parts[1:], "/")) } else { newPath = strings.Join(parts, "/") } newrawurl := fmt.Sprintf("ssh://%v", newPath) newu, err := url.Parse(newrawurl) if err != nil { return Endpoint{Url: UrlUnknown} } return endpointFromSshUrl(newu) } // endpointFromSshUrl constructs a new endpoint from an ssh:// URL func endpointFromSshUrl(u *url.URL) Endpoint { var endpoint Endpoint // Pull out port now, we need it separately for SSH regex := regexp.MustCompile(`^([^\:]+)(?:\:(\d+))?$`) match := regex.FindStringSubmatch(u.Host) if match == nil || len(match) < 2 { endpoint.Url = UrlUnknown return endpoint } host := match[1] if u.User != nil && u.User.Username() != "" { endpoint.SshUserAndHost = fmt.Sprintf("%s@%s", u.User.Username(), host) } else { endpoint.SshUserAndHost = host } if len(match) > 2 { endpoint.SshPort = match[2] } // u.Path includes a preceding '/', strip off manually // rooted paths in the URL will be '//path/to/blah' // this is just how Go's URL parsing works if strings.HasPrefix(u.Path, "/") { endpoint.SshPath = u.Path[1:] } else { endpoint.SshPath = u.Path } // Fallback URL for using HTTPS while still using SSH for git // u.Host includes host & port so can't use SSH port endpoint.Url = fmt.Sprintf("https://%s%s", host, u.Path) return endpoint } // Construct a new endpoint from a HTTP URL func endpointFromHttpUrl(u *url.URL) Endpoint { // just pass this straight through return Endpoint{Url: u.String()} } func endpointFromGitUrl(u *url.URL, e *endpointGitFinder) Endpoint { u.Scheme = e.gitProtocol return Endpoint{Url: u.String()} } git-lfs-2.3.4/lfsapi/endpoint_finder.go000066400000000000000000000145271317167762300200670ustar00rootroot00000000000000package lfsapi import ( "fmt" "net/url" "os" "path" "strings" "sync" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/git" "github.com/rubyist/tracerx" ) type Access string const ( NoneAccess Access = "none" BasicAccess Access = "basic" PrivateAccess Access = "private" NegotiateAccess Access = "negotiate" NTLMAccess Access = "ntlm" emptyAccess Access = "" defaultRemote = "origin" ) type EndpointFinder interface { NewEndpointFromCloneURL(rawurl string) Endpoint NewEndpoint(rawurl string) Endpoint Endpoint(operation, remote string) Endpoint RemoteEndpoint(operation, remote string) Endpoint GitRemoteURL(remote string, forpush bool) string AccessFor(rawurl string) Access SetAccess(rawurl string, access Access) GitProtocol() string } type endpointGitFinder struct { git Env gitProtocol string aliasMu sync.Mutex aliases map[string]string accessMu sync.Mutex urlAccess map[string]Access urlConfig *config.URLConfig } func NewEndpointFinder(git Env) EndpointFinder { e := &endpointGitFinder{ gitProtocol: "https", aliases: make(map[string]string), urlAccess: make(map[string]Access), } if git != nil { e.git = 
git e.urlConfig = config.NewURLConfig(e.git) if v, ok := git.Get("lfs.gitprotocol"); ok { e.gitProtocol = v } initAliases(e, git) } return e } func (e *endpointGitFinder) Endpoint(operation, remote string) Endpoint { ep := e.getEndpoint(operation, remote) ep.Operation = operation return ep } func (e *endpointGitFinder) getEndpoint(operation, remote string) Endpoint { if e.git == nil { return Endpoint{} } if operation == "upload" { if url, ok := e.git.Get("lfs.pushurl"); ok { return e.NewEndpoint(url) } } if url, ok := e.git.Get("lfs.url"); ok { return e.NewEndpoint(url) } if len(remote) > 0 && remote != defaultRemote { if e := e.RemoteEndpoint(operation, remote); len(e.Url) > 0 { return e } } return e.RemoteEndpoint(operation, defaultRemote) } func (e *endpointGitFinder) RemoteEndpoint(operation, remote string) Endpoint { if e.git == nil { return Endpoint{} } if len(remote) == 0 { remote = defaultRemote } // Support separate push URL if specified and pushing if operation == "upload" { if url, ok := e.git.Get("remote." + remote + ".lfspushurl"); ok { return e.NewEndpoint(url) } } if url, ok := e.git.Get("remote." + remote + ".lfsurl"); ok { return e.NewEndpoint(url) } // finally fall back on git remote url (also supports pushurl) if url := e.GitRemoteURL(remote, operation == "upload"); url != "" { return e.NewEndpointFromCloneURL(url) } return Endpoint{} } func (e *endpointGitFinder) GitRemoteURL(remote string, forpush bool) string { if e.git != nil { if forpush { if u, ok := e.git.Get("remote." + remote + ".pushurl"); ok { return u } } if u, ok := e.git.Get("remote." + remote + ".url"); ok { return u } } if err := git.ValidateRemote(remote); err == nil { return remote } return "" } func (e *endpointGitFinder) NewEndpointFromCloneURL(rawurl string) Endpoint { ep := e.NewEndpoint(rawurl) if ep.Url == UrlUnknown { return ep } if strings.HasSuffix(rawurl, "/") { ep.Url = rawurl[0 : len(rawurl)-1] } // When using main remote URL for HTTP, append info/lfs if path.Ext(ep.Url) == ".git" { ep.Url += "/info/lfs" } else { ep.Url += ".git/info/lfs" } return ep } func (e *endpointGitFinder) NewEndpoint(rawurl string) Endpoint { rawurl = e.ReplaceUrlAlias(rawurl) u, err := url.Parse(rawurl) if err != nil { return endpointFromBareSshUrl(rawurl) } switch u.Scheme { case "ssh": return endpointFromSshUrl(u) case "http", "https": return endpointFromHttpUrl(u) case "git": return endpointFromGitUrl(u, e) case "": return endpointFromBareSshUrl(u.String()) default: // Just passthrough to preserve return Endpoint{Url: rawurl} } } func (e *endpointGitFinder) AccessFor(rawurl string) Access { if e.git == nil { return NoneAccess } accessurl := urlWithoutAuth(rawurl) e.accessMu.Lock() defer e.accessMu.Unlock() if cached, ok := e.urlAccess[accessurl]; ok { return cached } e.urlAccess[accessurl] = e.fetchGitAccess(accessurl) return e.urlAccess[accessurl] } func (e *endpointGitFinder) SetAccess(rawurl string, access Access) { accessurl := urlWithoutAuth(rawurl) key := fmt.Sprintf("lfs.%s.access", accessurl) tracerx.Printf("setting repository access to %s", access) e.accessMu.Lock() defer e.accessMu.Unlock() switch access { case emptyAccess, NoneAccess: git.Config.UnsetLocalKey("", key) e.urlAccess[accessurl] = NoneAccess default: git.Config.SetLocal("", key, string(access)) e.urlAccess[accessurl] = access } } func urlWithoutAuth(rawurl string) string { if !strings.Contains(rawurl, "@") { return rawurl } u, err := url.Parse(rawurl) if err != nil { fmt.Fprintf(os.Stderr, "Error parsing URL %q: %s", rawurl, err) return 
rawurl } u.User = nil return u.String() } func (e *endpointGitFinder) fetchGitAccess(rawurl string) Access { if v, _ := e.urlConfig.Get("lfs", rawurl, "access"); len(v) > 0 { access := Access(strings.ToLower(v)) if access == PrivateAccess { return BasicAccess } return access } return NoneAccess } func (e *endpointGitFinder) GitProtocol() string { return e.gitProtocol } // ReplaceUrlAlias returns a url with a prefix from a `url.*.insteadof` git // config setting. If multiple aliases match, use the longest one. // See https://git-scm.com/docs/git-config for Git's docs. func (e *endpointGitFinder) ReplaceUrlAlias(rawurl string) string { e.aliasMu.Lock() defer e.aliasMu.Unlock() var longestalias string for alias, _ := range e.aliases { if !strings.HasPrefix(rawurl, alias) { continue } if longestalias < alias { longestalias = alias } } if len(longestalias) > 0 { return e.aliases[longestalias] + rawurl[len(longestalias):] } return rawurl } func initAliases(e *endpointGitFinder, git Env) { prefix := "url." suffix := ".insteadof" for gitkey, gitval := range git.All() { if len(gitval) == 0 || !(strings.HasPrefix(gitkey, prefix) && strings.HasSuffix(gitkey, suffix)) { continue } if _, ok := e.aliases[gitval[len(gitval)-1]]; ok { fmt.Fprintf(os.Stderr, "WARNING: Multiple 'url.*.insteadof' keys with the same alias: %q\n", gitval) } e.aliases[gitval[len(gitval)-1]] = gitkey[len(prefix) : len(gitkey)-len(suffix)] } } git-lfs-2.3.4/lfsapi/endpoint_finder_test.go000066400000000000000000000262161317167762300211240ustar00rootroot00000000000000package lfsapi import ( "testing" "github.com/stretchr/testify/assert" ) func TestEndpointDefaultsToOrigin(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.lfsurl": "abc", })) e := finder.Endpoint("download", "") assert.Equal(t, "abc", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) } func TestEndpointOverridesOrigin(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "lfs.url": "abc", "remote.origin.lfsurl": "def", })) e := finder.Endpoint("download", "") assert.Equal(t, "abc", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) } func TestEndpointNoOverrideDefaultRemote(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.lfsurl": "abc", "remote.other.lfsurl": "def", })) e := finder.Endpoint("download", "") assert.Equal(t, "abc", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) } func TestEndpointUseAlternateRemote(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.lfsurl": "abc", "remote.other.lfsurl": "def", })) e := finder.Endpoint("download", "other") assert.Equal(t, "def", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) } func TestEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.url": "https://example.com/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) } func TestBareEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.url": "https://example.com/foo/bar.git", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) } func 
TestEndpointSeparateClonePushUrl(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.url": "https://example.com/foo/bar.git", "remote.origin.pushurl": "https://readwrite.com/foo/bar.git", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) e = finder.Endpoint("upload", "") assert.Equal(t, "https://readwrite.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) } func TestEndpointOverriddenSeparateClonePushLfsUrl(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.url": "https://example.com/foo/bar.git", "remote.origin.pushurl": "https://readwrite.com/foo/bar.git", "remote.origin.lfsurl": "https://examplelfs.com/foo/bar", "remote.origin.lfspushurl": "https://readwritelfs.com/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://examplelfs.com/foo/bar", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) e = finder.Endpoint("upload", "") assert.Equal(t, "https://readwritelfs.com/foo/bar", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) } func TestEndpointGlobalSeparateLfsPush(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "lfs.url": "https://readonly.com/foo/bar", "lfs.pushurl": "https://write.com/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://readonly.com/foo/bar", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) e = finder.Endpoint("upload", "") assert.Equal(t, "https://write.com/foo/bar", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) } func TestSSHEndpointOverridden(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.url": "git@example.com:foo/bar", "remote.origin.lfsurl": "lfs", })) e := finder.Endpoint("download", "") assert.Equal(t, "lfs", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) assert.Equal(t, "", e.SshPort) } func TestSSHEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.url": "ssh://git@example.com/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "git@example.com", e.SshUserAndHost) assert.Equal(t, "foo/bar", e.SshPath) assert.Equal(t, "", e.SshPort) } func TestSSHCustomPortEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.url": "ssh://git@example.com:9000/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "git@example.com", e.SshUserAndHost) assert.Equal(t, "foo/bar", e.SshPath) assert.Equal(t, "9000", e.SshPort) } func TestBareSSHEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.url": "git@example.com:foo/bar.git", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "git@example.com", e.SshUserAndHost) assert.Equal(t, "foo/bar.git", e.SshPath) assert.Equal(t, "", e.SshPort) } func TestSSHEndpointFromGlobalLfsUrl(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "lfs.url": "git@example.com:foo/bar.git", })) e := 
finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git", e.Url) assert.Equal(t, "git@example.com", e.SshUserAndHost) assert.Equal(t, "foo/bar.git", e.SshPath) assert.Equal(t, "", e.SshPort) } func TestHTTPEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.url": "http://example.com/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "http://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) assert.Equal(t, "", e.SshPort) } func TestBareHTTPEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.url": "http://example.com/foo/bar.git", })) e := finder.Endpoint("download", "") assert.Equal(t, "http://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) assert.Equal(t, "", e.SshPort) } func TestGitEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.url": "git://example.com/foo/bar", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) assert.Equal(t, "", e.SshPort) } func TestGitEndpointAddsLfsSuffixWithCustomProtocol(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.url": "git://example.com/foo/bar", "lfs.gitprotocol": "http", })) e := finder.Endpoint("download", "") assert.Equal(t, "http://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) assert.Equal(t, "", e.SshPort) } func TestBareGitEndpointAddsLfsSuffix(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "remote.origin.url": "git://example.com/foo/bar.git", })) e := finder.Endpoint("download", "") assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) assert.Equal(t, "", e.SshPort) } func TestAccessConfig(t *testing.T) { type accessTest struct { Access string PrivateAccess bool } tests := map[string]accessTest{ "": {"none", false}, "basic": {"basic", true}, "BASIC": {"basic", true}, "private": {"basic", true}, "PRIVATE": {"basic", true}, "invalidauth": {"invalidauth", true}, } for value, expected := range tests { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "lfs.url": "http://example.com", "lfs.http://example.com.access": value, "lfs.https://example.com.access": "bad", })) dl := finder.Endpoint("upload", "") ul := finder.Endpoint("download", "") if access := finder.AccessFor(dl.Url); access != Access(expected.Access) { t.Errorf("Expected Access() with value %q to be %v, got %v", value, expected.Access, access) } if access := finder.AccessFor(ul.Url); access != Access(expected.Access) { t.Errorf("Expected Access() with value %q to be %v, got %v", value, expected.Access, access) } } // Test again but with separate push url for value, expected := range tests { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "lfs.url": "http://example.com", "lfs.pushurl": "http://examplepush.com", "lfs.http://example.com.access": value, "lfs.http://examplepush.com.access": value, "lfs.https://example.com.access": "bad", })) dl := finder.Endpoint("upload", "") ul := finder.Endpoint("download", "") if access := finder.AccessFor(dl.Url); access != Access(expected.Access) { 
t.Errorf("Expected Access() with value %q to be %v, got %v", value, expected.Access, access) } if access := finder.AccessFor(ul.Url); access != Access(expected.Access) { t.Errorf("Expected Access() with value %q to be %v, got %v", value, expected.Access, access) } } } func TestAccessAbsentConfig(t *testing.T) { finder := NewEndpointFinder(nil) assert.Equal(t, NoneAccess, finder.AccessFor(finder.Endpoint("download", "").Url)) assert.Equal(t, NoneAccess, finder.AccessFor(finder.Endpoint("upload", "").Url)) } func TestSetAccess(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{})) assert.Equal(t, NoneAccess, finder.AccessFor("http://example.com")) finder.SetAccess("http://example.com", NTLMAccess) assert.Equal(t, NTLMAccess, finder.AccessFor("http://example.com")) } func TestChangeAccess(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "lfs.http://example.com.access": "basic", })) assert.Equal(t, BasicAccess, finder.AccessFor("http://example.com")) finder.SetAccess("http://example.com", NTLMAccess) assert.Equal(t, NTLMAccess, finder.AccessFor("http://example.com")) } func TestDeleteAccessWithNone(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "lfs.http://example.com.access": "basic", })) assert.Equal(t, BasicAccess, finder.AccessFor("http://example.com")) finder.SetAccess("http://example.com", NoneAccess) assert.Equal(t, NoneAccess, finder.AccessFor("http://example.com")) } func TestDeleteAccessWithEmptyString(t *testing.T) { finder := NewEndpointFinder(UniqTestEnv(map[string]string{ "lfs.http://example.com.access": "basic", })) assert.Equal(t, BasicAccess, finder.AccessFor("http://example.com")) finder.SetAccess("http://example.com", Access("")) assert.Equal(t, NoneAccess, finder.AccessFor("http://example.com")) } git-lfs-2.3.4/lfsapi/endpoint_test.go000066400000000000000000000007241317167762300175710ustar00rootroot00000000000000package lfsapi import ( "testing" ) func TestNewEndpointFromCloneURLWithConfig(t *testing.T) { expected := "https://foo/bar.git/info/lfs" tests := []string{ "https://foo/bar", "https://foo/bar/", "https://foo/bar.git", "https://foo/bar.git/", } finder := NewEndpointFinder(nil) for _, actual := range tests { e := finder.NewEndpointFromCloneURL(actual) if e.Url != expected { t.Errorf("%s returned bad endpoint url %s", actual, e.Url) } } } git-lfs-2.3.4/lfsapi/errors.go000066400000000000000000000050351317167762300162260ustar00rootroot00000000000000package lfsapi import ( "fmt" "net/http" "strings" "github.com/git-lfs/git-lfs/errors" ) type httpError interface { Error() string HTTPResponse() *http.Response } func IsHTTP(err error) (*http.Response, bool) { if httpErr, ok := err.(httpError); ok { return httpErr.HTTPResponse(), true } return nil, false } type ClientError struct { Message string `json:"message"` DocumentationUrl string `json:"documentation_url,omitempty"` RequestId string `json:"request_id,omitempty"` response *http.Response } func (e *ClientError) HTTPResponse() *http.Response { return e.response } func (e *ClientError) Error() string { return e.Message } func (c *Client) handleResponse(res *http.Response) error { if res.StatusCode < 400 { return nil } cliErr := &ClientError{response: res} err := DecodeJSON(res, cliErr) if IsDecodeTypeError(err) { err = nil } if err == nil { if len(cliErr.Message) == 0 { err = defaultError(res) } else { err = cliErr } } if res.StatusCode == 401 { return errors.NewAuthError(err) } if res.StatusCode > 499 && res.StatusCode != 501 && res.StatusCode 
!= 507 && res.StatusCode != 509 { return errors.NewFatalError(err) } return err } type statusCodeError struct { response *http.Response } func NewStatusCodeError(res *http.Response) error { return &statusCodeError{response: res} } func (e *statusCodeError) Error() string { req := e.response.Request return fmt.Sprintf("Invalid HTTP status for %s %s: %d", req.Method, strings.SplitN(req.URL.String(), "?", 2)[0], e.response.StatusCode, ) } func (e *statusCodeError) HTTPResponse() *http.Response { return e.response } var ( defaultErrors = map[int]string{ 400: "Client error: %s", 401: "Authorization error: %s\nCheck that you have proper access to the repository", 403: "Authorization error: %s\nCheck that you have proper access to the repository", 404: "Repository or object not found: %s\nCheck that it exists and that you have proper access to it", 429: "Rate limit exceeded: %s", 500: "Server error: %s", 501: "Not Implemented: %s", 507: "Insufficient server storage: %s", 509: "Bandwidth limit exceeded: %s", } ) func defaultError(res *http.Response) error { var msgFmt string if f, ok := defaultErrors[res.StatusCode]; ok { msgFmt = f } else if res.StatusCode < 500 { msgFmt = defaultErrors[400] + fmt.Sprintf(" from HTTP %d", res.StatusCode) } else { msgFmt = defaultErrors[500] + fmt.Sprintf(" from HTTP %d", res.StatusCode) } return errors.Errorf(msgFmt, res.Request.URL) } git-lfs-2.3.4/lfsapi/lfsapi.go000066400000000000000000000124611317167762300161710ustar00rootroot00000000000000package lfsapi import ( "encoding/json" "fmt" "io" "net/http" "regexp" "strconv" "strings" "sync" "github.com/ThomsonReutersEikon/go-ntlm/ntlm" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" ) var ( lfsMediaTypeRE = regexp.MustCompile(`\Aapplication/vnd\.git\-lfs\+json(;|\z)`) jsonMediaTypeRE = regexp.MustCompile(`\Aapplication/json(;|\z)`) ) type Client struct { Endpoints EndpointFinder Credentials CredentialHelper SSH SSHResolver Netrc NetrcFinder DialTimeout int KeepaliveTimeout int TLSTimeout int ConcurrentTransfers int HTTPSProxy string HTTPProxy string NoProxy string SkipSSLVerify bool Verbose bool DebuggingVerbose bool VerboseOut io.Writer hostClients map[string]*http.Client clientMu sync.Mutex ntlmSessions map[string]ntlm.ClientSession ntlmMu sync.Mutex httpLogger *syncLogger LoggingStats bool // DEPRECATED // only used for per-host ssl certs gitEnv Env osEnv Env uc *config.URLConfig } func NewClient(osEnv Env, gitEnv Env) (*Client, error) { if osEnv == nil { osEnv = make(TestEnv) } if gitEnv == nil { gitEnv = make(TestEnv) } netrc, netrcfile, err := ParseNetrc(osEnv) if err != nil { return nil, errors.Wrap(err, fmt.Sprintf("bad netrc file %s", netrcfile)) } httpsProxy, httpProxy, noProxy := getProxyServers(osEnv, gitEnv) creds, err := getCredentialHelper(&config.Configuration{ Os: osEnv, Git: gitEnv}) if err != nil { return nil, errors.Wrap(err, "cannot find credential helper(s)") } var sshResolver SSHResolver = &sshAuthClient{os: osEnv} if gitEnv.Bool("lfs.cachecredentials", true) { sshResolver = withSSHCache(sshResolver) } c := &Client{ Endpoints: NewEndpointFinder(gitEnv), Credentials: creds, SSH: sshResolver, Netrc: netrc, DialTimeout: gitEnv.Int("lfs.dialtimeout", 0), KeepaliveTimeout: gitEnv.Int("lfs.keepalive", 0), TLSTimeout: gitEnv.Int("lfs.tlstimeout", 0), ConcurrentTransfers: gitEnv.Int("lfs.concurrenttransfers", 3), SkipSSLVerify: !gitEnv.Bool("http.sslverify", true) || osEnv.Bool("GIT_SSL_NO_VERIFY", false), Verbose: osEnv.Bool("GIT_CURL_VERBOSE", false), DebuggingVerbose: 
osEnv.Bool("LFS_DEBUG_HTTP", false), HTTPSProxy: httpsProxy, HTTPProxy: httpProxy, NoProxy: noProxy, gitEnv: gitEnv, osEnv: osEnv, uc: config.NewURLConfig(gitEnv), } return c, nil } func (c *Client) GitEnv() Env { return c.gitEnv } func (c *Client) OSEnv() Env { return c.osEnv } func IsDecodeTypeError(err error) bool { _, ok := err.(*decodeTypeError) return ok } type decodeTypeError struct { Type string } func (e *decodeTypeError) TypeError() {} func (e *decodeTypeError) Error() string { return fmt.Sprintf("Expected json type, got: %q", e.Type) } func DecodeJSON(res *http.Response, obj interface{}) error { ctype := res.Header.Get("Content-Type") if !(lfsMediaTypeRE.MatchString(ctype) || jsonMediaTypeRE.MatchString(ctype)) { return &decodeTypeError{Type: ctype} } err := json.NewDecoder(res.Body).Decode(obj) res.Body.Close() if err != nil { return errors.Wrapf(err, "Unable to parse HTTP response for %s %s", res.Request.Method, res.Request.URL) } return nil } // Env is an interface for the config.Environment methods that this package // relies on. type Env interface { Get(string) (string, bool) GetAll(string) []string Int(string, int) int Bool(string, bool) bool All() map[string][]string } type UniqTestEnv map[string]string func (e UniqTestEnv) Get(key string) (v string, ok bool) { v, ok = e[key] return } func (e UniqTestEnv) GetAll(key string) []string { if v, ok := e.Get(key); ok { return []string{v} } return make([]string, 0) } func (e UniqTestEnv) Int(key string, def int) (val int) { s, _ := e.Get(key) if len(s) == 0 { return def } i, err := strconv.Atoi(s) if err != nil { return def } return i } func (e UniqTestEnv) Bool(key string, def bool) (val bool) { s, _ := e.Get(key) if len(s) == 0 { return def } switch strings.ToLower(s) { case "true", "1", "on", "yes", "t": return true case "false", "0", "off", "no", "f": return false default: return false } } func (e UniqTestEnv) All() map[string][]string { m := make(map[string][]string) for k, _ := range e { m[k] = e.GetAll(k) } return m } // TestEnv is a basic config.Environment implementation. Only used in tests, or // as a zero value to NewClient(). 
type TestEnv map[string][]string func (e TestEnv) Get(key string) (string, bool) { all := e.GetAll(key) if len(all) == 0 { return "", false } return all[len(all)-1], true } func (e TestEnv) GetAll(key string) []string { return e[key] } func (e TestEnv) Int(key string, def int) (val int) { s, _ := e.Get(key) if len(s) == 0 { return def } i, err := strconv.Atoi(s) if err != nil { return def } return i } func (e TestEnv) Bool(key string, def bool) (val bool) { s, _ := e.Get(key) if len(s) == 0 { return def } switch strings.ToLower(s) { case "true", "1", "on", "yes", "t": return true case "false", "0", "off", "no", "f": return false default: return false } } func (e TestEnv) All() map[string][]string { return e } git-lfs-2.3.4/lfsapi/lfsapi_nix.go000066400000000000000000000001011317167762300170330ustar00rootroot00000000000000// +build !windows package lfsapi var netrcBasename = ".netrc" git-lfs-2.3.4/lfsapi/lfsapi_windows.go000066400000000000000000000001001317167762300177260ustar00rootroot00000000000000// +build windows package lfsapi var netrcBasename = "_netrc" git-lfs-2.3.4/lfsapi/netrc.go000066400000000000000000000011451317167762300160230ustar00rootroot00000000000000package lfsapi import ( "os" "path/filepath" "github.com/bgentry/go-netrc/netrc" ) type NetrcFinder interface { FindMachine(string) *netrc.Machine } func ParseNetrc(osEnv Env) (NetrcFinder, string, error) { home, _ := osEnv.Get("HOME") if len(home) == 0 { return &noFinder{}, "", nil } nrcfilename := filepath.Join(home, netrcBasename) if _, err := os.Stat(nrcfilename); err != nil { return &noFinder{}, nrcfilename, nil } f, err := netrc.ParseFile(nrcfilename) return f, nrcfilename, err } type noFinder struct{} func (f *noFinder) FindMachine(host string) *netrc.Machine { return nil } git-lfs-2.3.4/lfsapi/netrc_test.go000066400000000000000000000030231317167762300170570ustar00rootroot00000000000000package lfsapi import ( "net/http" "net/url" "strings" "testing" "github.com/bgentry/go-netrc/netrc" ) func TestNetrcWithHostAndPort(t *testing.T) { netrcFinder := &fakeNetrc{} u, err := url.Parse("http://netrc-host:123/foo/bar") if err != nil { t.Fatal(err) } req := &http.Request{ URL: u, Header: http.Header{}, } if !setAuthFromNetrc(netrcFinder, req) { t.Fatal("no netrc match") } auth := req.Header.Get("Authorization") if auth != "Basic YWJjOmRlZg==" { t.Fatalf("bad basic auth: %q", auth) } } func TestNetrcWithHost(t *testing.T) { netrcFinder := &fakeNetrc{} u, err := url.Parse("http://netrc-host/foo/bar") if err != nil { t.Fatal(err) } req := &http.Request{ URL: u, Header: http.Header{}, } if !setAuthFromNetrc(netrcFinder, req) { t.Fatalf("no netrc match") } auth := req.Header.Get("Authorization") if auth != "Basic YWJjOmRlZg==" { t.Fatalf("bad basic auth: %q", auth) } } func TestNetrcWithBadHost(t *testing.T) { netrcFinder := &fakeNetrc{} u, err := url.Parse("http://other-host/foo/bar") if err != nil { t.Fatal(err) } req := &http.Request{ URL: u, Header: http.Header{}, } if setAuthFromNetrc(netrcFinder, req) { t.Fatalf("unexpected netrc match") } auth := req.Header.Get("Authorization") if auth != "" { t.Fatalf("bad basic auth: %q", auth) } } type fakeNetrc struct{} func (n *fakeNetrc) FindMachine(host string) *netrc.Machine { if strings.Contains(host, "netrc") { return &netrc.Machine{Login: "abc", Password: "def"} } return nil } git-lfs-2.3.4/lfsapi/ntlm.go000066400000000000000000000076021317167762300156660ustar00rootroot00000000000000package lfsapi import ( "encoding/base64" "fmt" "io" "io/ioutil" "net/http" "net/url" "strings" 
"github.com/ThomsonReutersEikon/go-ntlm/ntlm" "github.com/git-lfs/git-lfs/errors" ) func (c *Client) doWithNTLM(req *http.Request, credHelper CredentialHelper, creds Creds, credsURL *url.URL) (*http.Response, error) { res, err := c.Do(req) if err != nil && !errors.IsAuthError(err) { return res, err } if res.StatusCode != 401 { return res, nil } return c.ntlmReAuth(req, credHelper, creds, true) } // If the status is 401 then we need to re-authenticate func (c *Client) ntlmReAuth(req *http.Request, credHelper CredentialHelper, creds Creds, retry bool) (*http.Response, error) { body, err := rewoundRequestBody(req) if err != nil { return nil, err } req.Body = body chRes, challengeMsg, err := c.ntlmNegotiate(req, ntlmNegotiateMessage) if err != nil { return chRes, err } body, err = rewoundRequestBody(req) if err != nil { return nil, err } req.Body = body res, err := c.ntlmChallenge(req, challengeMsg, creds) if err != nil { return res, err } switch res.StatusCode { case 401: credHelper.Reject(creds) if retry { return c.ntlmReAuth(req, credHelper, creds, false) } case 403: credHelper.Reject(creds) default: if res.StatusCode < 300 && res.StatusCode > 199 { credHelper.Approve(creds) } } return res, nil } func (c *Client) ntlmNegotiate(req *http.Request, message string) (*http.Response, []byte, error) { req.Header.Add("Authorization", message) res, err := c.Do(req) if err != nil && !errors.IsAuthError(err) { return res, nil, err } io.Copy(ioutil.Discard, res.Body) res.Body.Close() by, err := parseChallengeResponse(res) return res, by, err } func (c *Client) ntlmChallenge(req *http.Request, challengeBytes []byte, creds Creds) (*http.Response, error) { challenge, err := ntlm.ParseChallengeMessage(challengeBytes) if err != nil { return nil, err } session, err := c.ntlmClientSession(creds) if err != nil { return nil, err } session.ProcessChallengeMessage(challenge) authenticate, err := session.GenerateAuthenticateMessage() if err != nil { return nil, err } authMsg := base64.StdEncoding.EncodeToString(authenticate.Bytes()) req.Header.Set("Authorization", "NTLM "+authMsg) return c.Do(req) } func (c *Client) ntlmClientSession(creds Creds) (ntlm.ClientSession, error) { c.ntlmMu.Lock() defer c.ntlmMu.Unlock() splits := strings.Split(creds["username"], "\\") if len(splits) != 2 { return nil, fmt.Errorf("Your user name must be of the form DOMAIN\\user. It is currently %s", creds["username"]) } domain := strings.ToUpper(splits[0]) username := splits[1] if c.ntlmSessions == nil { c.ntlmSessions = make(map[string]ntlm.ClientSession) } if ses, ok := c.ntlmSessions[domain]; ok { return ses, nil } session, err := ntlm.CreateClientSession(ntlm.Version2, ntlm.ConnectionOrientedMode) if err != nil { return nil, err } session.SetUserInfo(username, creds["password"], strings.ToUpper(splits[0])) c.ntlmSessions[domain] = session return session, nil } func parseChallengeResponse(res *http.Response) ([]byte, error) { header := res.Header.Get("Www-Authenticate") if len(header) < 6 { return nil, fmt.Errorf("Invalid NTLM challenge response: %q", header) } //parse out the "NTLM " at the beginning of the response challenge := header[5:] val, err := base64.StdEncoding.DecodeString(challenge) if err != nil { return nil, err } return []byte(val), nil } func rewoundRequestBody(req *http.Request) (io.ReadCloser, error) { if req.Body == nil { return nil, nil } body, ok := req.Body.(ReadSeekCloser) if !ok { return nil, fmt.Errorf("Request body must implement io.ReadCloser and io.Seeker. 
Got: %T", body) } _, err := body.Seek(0, io.SeekStart) return body, err } const ntlmNegotiateMessage = "NTLM TlRMTVNTUAABAAAAB7IIogwADAAzAAAACwALACgAAAAKAAAoAAAAD1dJTExISS1NQUlOTk9SVEhBTUVSSUNB" git-lfs-2.3.4/lfsapi/ntlm_test.go000066400000000000000000000115331317167762300167230ustar00rootroot00000000000000package lfsapi import ( "encoding/base64" "io/ioutil" "net/http" "net/http/httptest" "net/url" "strings" "sync/atomic" "testing" "github.com/ThomsonReutersEikon/go-ntlm/ntlm" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestNTLMAuth(t *testing.T) { session, err := ntlm.CreateServerSession(ntlm.Version2, ntlm.ConnectionOrientedMode) require.Nil(t, err) session.SetUserInfo("ntlmuser", "ntlmpass", "NTLMDOMAIN") var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { reqIndex := atomic.LoadUint32(&called) atomic.AddUint32(&called, 1) authHeader := req.Header.Get("Authorization") t.Logf("REQUEST %d: %s %s", reqIndex, req.Method, req.URL) t.Logf("AUTH: %q", authHeader) // assert full body is sent each time by, err := ioutil.ReadAll(req.Body) req.Body.Close() if assert.Nil(t, err) { assert.Equal(t, "ntlm", string(by)) } switch authHeader { case "": w.Header().Set("Www-Authenticate", "ntlm") w.WriteHeader(401) case ntlmNegotiateMessage: assert.True(t, strings.HasPrefix(req.Header.Get("Authorization"), "NTLM ")) ch, err := session.GenerateChallengeMessage() if !assert.Nil(t, err) { t.Logf("challenge gen error: %+v", err) w.WriteHeader(500) return } chMsg := base64.StdEncoding.EncodeToString(ch.Bytes()) w.Header().Set("Www-Authenticate", "ntlm "+chMsg) w.WriteHeader(401) default: // should be an auth msg authHeader := req.Header.Get("Authorization") assert.True(t, strings.HasPrefix(strings.ToUpper(authHeader), "NTLM ")) auth := authHeader[5:] // strip "ntlm " prefix val, err := base64.StdEncoding.DecodeString(auth) if !assert.Nil(t, err) { t.Logf("auth base64 error: %+v", err) w.WriteHeader(500) return } _, err = ntlm.ParseAuthenticateMessage(val, 2) if !assert.Nil(t, err) { t.Logf("auth parse error: %+v", err) w.WriteHeader(500) return } w.WriteHeader(200) } })) defer srv.Close() req, err := http.NewRequest("POST", srv.URL+"/ntlm", NewByteBody([]byte("ntlm"))) require.Nil(t, err) credHelper := newMockCredentialHelper() cli, err := NewClient(nil, UniqTestEnv(map[string]string{ "lfs.url": srv.URL + "/ntlm", "lfs." + srv.URL + "/ntlm.access": "ntlm", })) cli.Credentials = credHelper require.Nil(t, err) // ntlm support pulls domain and login info from git credentials srvURL, err := url.Parse(srv.URL) require.Nil(t, err) creds := Creds{ "protocol": srvURL.Scheme, "host": srvURL.Host, "path": "ntlm", "username": "ntlmdomain\\ntlmuser", "password": "ntlmpass", } credHelper.Approve(creds) res, err := cli.DoWithAuth("remote", req) require.Nil(t, err) assert.Equal(t, 200, res.StatusCode) assert.True(t, credHelper.IsApproved(creds)) } func TestNtlmClientSession(t *testing.T) { cli, err := NewClient(nil, nil) require.Nil(t, err) creds := Creds{"username": "MOOSEDOMAIN\\canadian", "password": "MooseAntlersYeah"} session1, err := cli.ntlmClientSession(creds) assert.Nil(t, err) assert.NotNil(t, session1) // The second call should ignore creds and give the session we just created. 
badCreds := Creds{"username": "MOOSEDOMAIN\\badusername", "password": "MooseAntlersYeah"} session2, err := cli.ntlmClientSession(badCreds) assert.Nil(t, err) assert.NotNil(t, session2) assert.EqualValues(t, session1, session2) } func TestNtlmClientSessionBadCreds(t *testing.T) { cli, err := NewClient(nil, nil) require.Nil(t, err) creds := Creds{"username": "badusername", "password": "MooseAntlersYeah"} _, err = cli.ntlmClientSession(creds) assert.NotNil(t, err) } func TestNtlmHeaderParseValid(t *testing.T) { res := http.Response{} res.Header = make(map[string][]string) res.Header.Add("Www-Authenticate", "NTLM "+base64.StdEncoding.EncodeToString([]byte("I am a moose"))) bytes, err := parseChallengeResponse(&res) assert.Nil(t, err) assert.False(t, strings.HasPrefix(string(bytes), "NTLM")) } func TestNtlmHeaderParseInvalidLength(t *testing.T) { res := http.Response{} res.Header = make(map[string][]string) res.Header.Add("Www-Authenticate", "NTL") ret, err := parseChallengeResponse(&res) assert.NotNil(t, err) assert.Nil(t, ret) } func TestNtlmHeaderParseInvalid(t *testing.T) { res := http.Response{} res.Header = make(map[string][]string) res.Header.Add("Www-Authenticate", base64.StdEncoding.EncodeToString([]byte("NTLM I am a moose"))) ret, err := parseChallengeResponse(&res) assert.NotNil(t, err) assert.Nil(t, ret) } func assertRequestsEqual(t *testing.T, req1 *http.Request, req2 *http.Request, req1Body []byte) { assert.Equal(t, req1.Method, req2.Method) for k, v := range req1.Header { assert.Equal(t, v, req2.Header[k]) } if req1.Body == nil { assert.Nil(t, req2.Body) } else { bytes2, _ := ioutil.ReadAll(req2.Body) assert.Equal(t, req1Body, bytes2) } } git-lfs-2.3.4/lfsapi/proxy.go000066400000000000000000000066351317167762300161020ustar00rootroot00000000000000package lfsapi import ( "net/http" "net/url" "strings" "fmt" ) // Logic is copied, with small changes, from "net/http".ProxyFromEnvironment in the go std lib. func proxyFromClient(c *Client) func(req *http.Request) (*url.URL, error) { httpProxy := c.HTTPProxy httpsProxy := c.HTTPSProxy noProxy := c.NoProxy return func(req *http.Request) (*url.URL, error) { var proxy string if req.URL.Scheme == "https" { proxy = httpsProxy } if len(proxy) == 0 { proxy = httpProxy } if len(proxy) == 0 { return nil, nil } if !useProxy(noProxy, canonicalAddr(req.URL)) { return nil, nil } proxyURL, err := url.Parse(proxy) if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") { // proxy was bogus. Try prepending "http://" to it and // see if that parses correctly. If not, we fall // through and complain about the original one. 
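// example (assumed value): http.proxy = proxy.example.com:3128 parses // without an "http" scheme, so it is retried here as // "http://proxy.example.com:3128".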
if httpProxyURL, httpErr := url.Parse("http://" + proxy); httpErr == nil { return httpProxyURL, nil } } if err != nil { return nil, fmt.Errorf("invalid proxy address: %q: %v", proxy, err) } return proxyURL, nil } } func getProxyServers(osEnv Env, gitEnv Env) (string, string, string) { var httpsProxy string httpProxy, _ := gitEnv.Get("http.proxy") if strings.HasPrefix(httpProxy, "https://") { httpsProxy = httpProxy } if len(httpsProxy) == 0 { httpsProxy, _ = osEnv.Get("HTTPS_PROXY") } if len(httpsProxy) == 0 { httpsProxy, _ = osEnv.Get("https_proxy") } if len(httpProxy) == 0 { httpProxy, _ = osEnv.Get("HTTP_PROXY") } if len(httpProxy) == 0 { httpProxy, _ = osEnv.Get("http_proxy") } noProxy, _ := osEnv.Get("NO_PROXY") if len(noProxy) == 0 { noProxy, _ = osEnv.Get("no_proxy") } return httpsProxy, httpProxy, noProxy } // canonicalAddr returns url.Host but always with a ":port" suffix // Copied from "net/http".ProxyFromEnvironment in the go std lib. func canonicalAddr(url *url.URL) string { addr := url.Host if !hasPort(addr) { return addr + ":" + portMap[url.Scheme] } return addr } // useProxy reports whether requests to addr should use a proxy, // according to the noProxy value (built from the NO_PROXY or no_proxy // environment variables). // addr is always a canonicalAddr with a host and port. // Copied from "net/http".ProxyFromEnvironment in the go std lib // and adapted to allow proxy usage even for localhost. func useProxy(noProxy, addr string) bool { if len(addr) == 0 { return true } if noProxy == "*" { return false } addr = strings.ToLower(strings.TrimSpace(addr)) if hasPort(addr) { addr = addr[:strings.LastIndex(addr, ":")] } for _, p := range strings.Split(noProxy, ",") { p = strings.ToLower(strings.TrimSpace(p)) if len(p) == 0 { continue } if hasPort(p) { p = p[:strings.LastIndex(p, ":")] } if addr == p { return false } if p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:]) { // noProxy ".foo.com" matches "bar.foo.com" or "foo.com" return false } if p[0] != '.' && strings.HasSuffix(addr, p) && addr[len(addr)-len(p)-1] == '.' { // noProxy "foo.com" matches "bar.foo.com" return false } } return true } // Given a string of the form "host", "host:port", or "[ipv6::address]:port", // return true if the string includes a port. // Copied from "net/http".ProxyFromEnvironment in the go std lib. 
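// Illustrative cases (not from the original source): // hasPort("foo.com") == false, hasPort("foo.com:80") == true, // hasPort("[::1]") == false (the last ':' is inside the brackets), // hasPort("[::1]:443") == true.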
func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } var ( portMap = map[string]string{ "http": "80", "https": "443", } ) git-lfs-2.3.4/lfsapi/proxy_test.go000066400000000000000000000041511317167762300171300ustar00rootroot00000000000000package lfsapi import ( "net/http" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestProxyFromGitConfig(t *testing.T) { c, err := NewClient(UniqTestEnv(map[string]string{ "HTTPS_PROXY": "https://proxy-from-env:8080", }), UniqTestEnv(map[string]string{ "http.proxy": "https://proxy-from-git-config:8080", })) require.Nil(t, err) req, err := http.NewRequest("GET", "https://some-host.com:123/foo/bar", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) assert.Equal(t, "proxy-from-git-config:8080", proxyURL.Host) assert.Nil(t, err) } func TestHttpProxyFromGitConfig(t *testing.T) { c, err := NewClient(UniqTestEnv(map[string]string{ "HTTPS_PROXY": "https://proxy-from-env:8080", }), UniqTestEnv(map[string]string{ "http.proxy": "http://proxy-from-git-config:8080", })) require.Nil(t, err) req, err := http.NewRequest("GET", "https://some-host.com:123/foo/bar", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) assert.Equal(t, "proxy-from-env:8080", proxyURL.Host) assert.Nil(t, err) } func TestProxyFromEnvironment(t *testing.T) { c, err := NewClient(UniqTestEnv(map[string]string{ "HTTPS_PROXY": "https://proxy-from-env:8080", }), nil) require.Nil(t, err) req, err := http.NewRequest("GET", "https://some-host.com:123/foo/bar", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) assert.Equal(t, "proxy-from-env:8080", proxyURL.Host) assert.Nil(t, err) } func TestProxyIsNil(t *testing.T) { c := &Client{} req, err := http.NewRequest("GET", "http://some-host.com:123/foo/bar", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) assert.Nil(t, proxyURL) assert.Nil(t, err) } func TestProxyNoProxy(t *testing.T) { c, err := NewClient(UniqTestEnv(map[string]string{ "NO_PROXY": "some-host", }), UniqTestEnv(map[string]string{ "http.proxy": "https://proxy-from-git-config:8080", })) require.Nil(t, err) req, err := http.NewRequest("GET", "https://some-host:8080", nil) require.Nil(t, err) proxyURL, err := proxyFromClient(c)(req) assert.Nil(t, proxyURL) assert.Nil(t, err) } git-lfs-2.3.4/lfsapi/response_test.go000066400000000000000000000107241317167762300176100ustar00rootroot00000000000000package lfsapi import ( "net/http" "net/http/httptest" "strings" "sync/atomic" "testing" "github.com/git-lfs/git-lfs/errors" "github.com/stretchr/testify/assert" ) func TestAuthErrWithBody(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/test" { w.WriteHeader(http.StatusNotFound) return } atomic.AddUint32(&called, 1) w.Header().Set("Content-Type", "application/json") w.WriteHeader(401) w.Write([]byte(`{"message":"custom auth error"}`)) })) defer srv.Close() req, err := http.NewRequest("GET", srv.URL+"/test", nil) assert.Nil(t, err) c := &Client{} _, err = c.Do(req) assert.NotNil(t, err) assert.True(t, errors.IsAuthError(err)) assert.Equal(t, "Authentication required: custom auth error", err.Error()) assert.EqualValues(t, 1, called) } func TestFatalWithBody(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/test" { w.WriteHeader(http.StatusNotFound) return } 
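// only requests that pass the path guard above are counted and answered // with the canned 500 JSON body that the client should surface as fatal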
atomic.AddUint32(&called, 1) w.Header().Set("Content-Type", "application/json") w.WriteHeader(500) w.Write([]byte(`{"message":"custom fatal error"}`)) })) defer srv.Close() req, err := http.NewRequest("GET", srv.URL+"/test", nil) assert.Nil(t, err) c := &Client{} _, err = c.Do(req) assert.NotNil(t, err) assert.True(t, errors.IsFatalError(err)) assert.Equal(t, "Fatal error: custom fatal error", err.Error()) assert.EqualValues(t, 1, called) } func TestWithNonFatal500WithBody(t *testing.T) { c := &Client{} var called uint32 nonFatalCodes := map[int]string{ 501: "custom 501 error", 507: "custom 507 error", 509: "custom 509 error", } for nonFatalCode, expectedErr := range nonFatalCodes { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/test" { w.WriteHeader(http.StatusNotFound) return } atomic.AddUint32(&called, 1) w.Header().Set("Content-Type", "application/json") w.WriteHeader(nonFatalCode) w.Write([]byte(`{"message":"` + expectedErr + `"}`)) })) req, err := http.NewRequest("GET", srv.URL+"/test", nil) assert.Nil(t, err) _, err = c.Do(req) t.Logf("non fatal code %d", nonFatalCode) assert.NotNil(t, err) assert.Equal(t, expectedErr, err.Error()) srv.Close() } assert.EqualValues(t, 3, called) } func TestAuthErrWithoutBody(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/test" { w.WriteHeader(http.StatusNotFound) return } atomic.AddUint32(&called, 1) w.WriteHeader(401) })) defer srv.Close() req, err := http.NewRequest("GET", srv.URL+"/test", nil) assert.Nil(t, err) c := &Client{} _, err = c.Do(req) assert.NotNil(t, err) assert.True(t, errors.IsAuthError(err)) assert.True(t, strings.HasPrefix(err.Error(), "Authentication required: Authorization error:"), err.Error()) assert.EqualValues(t, 1, called) } func TestFatalWithoutBody(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/test" { w.WriteHeader(http.StatusNotFound) return } atomic.AddUint32(&called, 1) w.WriteHeader(500) })) defer srv.Close() req, err := http.NewRequest("GET", srv.URL+"/test", nil) assert.Nil(t, err) c := &Client{} _, err = c.Do(req) assert.NotNil(t, err) assert.True(t, errors.IsFatalError(err)) assert.True(t, strings.HasPrefix(err.Error(), "Fatal error: Server error:"), err.Error()) assert.EqualValues(t, 1, called) } func TestWithNonFatal500WithoutBody(t *testing.T) { c := &Client{} var called uint32 nonFatalCodes := map[int]string{ 501: "Not Implemented:", 507: "Insufficient server storage:", 509: "Bandwidth limit exceeded:", } for nonFatalCode, errPrefix := range nonFatalCodes { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/test" { w.WriteHeader(http.StatusNotFound) return } atomic.AddUint32(&called, 1) w.WriteHeader(nonFatalCode) })) req, err := http.NewRequest("GET", srv.URL+"/test", nil) assert.Nil(t, err) _, err = c.Do(req) t.Logf("non fatal code %d", nonFatalCode) assert.NotNil(t, err) assert.True(t, strings.HasPrefix(err.Error(), errPrefix)) srv.Close() } assert.EqualValues(t, 3, called) } git-lfs-2.3.4/lfsapi/retries.go000066400000000000000000000017141317167762300163670ustar00rootroot00000000000000package lfsapi import ( "context" "net/http" ) // ckey is a type that wraps a string for package-unique context.Context keys. 
type ckey string const ( // contextKeyRetries is a context.Context key for storing the desired // number of retries for a given request. contextKeyRetries ckey = "retries" // defaultRequestRetries is the default number of retries to perform on // a given HTTP request. defaultRequestRetries = 0 ) // WithRetries stores the desired number of retries "n" on the given // http.Request, and causes it to be retried "n" times in the case of a non-nil // network related error. func WithRetries(req *http.Request, n int) *http.Request { ctx := req.Context() ctx = context.WithValue(ctx, contextKeyRetries, n) return req.WithContext(ctx) } // Retries returns the number of retries requested for a given http.Request. func Retries(req *http.Request) (int, bool) { n, ok := req.Context().Value(contextKeyRetries).(int) return n, ok } git-lfs-2.3.4/lfsapi/retries_test.go000066400000000000000000000035231317167762300174260ustar00rootroot00000000000000package lfsapi import ( "encoding/json" "net/http" "net/http/httptest" "sync/atomic" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestWithRetries(t *testing.T) { req, _ := http.NewRequest("GET", "/", nil) req = WithRetries(req, 1) n, ok := Retries(req) assert.True(t, ok) assert.Equal(t, 1, n) } func TestRetriesOnUnannotatedRequest(t *testing.T) { req, _ := http.NewRequest("GET", "/", nil) n, ok := Retries(req) assert.False(t, ok) assert.Equal(t, 0, n) } func TestRequestWithRetries(t *testing.T) { type T struct { S string `json:"s"` } var hasRaw bool = true var requests uint32 var berr error srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var payload T if err := json.NewDecoder(r.Body).Decode(&payload); err != nil { berr = err } assert.Equal(t, "Hello, world!", payload.S) if atomic.AddUint32(&requests, 1) < 3 { raw, ok := w.(http.Hijacker) if !ok { hasRaw = false return } conn, _, err := raw.Hijack() require.NoError(t, err) require.NoError(t, conn.Close()) return } })) defer srv.Close() c, err := NewClient(nil, nil) require.NoError(t, err) req, err := http.NewRequest("POST", srv.URL, nil) require.NoError(t, err) require.NoError(t, MarshalToRequest(req, &T{"Hello, world!"})) if !hasRaw { // Skip tests where the implementation of // net/http/httptest.Server does not provide raw access to the // connection. // // Defer the skip outside of the server, since t.Skip halts the // running goroutine. 
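// (Per the testing package docs, SkipNow and its wrappers must be called // from the goroutine running the test, not from other goroutines created // during the test.)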
t.Skip("lfsapi: net/http/httptest.Server does not provide raw access") } res, err := c.Do(WithRetries(req, 8)) assert.NoError(t, berr) assert.NoError(t, err) require.NotNil(t, res, "lfsapi: expected response") assert.Equal(t, http.StatusOK, res.StatusCode) } git-lfs-2.3.4/lfsapi/ssh.go000066400000000000000000000113351317167762300155070ustar00rootroot00000000000000package lfsapi import ( "bytes" "encoding/json" "fmt" "os/exec" "path/filepath" "regexp" "strings" "time" "github.com/git-lfs/git-lfs/tools" "github.com/rubyist/tracerx" ) type SSHResolver interface { Resolve(Endpoint, string) (sshAuthResponse, error) } func withSSHCache(ssh SSHResolver) SSHResolver { return &sshCache{ endpoints: make(map[string]*sshAuthResponse), ssh: ssh, } } type sshCache struct { endpoints map[string]*sshAuthResponse ssh SSHResolver } func (c *sshCache) Resolve(e Endpoint, method string) (sshAuthResponse, error) { if len(e.SshUserAndHost) == 0 { return sshAuthResponse{}, nil } key := strings.Join([]string{e.SshUserAndHost, e.SshPort, e.SshPath, method}, "//") if res, ok := c.endpoints[key]; ok { if _, expired := res.IsExpiredWithin(5 * time.Second); !expired { tracerx.Printf("ssh cache: %s git-lfs-authenticate %s %s", e.SshUserAndHost, e.SshPath, endpointOperation(e, method)) return *res, nil } else { tracerx.Printf("ssh cache expired: %s git-lfs-authenticate %s %s", e.SshUserAndHost, e.SshPath, endpointOperation(e, method)) } } res, err := c.ssh.Resolve(e, method) if err == nil { c.endpoints[key] = &res } return res, err } type sshAuthResponse struct { Message string `json:"-"` Href string `json:"href"` Header map[string]string `json:"header"` ExpiresAt time.Time `json:"expires_at"` ExpiresIn int `json:"expires_in"` createdAt time.Time } func (r *sshAuthResponse) IsExpiredWithin(d time.Duration) (time.Time, bool) { return tools.IsExpiredAtOrIn(r.createdAt, d, r.ExpiresAt, time.Duration(r.ExpiresIn)*time.Second) } type sshAuthClient struct { os Env } func (c *sshAuthClient) Resolve(e Endpoint, method string) (sshAuthResponse, error) { res := sshAuthResponse{} if len(e.SshUserAndHost) == 0 { return res, nil } exe, args := sshGetLFSExeAndArgs(c.os, e, method) cmd := exec.Command(exe, args...) 
// Save stdout and stderr in separate buffers var outbuf, errbuf bytes.Buffer cmd.Stdout = &outbuf cmd.Stderr = &errbuf now := time.Now() // Execute command err := cmd.Start() if err == nil { err = cmd.Wait() } // Processing result if err != nil { res.Message = strings.TrimSpace(errbuf.String()) } else { err = json.Unmarshal(outbuf.Bytes(), &res) res.createdAt = now } return res, err } func sshGetLFSExeAndArgs(osEnv Env, e Endpoint, method string) (string, []string) { exe, args := sshGetExeAndArgs(osEnv, e) operation := endpointOperation(e, method) args = append(args, fmt.Sprintf("git-lfs-authenticate %s %s", e.SshPath, operation)) tracerx.Printf("run_command: %s %s", exe, strings.Join(args, " ")) return exe, args } // Return the executable name for ssh on this machine and the base args // Base args includes port settings, user/host, everything pre the command to execute func sshGetExeAndArgs(osEnv Env, e Endpoint) (exe string, baseargs []string) { isPlink := false isTortoise := false ssh, _ := osEnv.Get("GIT_SSH") sshCmd, _ := osEnv.Get("GIT_SSH_COMMAND") cmdArgs := tools.QuotedFields(sshCmd) if len(cmdArgs) > 0 { ssh = cmdArgs[0] cmdArgs = cmdArgs[1:] } if ssh == "" { ssh = defaultSSHCmd } basessh := filepath.Base(ssh) if basessh != defaultSSHCmd { // Strip extension for easier comparison if ext := filepath.Ext(basessh); len(ext) > 0 { basessh = basessh[:len(basessh)-len(ext)] } isPlink = strings.EqualFold(basessh, "plink") isTortoise = strings.EqualFold(basessh, "tortoiseplink") } args := make([]string, 0, 5+len(cmdArgs)) if len(cmdArgs) > 0 { args = append(args, cmdArgs...) } if isTortoise { // TortoisePlink requires the -batch argument to behave like ssh/plink args = append(args, "-batch") } if len(e.SshPort) > 0 { if isPlink || isTortoise { args = append(args, "-P") } else { args = append(args, "-p") } args = append(args, e.SshPort) } if sep, ok := sshSeparators[basessh]; ok { // inserts a separator between cli -options and host/cmd commands // example: $ ssh -p 12345 -- user@host.com git-lfs-authenticate ... args = append(args, sep, e.SshUserAndHost) } else { // no prefix supported, strip leading - off host to prevent cmd like: // $ git config lfs.url ssh://-proxycmd=whatever // $ plink -P 12345 -proxycmd=foo git-lfs-authenticate ... // // Instead, it'll attempt this, and eventually return an error // $ plink -P 12345 proxycmd=foo git-lfs-authenticate ... 
args = append(args, sshOptPrefixRE.ReplaceAllString(e.SshUserAndHost, "")) } return ssh, args } const defaultSSHCmd = "ssh" var ( sshOptPrefixRE = regexp.MustCompile(`\A\-+`) sshSeparators = map[string]string{ "ssh": "--", "lfs-ssh-echo": "--", // used in lfs integration tests only } ) git-lfs-2.3.4/lfsapi/ssh_test.go000066400000000000000000000412551317167762300165520ustar00rootroot00000000000000package lfsapi import ( "errors" "net/url" "path/filepath" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestSSHCacheResolveFromCache(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) cache.endpoints["userandhost//1//path//post"] = &sshAuthResponse{ Href: "cache", createdAt: time.Now(), } ssh.responses["userandhost"] = sshAuthResponse{Href: "real"} e := Endpoint{ SshUserAndHost: "userandhost", SshPort: "1", SshPath: "path", } res, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "cache", res.Href) } func TestSSHCacheResolveFromCacheWithFutureExpiresAt(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) cache.endpoints["userandhost//1//path//post"] = &sshAuthResponse{ Href: "cache", ExpiresAt: time.Now().Add(time.Duration(1) * time.Hour), createdAt: time.Now(), } ssh.responses["userandhost"] = sshAuthResponse{Href: "real"} e := Endpoint{ SshUserAndHost: "userandhost", SshPort: "1", SshPath: "path", } res, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "cache", res.Href) } func TestSSHCacheResolveFromCacheWithFutureExpiresIn(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) cache.endpoints["userandhost//1//path//post"] = &sshAuthResponse{ Href: "cache", ExpiresIn: 60 * 60, createdAt: time.Now(), } ssh.responses["userandhost"] = sshAuthResponse{Href: "real"} e := Endpoint{ SshUserAndHost: "userandhost", SshPort: "1", SshPath: "path", } res, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "cache", res.Href) } func TestSSHCacheResolveFromCacheWithPastExpiresAt(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) cache.endpoints["userandhost//1//path//post"] = &sshAuthResponse{ Href: "cache", ExpiresAt: time.Now().Add(time.Duration(-1) * time.Hour), createdAt: time.Now(), } ssh.responses["userandhost"] = sshAuthResponse{Href: "real"} e := Endpoint{ SshUserAndHost: "userandhost", SshPort: "1", SshPath: "path", } res, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "real", res.Href) } func TestSSHCacheResolveFromCacheWithPastExpiresIn(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) cache.endpoints["userandhost//1//path//post"] = &sshAuthResponse{ Href: "cache", ExpiresIn: -60 * 60, createdAt: time.Now(), } ssh.responses["userandhost"] = sshAuthResponse{Href: "real"} e := Endpoint{ SshUserAndHost: "userandhost", SshPort: "1", SshPath: "path", } res, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "real", res.Href) } func TestSSHCacheResolveFromCacheWithAmbiguousExpirationInfo(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) cache.endpoints["userandhost//1//path//post"] = &sshAuthResponse{ Href: "cache", ExpiresIn: 60 * 60, ExpiresAt: time.Now().Add(-1 * time.Hour), createdAt: time.Now(), } ssh.responses["userandhost"] = sshAuthResponse{Href: "real"} e := Endpoint{ SshUserAndHost: "userandhost", SshPort: "1", SshPath: "path", } res, err := cache.Resolve(e, "post") assert.Nil(t, err) 
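// a future ExpiresIn marks the entry valid even though ExpiresAt is in // the past, so the ambiguous entry is still served from the cache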
assert.Equal(t, "cache", res.Href) } func TestSSHCacheResolveWithoutError(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) assert.Equal(t, 0, len(cache.endpoints)) ssh.responses["userandhost"] = sshAuthResponse{Href: "real"} e := Endpoint{ SshUserAndHost: "userandhost", SshPort: "1", SshPath: "path", } res, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "real", res.Href) assert.Equal(t, 1, len(cache.endpoints)) cacheres, ok := cache.endpoints["userandhost//1//path//post"] assert.True(t, ok) assert.NotNil(t, cacheres) assert.Equal(t, "real", cacheres.Href) delete(ssh.responses, "userandhost") res2, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "real", res2.Href) } func TestSSHCacheResolveWithError(t *testing.T) { ssh := newFakeResolver() cache := withSSHCache(ssh).(*sshCache) assert.Equal(t, 0, len(cache.endpoints)) ssh.responses["userandhost"] = sshAuthResponse{Message: "resolve error", Href: "real"} e := Endpoint{ SshUserAndHost: "userandhost", SshPort: "1", SshPath: "path", } res, err := cache.Resolve(e, "post") assert.NotNil(t, err) assert.Equal(t, "real", res.Href) assert.Equal(t, 0, len(cache.endpoints)) delete(ssh.responses, "userandhost") res2, err := cache.Resolve(e, "post") assert.Nil(t, err) assert.Equal(t, "", res2.Href) } func newFakeResolver() *fakeResolver { return &fakeResolver{responses: make(map[string]sshAuthResponse)} } type fakeResolver struct { responses map[string]sshAuthResponse } func (r *fakeResolver) Resolve(e Endpoint, method string) (sshAuthResponse, error) { res := r.responses[e.SshUserAndHost] var err error if len(res.Message) > 0 { err = errors.New(res.Message) } res.createdAt = time.Now() return res, err } func TestSSHGetLFSExeAndArgs(t *testing.T) { cli, err := NewClient(UniqTestEnv(map[string]string{}), nil) require.Nil(t, err) endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" endpoint.SshPath = "user/repo" exe, args := sshGetLFSExeAndArgs(cli.OSEnv(), endpoint, "GET") assert.Equal(t, "ssh", exe) assert.Equal(t, []string{ "--", "user@foo.com", "git-lfs-authenticate user/repo download", }, args) exe, args = sshGetLFSExeAndArgs(cli.OSEnv(), endpoint, "HEAD") assert.Equal(t, "ssh", exe) assert.Equal(t, []string{ "--", "user@foo.com", "git-lfs-authenticate user/repo download", }, args) // this is going by endpoint.Operation, implicitly set by Endpoint() on L15. 
exe, args = sshGetLFSExeAndArgs(cli.OSEnv(), endpoint, "POST") assert.Equal(t, "ssh", exe) assert.Equal(t, []string{ "--", "user@foo.com", "git-lfs-authenticate user/repo download", }, args) endpoint.Operation = "upload" exe, args = sshGetLFSExeAndArgs(cli.OSEnv(), endpoint, "POST") assert.Equal(t, "ssh", exe) assert.Equal(t, []string{ "--", "user@foo.com", "git-lfs-authenticate user/repo upload", }, args) } func TestSSHGetExeAndArgsSsh(t *testing.T) { cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": "", }), nil) require.Nil(t, err) endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, "ssh", exe) assert.Equal(t, []string{"--", "user@foo.com"}, args) } func TestSSHGetExeAndArgsSshCustomPort(t *testing.T) { cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": "", }), nil) require.Nil(t, err) endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" endpoint.SshPort = "8888" exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, "ssh", exe) assert.Equal(t, []string{"-p", "8888", "--", "user@foo.com"}, args) } func TestSSHGetExeAndArgsPlink(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "plink.exe") cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": plink, }), nil) require.Nil(t, err) endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"user@foo.com"}, args) } func TestSSHGetExeAndArgsPlinkCustomPort(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "plink") cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": plink, }), nil) require.Nil(t, err) endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" endpoint.SshPort = "8888" exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-P", "8888", "user@foo.com"}, args) } func TestSSHGetExeAndArgsTortoisePlink(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink.exe") cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": plink, }), nil) require.Nil(t, err) endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-batch", "user@foo.com"}, args) } func TestSSHGetExeAndArgsTortoisePlinkCustomPort(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink") cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH_COMMAND": "", "GIT_SSH": plink, }), nil) require.Nil(t, err) endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" endpoint.SshPort = "8888" exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-batch", "-P", "8888", "user@foo.com"}, args) } func TestSSHGetExeAndArgsSshCommandPrecedence(t *testing.T) { cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH_COMMAND": "sshcmd", "GIT_SSH": "bad", }), nil) require.Nil(t, err) endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" exe, args := sshGetExeAndArgs(cli.OSEnv(), 
endpoint) assert.Equal(t, "sshcmd", exe) assert.Equal(t, []string{"user@foo.com"}, args) } func TestSSHGetExeAndArgsSshCommandArgs(t *testing.T) { cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH_COMMAND": "sshcmd --args 1", }), nil) require.Nil(t, err) endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, "sshcmd", exe) assert.Equal(t, []string{"--args", "1", "user@foo.com"}, args) } func TestSSHGetExeAndArgsSshCommandArgsWithMixedQuotes(t *testing.T) { cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH_COMMAND": "sshcmd foo 'bar \"baz\"'", }), nil) require.Nil(t, err) endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, "sshcmd", exe) assert.Equal(t, []string{"foo", `bar "baz"`, "user@foo.com"}, args) } func TestSSHGetExeAndArgsSshCommandCustomPort(t *testing.T) { cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH_COMMAND": "sshcmd", }), nil) require.Nil(t, err) endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" endpoint.SshPort = "8888" exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, "sshcmd", exe) assert.Equal(t, []string{"-p", "8888", "user@foo.com"}, args) } func TestSSHGetLFSExeAndArgsWithCustomSSH(t *testing.T) { cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH": "not-ssh", }), nil) require.Nil(t, err) u, err := url.Parse("ssh://git@host.com:12345/repo") require.Nil(t, err) e := endpointFromSshUrl(u) t.Logf("ENDPOINT: %+v", e) assert.Equal(t, "12345", e.SshPort) assert.Equal(t, "git@host.com", e.SshUserAndHost) assert.Equal(t, "repo", e.SshPath) exe, args := sshGetLFSExeAndArgs(cli.OSEnv(), e, "GET") assert.Equal(t, "not-ssh", exe) assert.Equal(t, []string{"-p", "12345", "git@host.com", "git-lfs-authenticate repo download"}, args) } func TestSSHGetLFSExeAndArgsInvalidOptionsAsHost(t *testing.T) { cli, err := NewClient(nil, nil) require.Nil(t, err) u, err := url.Parse("ssh://-oProxyCommand=gnome-calculator/repo") require.Nil(t, err) assert.Equal(t, "-oProxyCommand=gnome-calculator", u.Host) e := endpointFromSshUrl(u) t.Logf("ENDPOINT: %+v", e) assert.Equal(t, "-oProxyCommand=gnome-calculator", e.SshUserAndHost) assert.Equal(t, "repo", e.SshPath) exe, args := sshGetLFSExeAndArgs(cli.OSEnv(), e, "GET") assert.Equal(t, "ssh", exe) assert.Equal(t, []string{"--", "-oProxyCommand=gnome-calculator", "git-lfs-authenticate repo download"}, args) } func TestSSHGetLFSExeAndArgsInvalidOptionsAsHostWithCustomSSH(t *testing.T) { cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH": "not-ssh", }), nil) require.Nil(t, err) u, err := url.Parse("ssh://--oProxyCommand=gnome-calculator/repo") require.Nil(t, err) assert.Equal(t, "--oProxyCommand=gnome-calculator", u.Host) e := endpointFromSshUrl(u) t.Logf("ENDPOINT: %+v", e) assert.Equal(t, "--oProxyCommand=gnome-calculator", e.SshUserAndHost) assert.Equal(t, "repo", e.SshPath) exe, args := sshGetLFSExeAndArgs(cli.OSEnv(), e, "GET") assert.Equal(t, "not-ssh", exe) assert.Equal(t, []string{"oProxyCommand=gnome-calculator", "git-lfs-authenticate repo download"}, args) } func TestSSHGetExeAndArgsInvalidOptionsAsHost(t *testing.T) { cli, err := NewClient(nil, nil) require.Nil(t, err) u, err := url.Parse("ssh://-oProxyCommand=gnome-calculator") require.Nil(t, err) assert.Equal(t, "-oProxyCommand=gnome-calculator", u.Host) e := 
endpointFromSshUrl(u) t.Logf("ENDPOINT: %+v", e) assert.Equal(t, "-oProxyCommand=gnome-calculator", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) exe, args := sshGetExeAndArgs(cli.OSEnv(), e) assert.Equal(t, "ssh", exe) assert.Equal(t, []string{"--", "-oProxyCommand=gnome-calculator"}, args) } func TestSSHGetExeAndArgsInvalidOptionsAsPath(t *testing.T) { cli, err := NewClient(nil, nil) require.Nil(t, err) u, err := url.Parse("ssh://git@git-host.com/-oProxyCommand=gnome-calculator") require.Nil(t, err) assert.Equal(t, "git-host.com", u.Host) e := endpointFromSshUrl(u) t.Logf("ENDPOINT: %+v", e) assert.Equal(t, "git@git-host.com", e.SshUserAndHost) assert.Equal(t, "-oProxyCommand=gnome-calculator", e.SshPath) exe, args := sshGetExeAndArgs(cli.OSEnv(), e) assert.Equal(t, "ssh", exe) assert.Equal(t, []string{"--", "git@git-host.com"}, args) } func TestParseBareSSHUrl(t *testing.T) { e := endpointFromBareSshUrl("git@git-host.com:repo.git") t.Logf("endpoint: %+v", e) assert.Equal(t, "git@git-host.com", e.SshUserAndHost) assert.Equal(t, "repo.git", e.SshPath) e = endpointFromBareSshUrl("git@git-host.com/should-be-a-colon.git") t.Logf("endpoint: %+v", e) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) e = endpointFromBareSshUrl("-oProxyCommand=gnome-calculator") t.Logf("endpoint: %+v", e) assert.Equal(t, "", e.SshUserAndHost) assert.Equal(t, "", e.SshPath) e = endpointFromBareSshUrl("git@git-host.com:-oProxyCommand=gnome-calculator") t.Logf("endpoint: %+v", e) assert.Equal(t, "git@git-host.com", e.SshUserAndHost) assert.Equal(t, "-oProxyCommand=gnome-calculator", e.SshPath) } func TestSSHGetExeAndArgsPlinkCommand(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "plink.exe") cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH_COMMAND": plink, }), nil) require.Nil(t, err) endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"user@foo.com"}, args) } func TestSSHGetExeAndArgsPlinkCommandCustomPort(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "plink") cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH_COMMAND": plink, }), nil) require.Nil(t, err) endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" endpoint.SshPort = "8888" exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-P", "8888", "user@foo.com"}, args) } func TestSSHGetExeAndArgsTortoisePlinkCommand(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink.exe") cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH_COMMAND": plink, }), nil) require.Nil(t, err) endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-batch", "user@foo.com"}, args) } func TestSSHGetExeAndArgsTortoisePlinkCommandCustomPort(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink") cli, err := NewClient(UniqTestEnv(map[string]string{ "GIT_SSH_COMMAND": plink, }), nil) require.Nil(t, err) endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" endpoint.SshPort = "8888" exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-batch", "-P", "8888", "user@foo.com"}, 
args) } git-lfs-2.3.4/lfsapi/stats.go000066400000000000000000000101041317167762300160410ustar00rootroot00000000000000package lfsapi import ( "context" "crypto/tls" "fmt" "io" "net/http" "net/http/httptrace" "strings" "sync" "sync/atomic" "time" "github.com/git-lfs/git-lfs/tools" ) type httpTransfer struct { URL string Method string Key string RequestBodySize int64 Start int64 ResponseStart int64 ConnStart int64 ConnEnd int64 DNSStart int64 DNSEnd int64 TLSStart int64 TLSEnd int64 } type statsContextKey string const transferKey = statsContextKey("transfer") func (c *Client) LogHTTPStats(w io.WriteCloser) { fmt.Fprintf(w, "concurrent=%d time=%d version=%s\n", c.ConcurrentTransfers, time.Now().Unix(), UserAgent) c.httpLogger = newSyncLogger(w) } // LogStats is intended to be called after all HTTP operations for the // command have finished. It dumps k/v logs, one line per httpTransfer, into // a log file with the current timestamp. // // DEPRECATED: Call LogHTTPStats() before the first HTTP request. func (c *Client) LogStats(out io.Writer) {} // LogRequest tells the client to log the request's stats to the http log // after the response body has been read. func (c *Client) LogRequest(r *http.Request, reqKey string) *http.Request { if c.httpLogger == nil { return r } t := &httpTransfer{ URL: strings.SplitN(r.URL.String(), "?", 2)[0], Method: r.Method, Key: reqKey, } ctx := httptrace.WithClientTrace(r.Context(), &httptrace.ClientTrace{ GetConn: func(_ string) { atomic.CompareAndSwapInt64(&t.Start, 0, time.Now().UnixNano()) }, DNSStart: func(_ httptrace.DNSStartInfo) { atomic.CompareAndSwapInt64(&t.DNSStart, 0, time.Now().UnixNano()) }, DNSDone: func(_ httptrace.DNSDoneInfo) { atomic.CompareAndSwapInt64(&t.DNSEnd, 0, time.Now().UnixNano()) }, ConnectStart: func(_, _ string) { atomic.CompareAndSwapInt64(&t.ConnStart, 0, time.Now().UnixNano()) }, ConnectDone: func(_, _ string, _ error) { atomic.CompareAndSwapInt64(&t.ConnEnd, 0, time.Now().UnixNano()) }, TLSHandshakeStart: func() { atomic.CompareAndSwapInt64(&t.TLSStart, 0, time.Now().UnixNano()) }, TLSHandshakeDone: func(_ tls.ConnectionState, _ error) { atomic.CompareAndSwapInt64(&t.TLSEnd, 0, time.Now().UnixNano()) }, GotFirstResponseByte: func() { atomic.CompareAndSwapInt64(&t.ResponseStart, 0, time.Now().UnixNano()) }, }) return r.WithContext(context.WithValue(ctx, transferKey, t)) } // LogResponse sends the current response stats to the http log. // // DEPRECATED: Use LogRequest() instead. 
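// The method is now a no-op kept for compatibility; response stats are // written when the traced response body reaches EOF (see // tracedResponse.Read in verbose.go).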
func (c *Client) LogResponse(key string, res *http.Response) {} func newSyncLogger(w io.WriteCloser) *syncLogger { ch := make(chan string, 100) wg := &sync.WaitGroup{} wg.Add(1) go func(c chan string, w io.Writer, wg *sync.WaitGroup) { for l := range c { w.Write([]byte(l)) wg.Done() } }(ch, w, wg) return &syncLogger{w: w, ch: ch, wg: wg} } type syncLogger struct { w io.WriteCloser ch chan string wg *sync.WaitGroup } func (l *syncLogger) LogRequest(req *http.Request, bodySize int64) { if l == nil { return } if v := req.Context().Value(transferKey); v != nil { l.logTransfer(v.(*httpTransfer), "request", fmt.Sprintf(" body=%d", bodySize)) } } func (l *syncLogger) LogResponse(req *http.Request, status int, bodySize int64) { if l == nil { return } if v := req.Context().Value(transferKey); v != nil { t := v.(*httpTransfer) now := time.Now().UnixNano() l.logTransfer(t, "response", fmt.Sprintf(" status=%d body=%d conntime=%d dnstime=%d tlstime=%d restime=%d time=%d", status, bodySize, tools.MaxInt64(t.ConnEnd-t.ConnStart, 0), tools.MaxInt64(t.DNSEnd-t.DNSStart, 0), tools.MaxInt64(t.TLSEnd-t.TLSStart, 0), tools.MaxInt64(now-t.ResponseStart, 0), tools.MaxInt64(now-t.Start, 0), )) } } func (l *syncLogger) logTransfer(t *httpTransfer, event, extra string) { l.wg.Add(1) l.ch <- fmt.Sprintf("key=%s event=%s url=%s method=%s%s\n", t.Key, event, t.URL, t.Method, extra, ) } func (l *syncLogger) Close() error { if l == nil { return nil } l.wg.Done() l.wg.Wait() return l.w.Close() } git-lfs-2.3.4/lfsapi/stats_test.go000066400000000000000000000106511317167762300171070ustar00rootroot00000000000000package lfsapi import ( "bytes" "encoding/json" "io" "io/ioutil" "net/http" "net/http/httptest" "strings" "sync/atomic" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestStatsWithKey(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called, 1) t.Logf("srv req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) body := &verboseTest{} err := json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Verbose", body.Test) w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"Status":"Ok"}`)) })) defer srv.Close() out := &bytes.Buffer{} c := &Client{ConcurrentTransfers: 5} c.LogHTTPStats(nopCloser(out)) req, err := http.NewRequest("POST", srv.URL, nil) req = c.LogRequest(req, "stats-test") req.Header.Set("Authorization", "Basic ABC") req.Header.Set("Content-Type", "application/json") require.Nil(t, err) require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) res, err := c.Do(req) require.Nil(t, err) io.Copy(ioutil.Discard, res.Body) res.Body.Close() assert.Nil(t, c.Close()) assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, called) stats := strings.TrimSpace(out.String()) t.Log(stats) lines := strings.Split(stats, "\n") require.Equal(t, 3, len(lines)) assert.True(t, strings.Contains(lines[0], "concurrent=5")) expected := []string{ "key=stats-test", "event=request", "body=18", "url=" + srv.URL, } for _, substr := range expected { assert.True(t, strings.Contains(lines[1], substr), "missing: "+substr) } expected = []string{ "key=stats-test", "event=response", "url=" + srv.URL, } for _, substr := range expected { assert.True(t, strings.Contains(lines[2], substr), "missing: "+substr) } } func TestStatsWithoutKey(t *testing.T) { var called uint32 srv := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called, 1) t.Logf("srv req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) body := &verboseTest{} err := json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Verbose", body.Test) w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"Status":"Ok"}`)) })) defer srv.Close() out := &bytes.Buffer{} c := &Client{ConcurrentTransfers: 5} c.LogHTTPStats(nopCloser(out)) req, err := http.NewRequest("POST", srv.URL, nil) req.Header.Set("Authorization", "Basic ABC") req.Header.Set("Content-Type", "application/json") require.Nil(t, err) require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) res, err := c.Do(req) require.Nil(t, err) io.Copy(ioutil.Discard, res.Body) res.Body.Close() assert.Nil(t, c.Close()) assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, called) stats := strings.TrimSpace(out.String()) t.Log(stats) assert.True(t, strings.Contains(stats, "concurrent=5")) assert.Equal(t, 1, len(strings.Split(stats, "\n"))) } func TestStatsDisabled(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called, 1) t.Logf("srv req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) body := &verboseTest{} err := json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Verbose", body.Test) w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"Status":"Ok"}`)) })) defer srv.Close() c := &Client{ConcurrentTransfers: 5} req, err := http.NewRequest("POST", srv.URL, nil) req = c.LogRequest(req, "stats-test") req.Header.Set("Authorization", "Basic ABC") req.Header.Set("Content-Type", "application/json") require.Nil(t, err) require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) res, err := c.Do(req) require.Nil(t, err) io.Copy(ioutil.Discard, res.Body) res.Body.Close() assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, called) out := &bytes.Buffer{} c.LogStats(out) assert.Equal(t, 0, out.Len()) } func nopCloser(w io.Writer) io.WriteCloser { return nopWCloser{w} } type nopWCloser struct { io.Writer } func (w nopWCloser) Close() error { return nil } git-lfs-2.3.4/lfsapi/verbose.go000066400000000000000000000067401317167762300163630ustar00rootroot00000000000000package lfsapi import ( "bufio" "bytes" "fmt" "io" "net/http" "net/http/httputil" "strings" "github.com/rubyist/tracerx" ) func (c *Client) traceRequest(req *http.Request) (*tracedRequest, error) { tracerx.Printf("HTTP: %s", traceReq(req)) if c.Verbose { if dump, err := httputil.DumpRequest(req, false); err == nil { c.traceHTTPDump(">", dump) } } body, ok := req.Body.(ReadSeekCloser) if body != nil && !ok { return nil, fmt.Errorf("Request body must implement io.ReadCloser and io.Seeker. 
Got: %T", body) } if body != nil && ok { body.Seek(0, io.SeekStart) tr := &tracedRequest{ verbose: c.Verbose && isTraceableContent(req.Header), verboseOut: c.VerboseOut, ReadSeekCloser: body, } req.Body = tr return tr, nil } return nil, nil } type tracedRequest struct { BodySize int64 verbose bool verboseOut io.Writer ReadSeekCloser } func (r *tracedRequest) Read(b []byte) (int, error) { n, err := tracedRead(r.ReadSeekCloser, b, r.verboseOut, false, r.verbose) r.BodySize += int64(n) return n, err } func (c *Client) traceResponse(req *http.Request, tracedReq *tracedRequest, res *http.Response) { if tracedReq != nil { c.httpLogger.LogRequest(req, tracedReq.BodySize) } if res == nil { c.httpLogger.LogResponse(req, -1, 0) return } tracerx.Printf("HTTP: %d", res.StatusCode) verboseBody := isTraceableContent(res.Header) res.Body = &tracedResponse{ httpLogger: c.httpLogger, response: res, gitTrace: verboseBody, verbose: verboseBody && c.Verbose, verboseOut: c.VerboseOut, ReadCloser: res.Body, } if !c.Verbose { return } if dump, err := httputil.DumpResponse(res, false); err == nil { if verboseBody { fmt.Fprintf(c.VerboseOut, "\n\n") } else { fmt.Fprintf(c.VerboseOut, "\n") } c.traceHTTPDump("<", dump) } } type tracedResponse struct { BodySize int64 httpLogger *syncLogger response *http.Response verbose bool gitTrace bool verboseOut io.Writer eof bool io.ReadCloser } func (r *tracedResponse) Read(b []byte) (int, error) { n, err := tracedRead(r.ReadCloser, b, r.verboseOut, r.gitTrace, r.verbose) r.BodySize += int64(n) if err == io.EOF && !r.eof { r.httpLogger.LogResponse(r.response.Request, r.response.StatusCode, r.BodySize) r.eof = true } return n, err } func tracedRead(r io.Reader, b []byte, verboseOut io.Writer, gitTrace, verbose bool) (int, error) { n, err := r.Read(b) if err == nil || err == io.EOF { if n > 0 && (gitTrace || verbose) { chunk := string(b[0:n]) if gitTrace { tracerx.Printf("HTTP: %s", chunk) } if verbose { fmt.Fprint(verboseOut, chunk) } } } return n, err } func (c *Client) traceHTTPDump(direction string, dump []byte) { scanner := bufio.NewScanner(bytes.NewBuffer(dump)) for scanner.Scan() { line := scanner.Text() if !c.DebuggingVerbose && strings.HasPrefix(strings.ToLower(line), "authorization: basic") { fmt.Fprintf(c.VerboseOut, "%s Authorization: Basic * * * * *\n", direction) } else { fmt.Fprintf(c.VerboseOut, "%s %s\n", direction, line) } } } var tracedTypes = []string{"json", "text", "xml", "html"} func isTraceableContent(h http.Header) bool { ctype := strings.ToLower(strings.SplitN(h.Get("Content-Type"), ";", 2)[0]) for _, tracedType := range tracedTypes { if strings.Contains(ctype, tracedType) { return true } } return false } func traceReq(req *http.Request) string { return fmt.Sprintf("%s %s", req.Method, strings.SplitN(req.URL.String(), "?", 2)[0]) } git-lfs-2.3.4/lfsapi/verbose_test.go000066400000000000000000000135411317167762300174170ustar00rootroot00000000000000package lfsapi import ( "bytes" "encoding/json" "io" "io/ioutil" "net/http" "net/http/httptest" "strings" "sync/atomic" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type verboseTest struct { Test string } func TestVerboseEnabled(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called, 1) t.Logf("srv req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) body := &verboseTest{} err := 
json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Verbose", body.Test) w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"Status":"Ok"}`)) })) defer srv.Close() out := &bytes.Buffer{} c := &Client{ Verbose: true, VerboseOut: out, } req, err := http.NewRequest("POST", srv.URL, nil) req.Header.Set("Authorization", "Basic ABC") req.Header.Set("Content-Type", "application/json") require.Nil(t, err) require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) res, err := c.Do(req) require.Nil(t, err) io.Copy(ioutil.Discard, res.Body) res.Body.Close() assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, called) s := out.String() t.Log(s) expected := []string{ "> Host: 127.0.0.1:", "\n> Authorization: Basic * * * * *\n", "\n> Content-Type: application/json\n", "\n> \n" + `{"Test":"Verbose"}` + "\n\n", "\n< HTTP/1.1 200 OK\n", "\n< Content-Type: application/json\n", "\n< \n" + `{"Status":"Ok"}`, } for _, substr := range expected { if !assert.True(t, strings.Contains(s, substr)) { t.Logf("missing: %q", substr) } } } func TestVerboseWithBinaryBody(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called, 1) t.Logf("srv req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) by, err := ioutil.ReadAll(r.Body) assert.Nil(t, err) assert.Equal(t, "binary-request", string(by)) w.Header().Set("Content-Type", "application/octet-stream") w.Write([]byte(`binary-response`)) })) defer srv.Close() out := &bytes.Buffer{} c := &Client{ Verbose: true, VerboseOut: out, } buf := bytes.NewBufferString("binary-request") req, err := http.NewRequest("POST", srv.URL, buf) req.Header.Set("Authorization", "Basic ABC") req.Header.Set("Content-Type", "application/octet-stream") require.Nil(t, err) res, err := c.Do(req) require.Nil(t, err) io.Copy(ioutil.Discard, res.Body) res.Body.Close() assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, called) s := out.String() t.Log(s) expected := []string{ "> Host: 127.0.0.1:", "\n> Authorization: Basic * * * * *\n", "\n> Content-Type: application/octet-stream\n", "\n< HTTP/1.1 200 OK\n", "\n< Content-Type: application/octet-stream\n", } for _, substr := range expected { if !assert.True(t, strings.Contains(s, substr)) { t.Logf("missing: %q", substr) } } assert.False(t, strings.Contains(s, "binary"), "contains binary request or response body") } func TestVerboseEnabledWithDebugging(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called, 1) t.Logf("srv req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) body := &verboseTest{} err := json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Verbose", body.Test) w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"Status":"Ok"}`)) })) defer srv.Close() out := &bytes.Buffer{} c := &Client{ Verbose: true, VerboseOut: out, DebuggingVerbose: true, } req, err := http.NewRequest("POST", srv.URL, nil) req.Header.Set("Authorization", "Basic ABC") req.Header.Set("Content-Type", "application/json") require.Nil(t, err) require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) res, err := c.Do(req) require.Nil(t, err) io.Copy(ioutil.Discard, res.Body) res.Body.Close() assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, 
called) s := out.String() t.Log(s) expected := []string{ "> Host: 127.0.0.1:", "\n> Authorization: Basic ABC\n", "\n> Content-Type: application/json\n", "\n> \n" + `{"Test":"Verbose"}` + "\n\n", "\n< HTTP/1.1 200 OK\n", "\n< Content-Type: application/json\n", "\n< \n" + `{"Status":"Ok"}`, } for _, substr := range expected { if !assert.True(t, strings.Contains(s, substr)) { t.Logf("missing: %q", substr) } } } func TestVerboseDisabled(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { atomic.AddUint32(&called, 1) t.Logf("srv req %s %s", r.Method, r.URL.Path) assert.Equal(t, "POST", r.Method) assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) body := &verboseTest{} err := json.NewDecoder(r.Body).Decode(body) assert.Nil(t, err) assert.Equal(t, "Verbose", body.Test) w.Header().Set("Content-Type", "application/json") w.Write([]byte(`{"Status":"Ok"}`)) })) defer srv.Close() out := &bytes.Buffer{} c := &Client{ Verbose: false, VerboseOut: out, DebuggingVerbose: true, } req, err := http.NewRequest("POST", srv.URL, nil) req.Header.Set("Authorization", "Basic ABC") req.Header.Set("Content-Type", "application/json") require.Nil(t, err) require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) res, err := c.Do(req) require.Nil(t, err) io.Copy(ioutil.Discard, res.Body) res.Body.Close() assert.Equal(t, 200, res.StatusCode) assert.EqualValues(t, 1, called) assert.EqualValues(t, 0, out.Len(), out.String()) } git-lfs-2.3.4/localstorage/000077500000000000000000000000001317167762300155615ustar00rootroot00000000000000git-lfs-2.3.4/localstorage/currentstore.go000066400000000000000000000032071317167762300206510ustar00rootroot00000000000000package localstorage import ( "fmt" "io/ioutil" "os" "path/filepath" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" ) const ( tempDirPerms = 0755 localMediaDirPerms = 0755 localLogDirPerms = 0755 ) var ( objects *LocalStorage notInRepoErr = errors.New("not in a repository") TempDir = filepath.Join(os.TempDir(), "git-lfs") checkedTempDir string ) func Objects() *LocalStorage { return objects } func InitStorage() error { if len(config.LocalGitStorageDir) == 0 || len(config.LocalGitDir) == 0 { return notInRepoErr } cfg := config.Config.StorageConfig() TempDir = filepath.Join(cfg.LfsStorageDir, "tmp") // temp files per worktree objs, err := NewStorage( filepath.Join(cfg.LfsStorageDir, "objects"), filepath.Join(TempDir, "objects"), ) if err != nil { return errors.Wrap(err, "init LocalStorage") } objects = objs config.LocalLogDir = filepath.Join(objs.RootDir, "logs") if err := os.MkdirAll(config.LocalLogDir, localLogDirPerms); err != nil { return errors.Wrap(err, "create log dir") } return nil } func InitStorageOrFail() { if err := InitStorage(); err != nil { if err == notInRepoErr { return } fmt.Fprintf(os.Stderr, "ERROR: %s\n", err) os.Exit(1) } } func ResolveDirs() { config.ResolveGitBasicDirs() InitStorageOrFail() } func TempFile(prefix string) (*os.File, error) { if checkedTempDir != TempDir { if err := os.MkdirAll(TempDir, tempDirPerms); err != nil { return nil, err } checkedTempDir = TempDir } return ioutil.TempFile(TempDir, prefix) } func ResetTempDir() error { checkedTempDir = "" return os.RemoveAll(TempDir) } git-lfs-2.3.4/localstorage/localstorage.go000066400000000000000000000025361317167762300205750ustar00rootroot00000000000000// Package localstorage handles LFS content stored locally // NOTE: Subject to change, do not rely on this package from outside git-lfs 
source package localstorage import ( "fmt" "os" "path/filepath" "regexp" ) const ( chanBufSize = 100 ) var ( oidRE = regexp.MustCompile(`\A[[:alnum:]]{64}`) dirPerms os.FileMode = 0755 ) // LocalStorage manages the locally stored LFS objects for a repository. type LocalStorage struct { RootDir string TempDir string } // Object represents a locally stored LFS object. type Object struct { Oid string Size int64 } func NewStorage(storageDir, tempDir string) (*LocalStorage, error) { if err := os.MkdirAll(storageDir, dirPerms); err != nil { return nil, err } if err := os.MkdirAll(tempDir, dirPerms); err != nil { return nil, err } return &LocalStorage{storageDir, tempDir}, nil } func (s *LocalStorage) ObjectPath(oid string) string { return filepath.Join(localObjectDir(s, oid), oid) } func (s *LocalStorage) BuildObjectPath(oid string) (string, error) { dir := localObjectDir(s, oid) if err := os.MkdirAll(dir, dirPerms); err != nil { return "", fmt.Errorf("Error trying to create local storage directory in %q: %s", dir, err) } return filepath.Join(dir, oid), nil } func localObjectDir(s *LocalStorage, oid string) string { return filepath.Join(s.RootDir, oid[0:2], oid[2:4]) } git-lfs-2.3.4/localstorage/scan.go000066400000000000000000000027221317167762300170370ustar00rootroot00000000000000package localstorage import ( "os" "path/filepath" "github.com/rubyist/tracerx" ) // AllObjects returns a slice of the objects stored in this LocalStorage // object. This does not necessarily mean referenced by commits, just stored. // Note: reports final SHA only, extensions are ignored. func (s *LocalStorage) AllObjects() []Object { objects := make([]Object, 0, 100) for o := range s.ScanObjectsChan() { objects = append(objects, o) } return objects } // ScanObjectsChan returns a channel of all the objects stored in this // LocalStorage object. This does not necessarily mean referenced by commits, // just stored. You should not alter the store until this channel is closed. // Note: reports final SHA only, extensions are ignored.
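//
// A minimal consumption sketch (hypothetical caller; the `store` variable
// and the output format are illustrative, not part of this package):
//
//   for obj := range store.ScanObjectsChan() {
//       fmt.Printf("%s %d\n", obj.Oid, obj.Size)
//   }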
func (s *LocalStorage) ScanObjectsChan() <-chan Object { ch := make(chan Object, chanBufSize) go func() { defer close(ch) scanObjects(s.RootDir, ch) }() return ch } func scanObjects(dir string, ch chan<- Object) { dirf, err := os.Open(dir) if err != nil { return } defer dirf.Close() direntries, err := dirf.Readdir(0) if err != nil { tracerx.Printf("Problem with Readdir in %q: %s", dir, err) return } for _, dirfi := range direntries { if dirfi.IsDir() { subpath := filepath.Join(dir, dirfi.Name()) scanObjects(subpath, ch) } else { // Make sure it's really an object file & not .DS_Store etc if oidRE.MatchString(dirfi.Name()) { ch <- Object{dirfi.Name(), dirfi.Size()} } } } } git-lfs-2.3.4/localstorage/temp.go000066400000000000000000000021521317167762300170550ustar00rootroot00000000000000package localstorage import ( "os" "path/filepath" "strings" "time" "github.com/rubyist/tracerx" ) func (s *LocalStorage) ClearTempObjects() error { if len(s.TempDir) == 0 { return nil } d, err := os.Open(s.TempDir) if err != nil { return err } filenames, _ := d.Readdirnames(-1) for _, filename := range filenames { path := filepath.Join(s.TempDir, filename) if shouldDeleteTempObject(s, path) { os.RemoveAll(path) } } return nil } func shouldDeleteTempObject(s *LocalStorage, path string) bool { info, err := os.Stat(path) if err != nil { return false } if info.IsDir() { return false } base := filepath.Base(path) parts := strings.SplitN(base, "-", 2) oid := parts[0] if len(parts) < 2 || len(oid) != 64 { tracerx.Printf("Removing invalid tmp object file: %s", path) return true } fi, err := os.Stat(s.ObjectPath(oid)) if err == nil && !fi.IsDir() { tracerx.Printf("Removing existing tmp object file: %s", path) return true } if time.Since(info.ModTime()) > time.Hour { tracerx.Printf("Removing old tmp object file: %s", path) return true } return false } git-lfs-2.3.4/locking/000077500000000000000000000000001317167762300145305ustar00rootroot00000000000000git-lfs-2.3.4/locking/api.go000066400000000000000000000211541317167762300156330ustar00rootroot00000000000000package locking import ( "fmt" "net/http" "strconv" "github.com/git-lfs/git-lfs/lfsapi" ) type lockClient struct { *lfsapi.Client } // LockRequest encapsulates the payload sent across the API when a client would // like to obtain a lock against a particular path on a given remote. type lockRequest struct { // Path is the path that the client would like to obtain a lock against. Path string `json:"path"` } // LockResponse encapsulates the information sent over the API in response to // a `LockRequest`. type lockResponse struct { // Lock is the Lock that was optionally created in response to the // payload that was sent (see above). If the lock already exists, then // the existing lock is sent in this field instead, and the author of // that lock remains the same, meaning that the client failed to obtain // that lock. An HTTP status of "409 - Conflict" is used here. // // If the lock was unable to be created, this field will hold the // zero-value of Lock and the Message field will provide a more detailed set // of information. // // If an error was experienced in creating this lock, then the // zero-value of Lock should be sent here instead. Lock *Lock `json:"lock"` // Message is the optional error that was encountered while trying to create // the above lock.
Message string `json:"message,omitempty"` DocumentationURL string `json:"documentation_url,omitempty"` RequestID string `json:"request_id,omitempty"` } func (c *lockClient) Lock(remote string, lockReq *lockRequest) (*lockResponse, *http.Response, error) { e := c.Endpoints.Endpoint("upload", remote) req, err := c.NewRequest("POST", e, "locks", lockReq) if err != nil { return nil, nil, err } req = c.LogRequest(req, "lfs.locks.lock") res, err := c.DoWithAuth(remote, req) if err != nil { return nil, res, err } lockRes := &lockResponse{} return lockRes, res, lfsapi.DecodeJSON(res, lockRes) } // UnlockRequest encapsulates the data sent in an API request to remove a lock. type unlockRequest struct { // Force determines whether or not the lock should be "forcibly" // unlocked; that is to say whether or not a given individual should be // able to break a different individual's lock. Force bool `json:"force"` } // UnlockResponse is the result sent back from the API when asked to remove a // lock. type unlockResponse struct { // Lock is the lock corresponding to the asked-about lock in the // `UnlockPayload` (see above). If no matching lock was found, this // field will take the zero-value of Lock, and the Message field will be non-empty. Lock *Lock `json:"lock"` // Message is an optional field which holds any error that was experienced // while removing the lock. Message string `json:"message,omitempty"` DocumentationURL string `json:"documentation_url,omitempty"` RequestID string `json:"request_id,omitempty"` } func (c *lockClient) Unlock(remote, id string, force bool) (*unlockResponse, *http.Response, error) { e := c.Endpoints.Endpoint("upload", remote) suffix := fmt.Sprintf("locks/%s/unlock", id) req, err := c.NewRequest("POST", e, suffix, &unlockRequest{Force: force}) if err != nil { return nil, nil, err } req = c.LogRequest(req, "lfs.locks.unlock") res, err := c.DoWithAuth(remote, req) if err != nil { return nil, res, err } unlockRes := &unlockResponse{} err = lfsapi.DecodeJSON(res, unlockRes) return unlockRes, res, err } // Filter represents a single qualifier to apply against a set of locks. type lockFilter struct { // Property is the property to search against. // Value is the value that the property must take. Property, Value string } // LockSearchRequest encapsulates the request sent to the server when the client // would like a list of locks that match the given criteria. type lockSearchRequest struct { // Filters is the set of filters to query against. If the client wishes // to obtain a list of all locks, an empty array should be passed here. Filters []lockFilter // Cursor is an optional field used to tell the server which lock was // seen last, if scanning through multiple pages of results. // // Servers must return a list of locks sorted in reverse chronological // order, so the Cursor provides a consistent method of viewing all // locks, even if more were created between two requests. Cursor string // Limit is the maximum number of locks to return in a single page. Limit int } func (r *lockSearchRequest) QueryValues() map[string]string { q := make(map[string]string) for _, filter := range r.Filters { q[filter.Property] = filter.Value } if len(r.Cursor) > 0 { q["cursor"] = r.Cursor } if r.Limit > 0 { q["limit"] = strconv.Itoa(r.Limit) } return q } // LockList encapsulates a set of Locks. type lockList struct { // Locks is the set of locks returned back, typically matching the query // parameters sent in the LockListRequest call.
If no locks were matched // from a given query, then `Locks` will be represented as an empty // array. Locks []Lock `json:"locks"` // NextCursor returns the Id of the Lock the client should update its // cursor to, if there are multiple pages of results for a particular // `LockListRequest`. NextCursor string `json:"next_cursor,omitempty"` // Message populates any error that was encountered during the search. If no // error was encountered and the operation was successful, then a value // of nil will be passed here. Message string `json:"message,omitempty"` DocumentationURL string `json:"documentation_url,omitempty"` RequestID string `json:"request_id,omitempty"` } func (c *lockClient) Search(remote string, searchReq *lockSearchRequest) (*lockList, *http.Response, error) { e := c.Endpoints.Endpoint("upload", remote) req, err := c.NewRequest("GET", e, "locks", nil) if err != nil { return nil, nil, err } q := req.URL.Query() for key, value := range searchReq.QueryValues() { q.Add(key, value) } req.URL.RawQuery = q.Encode() req = c.LogRequest(req, "lfs.locks.search") res, err := c.DoWithAuth(remote, req) if err != nil { return nil, res, err } locks := &lockList{} if res.StatusCode == http.StatusOK { err = lfsapi.DecodeJSON(res, locks) } return locks, res, err } // lockVerifiableRequest encapsulates the request sent to the server when the // client would like a list of locks to verify a Git push. type lockVerifiableRequest struct { // Cursor is an optional field used to tell the server which lock was // seen last, if scanning through multiple pages of results. // // Servers must return a list of locks sorted in reverse chronological // order, so the Cursor provides a consistent method of viewing all // locks, even if more were created between two requests. Cursor string `json:"cursor,omitempty"` // Limit is the maximum number of locks to return in a single page. Limit int `json:"limit,omitempty"` } // lockVerifiableList encapsulates a set of Locks to verify a Git push. type lockVerifiableList struct { // Ours is the set of locks returned back matching filenames that the user // is allowed to edit. Ours []Lock `json:"ours"` // Theirs is the set of locks returned back matching filenames that the user // is NOT allowed to edit. Any edits matching these files should reject // the Git push. Theirs []Lock `json:"theirs"` // NextCursor returns the Id of the Lock the client should update its // cursor to, if there are multiple pages of results for a particular // `LockListRequest`. NextCursor string `json:"next_cursor,omitempty"` // Message populates any error that was encountered during the search. If no // error was encountered and the operation was successful, then a value // of nil will be passed here. Message string `json:"message,omitempty"` DocumentationURL string `json:"documentation_url,omitempty"` RequestID string `json:"request_id,omitempty"` } func (c *lockClient) SearchVerifiable(remote string, vreq *lockVerifiableRequest) (*lockVerifiableList, *http.Response, error) { e := c.Endpoints.Endpoint("upload", remote) req, err := c.NewRequest("POST", e, "locks/verify", vreq) if err != nil { return nil, nil, err } req = c.LogRequest(req, "lfs.locks.verify") res, err := c.DoWithAuth(remote, req) if err != nil { return nil, res, err } locks := &lockVerifiableList{} if res.StatusCode == http.StatusOK { err = lfsapi.DecodeJSON(res, locks) } return locks, res, err } // User represents the owner of a lock.
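//
// As a rough illustration (the value is borrowed from the Name field's
// comment below), a User marshals to JSON as:
//
//   {"name": "Rick Sanchez"}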
type User struct { // Name is the name of the individual who would like to obtain the // lock, for instance: "Rick Sanchez". Name string `json:"name"` } func NewUser(name string) *User { return &User{Name: name} } // String implements the fmt.Stringer interface. func (u *User) String() string { return u.Name } git-lfs-2.3.4/locking/api_test.go000066400000000000000000000164351317167762300167000ustar00rootroot00000000000000package locking import ( "encoding/json" "fmt" "net/http" "net/http/httptest" "os" "path/filepath" "strings" "testing" "github.com/git-lfs/git-lfs/lfsapi" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/xeipuuv/gojsonschema" ) func TestAPILock(t *testing.T) { require.NotNil(t, createReqSchema) require.NotNil(t, createResSchema) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/api/locks" { w.WriteHeader(404) return } assert.Equal(t, "POST", r.Method) assert.Equal(t, lfsapi.MediaType, r.Header.Get("Accept")) assert.Equal(t, lfsapi.MediaType, r.Header.Get("Content-Type")) assert.Equal(t, "18", r.Header.Get("Content-Length")) reqLoader, body := gojsonschema.NewReaderLoader(r.Body) lockReq := &lockRequest{} err := json.NewDecoder(body).Decode(lockReq) r.Body.Close() assert.Nil(t, err) assert.Equal(t, "request", lockReq.Path) assertSchema(t, createReqSchema, reqLoader) w.Header().Set("Content-Type", "application/json") resLoader, resWriter := gojsonschema.NewWriterLoader(w) err = json.NewEncoder(resWriter).Encode(&lockResponse{ Lock: &Lock{ Id: "1", Path: "response", }, }) assert.Nil(t, err) assertSchema(t, createResSchema, resLoader) })) defer srv.Close() c, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.url": srv.URL + "/api", })) require.Nil(t, err) lc := &lockClient{Client: c} lockRes, res, err := lc.Lock("", &lockRequest{Path: "request"}) require.Nil(t, err) assert.Equal(t, 200, res.StatusCode) assert.Equal(t, "1", lockRes.Lock.Id) assert.Equal(t, "response", lockRes.Lock.Path) } func TestAPIUnlock(t *testing.T) { require.NotNil(t, delReqSchema) require.NotNil(t, createResSchema) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/api/locks/123/unlock" { w.WriteHeader(404) return } assert.Equal(t, "POST", r.Method) assert.Equal(t, lfsapi.MediaType, r.Header.Get("Accept")) assert.Equal(t, lfsapi.MediaType, r.Header.Get("Content-Type")) reqLoader, body := gojsonschema.NewReaderLoader(r.Body) unlockReq := &unlockRequest{} err := json.NewDecoder(body).Decode(unlockReq) r.Body.Close() assert.Nil(t, err) assert.True(t, unlockReq.Force) assertSchema(t, delReqSchema, reqLoader) w.Header().Set("Content-Type", "application/json") resLoader, resWriter := gojsonschema.NewWriterLoader(w) err = json.NewEncoder(resWriter).Encode(&unlockResponse{ Lock: &Lock{ Id: "123", Path: "response", }, }) assert.Nil(t, err) assertSchema(t, createResSchema, resLoader) })) defer srv.Close() c, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.url": srv.URL + "/api", })) require.Nil(t, err) lc := &lockClient{Client: c} unlockRes, res, err := lc.Unlock("", "123", true) require.Nil(t, err) assert.Equal(t, 200, res.StatusCode) assert.Equal(t, "123", unlockRes.Lock.Id) assert.Equal(t, "response", unlockRes.Lock.Path) } func TestAPISearch(t *testing.T) { require.NotNil(t, listResSchema) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/api/locks" { 
w.WriteHeader(404) return } assert.Equal(t, "GET", r.Method) assert.Equal(t, lfsapi.MediaType, r.Header.Get("Accept")) assert.Equal(t, "", r.Header.Get("Content-Type")) q := r.URL.Query() assert.Equal(t, "A", q.Get("a")) assert.Equal(t, "cursor", q.Get("cursor")) assert.Equal(t, "5", q.Get("limit")) w.Header().Set("Content-Type", "application/json") resLoader, resWriter := gojsonschema.NewWriterLoader(w) err := json.NewEncoder(resWriter).Encode(&lockList{ Locks: []Lock{ {Id: "1"}, {Id: "2"}, }, }) assert.Nil(t, err) assertSchema(t, listResSchema, resLoader) })) defer srv.Close() c, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.url": srv.URL + "/api", })) require.Nil(t, err) lc := &lockClient{Client: c} locks, res, err := lc.Search("", &lockSearchRequest{ Filters: []lockFilter{ {Property: "a", Value: "A"}, }, Cursor: "cursor", Limit: 5, }) require.Nil(t, err) assert.Equal(t, 200, res.StatusCode) assert.Equal(t, 2, len(locks.Locks)) assert.Equal(t, "1", locks.Locks[0].Id) assert.Equal(t, "2", locks.Locks[1].Id) } func TestAPIVerifiableLocks(t *testing.T) { require.NotNil(t, verifyResSchema) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/api/locks/verify" { w.WriteHeader(404) return } assert.Equal(t, "POST", r.Method) assert.Equal(t, lfsapi.MediaType, r.Header.Get("Accept")) assert.Equal(t, lfsapi.MediaType, r.Header.Get("Content-Type")) body := lockVerifiableRequest{} if assert.Nil(t, json.NewDecoder(r.Body).Decode(&body)) { assert.Equal(t, "cursor", body.Cursor) assert.Equal(t, 5, body.Limit) } w.Header().Set("Content-Type", "application/json") resLoader, resWriter := gojsonschema.NewWriterLoader(w) err := json.NewEncoder(resWriter).Encode(&lockVerifiableList{ Ours: []Lock{ {Id: "1"}, {Id: "2"}, }, Theirs: []Lock{ {Id: "3"}, }, }) assert.Nil(t, err) assertSchema(t, verifyResSchema, resLoader) })) defer srv.Close() c, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.url": srv.URL + "/api", })) require.Nil(t, err) lc := &lockClient{Client: c} locks, res, err := lc.SearchVerifiable("", &lockVerifiableRequest{ Cursor: "cursor", Limit: 5, }) require.Nil(t, err) assert.Equal(t, 200, res.StatusCode) assert.Equal(t, 2, len(locks.Ours)) assert.Equal(t, "1", locks.Ours[0].Id) assert.Equal(t, "2", locks.Ours[1].Id) assert.Equal(t, 1, len(locks.Theirs)) assert.Equal(t, "3", locks.Theirs[0].Id) } var ( createReqSchema *sourcedSchema createResSchema *sourcedSchema delReqSchema *sourcedSchema listResSchema *sourcedSchema verifyResSchema *sourcedSchema ) func init() { wd, err := os.Getwd() if err != nil { fmt.Println("getwd error:", err) return } createReqSchema = getSchema(wd, "schemas/http-lock-create-request-schema.json") createResSchema = getSchema(wd, "schemas/http-lock-create-response-schema.json") delReqSchema = getSchema(wd, "schemas/http-lock-delete-request-schema.json") listResSchema = getSchema(wd, "schemas/http-lock-list-response-schema.json") verifyResSchema = getSchema(wd, "schemas/http-lock-verify-response-schema.json") } type sourcedSchema struct { Source string *gojsonschema.Schema } func getSchema(wd, relpath string) *sourcedSchema { abspath := filepath.ToSlash(filepath.Join(wd, relpath)) s, err := gojsonschema.NewSchema(gojsonschema.NewReferenceLoader(fmt.Sprintf("file:///%s", abspath))) if err != nil { fmt.Printf("schema load error for %q: %+v\n", relpath, err) } return &sourcedSchema{Source: relpath, Schema: s} } func assertSchema(t *testing.T, schema *sourcedSchema, 
dataLoader gojsonschema.JSONLoader) { res, err := schema.Validate(dataLoader) if assert.Nil(t, err) { if res.Valid() { return } resErrors := res.Errors() valErrors := make([]string, 0, len(resErrors)) for _, resErr := range resErrors { valErrors = append(valErrors, resErr.String()) } t.Errorf("Schema: %s\n%s", schema.Source, strings.Join(valErrors, "\n")) } } git-lfs-2.3.4/locking/cache.go000066400000000000000000000043311317167762300161230ustar00rootroot00000000000000package locking import ( "strings" "github.com/git-lfs/git-lfs/tools/kv" ) const ( // We want to use a single cache file for integrity, but to make it easy to // list all locks, prefix the id->path map in a way we can identify (something // that won't be in a path) idKeyPrefix string = "*id*://" ) type LockCache struct { kv *kv.Store } func NewLockCache(filepath string) (*LockCache, error) { kv, err := kv.NewStore(filepath) if err != nil { return nil, err } return &LockCache{kv}, nil } // Cache a successful lock for faster local lookup later func (c *LockCache) Add(l Lock) error { // Store reference in both directions // Path -> Lock c.kv.Set(l.Path, &l) // EncodedId -> Lock (encoded so we can easily identify) c.kv.Set(c.encodeIdKey(l.Id), &l) return nil } // Remove a cached lock by path because it's been relinquished func (c *LockCache) RemoveByPath(filePath string) error { ilock := c.kv.Get(filePath) if lock, ok := ilock.(*Lock); ok && lock != nil { c.kv.Remove(lock.Path) // Id as key is encoded c.kv.Remove(c.encodeIdKey(lock.Id)) } return nil } // Remove a cached lock by id because it's been relinquished func (c *LockCache) RemoveById(id string) error { // Id as key is encoded idkey := c.encodeIdKey(id) ilock := c.kv.Get(idkey) if lock, ok := ilock.(*Lock); ok && lock != nil { c.kv.Remove(idkey) c.kv.Remove(lock.Path) } return nil } // Get the list of cached locked files func (c *LockCache) Locks() []Lock { var locks []Lock c.kv.Visit(func(key string, val interface{}) bool { // Only report file->id entries not reverse if !c.isIdKey(key) { lock := val.(*Lock) locks = append(locks, *lock) } return true // continue }) return locks } // Clear the cache func (c *LockCache) Clear() { c.kv.RemoveAll() } // Save the cache func (c *LockCache) Save() error { return c.kv.Save() } func (c *LockCache) encodeIdKey(id string) string { // Safety against accidents if !c.isIdKey(id) { return idKeyPrefix + id } return id } func (c *LockCache) decodeIdKey(key string) string { // Safety against accidents if c.isIdKey(key) { return key[len(idKeyPrefix):] } return key } func (c *LockCache) isIdKey(key string) bool { return strings.HasPrefix(key, idKeyPrefix) } git-lfs-2.3.4/locking/cache_test.go000066400000000000000000000022721317167762300171640ustar00rootroot00000000000000package locking import ( "io/ioutil" "os" "testing" "github.com/stretchr/testify/assert" ) func TestLockCache(t *testing.T) { var err error tmpf, err := ioutil.TempFile("", "testCacheLock") assert.Nil(t, err) defer func() { os.Remove(tmpf.Name()) }() tmpf.Close() cache, err := NewLockCache(tmpf.Name()) assert.Nil(t, err) testLocks := []Lock{ Lock{Path: "folder/test1.dat", Id: "101"}, Lock{Path: "folder/test2.dat", Id: "102"}, Lock{Path: "root.dat", Id: "103"}, } for _, l := range testLocks { err = cache.Add(l) assert.Nil(t, err) } locks := cache.Locks() for _, l := range testLocks { assert.Contains(t, locks, l) } assert.Equal(t, len(testLocks), len(locks)) err = cache.RemoveByPath("folder/test2.dat") assert.Nil(t, err) locks = cache.Locks() // delete item 1 from test locks
testLocks = append(testLocks[:1], testLocks[2:]...) for _, l := range testLocks { assert.Contains(t, locks, l) } assert.Equal(t, len(testLocks), len(locks)) err = cache.RemoveById("101") assert.Nil(t, err) locks = cache.Locks() testLocks = testLocks[1:] for _, l := range testLocks { assert.Contains(t, locks, l) } assert.Equal(t, len(testLocks), len(locks)) } git-lfs-2.3.4/locking/lockable.go000066400000000000000000000155531317167762300166440ustar00rootroot00000000000000package locking import ( "fmt" "os" "path/filepath" "strings" "sync" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/tools" ) // GetLockablePatterns returns a list of patterns in .gitattributes which are // marked as lockable func (c *Client) GetLockablePatterns() []string { c.ensureLockablesLoaded() return c.lockablePatterns } // getLockableFilter returns the internal filter used to check if a file is lockable func (c *Client) getLockableFilter() *filepathfilter.Filter { c.ensureLockablesLoaded() return c.lockableFilter } func (c *Client) ensureLockablesLoaded() { c.lockableMutex.Lock() defer c.lockableMutex.Unlock() // Only load once if c.lockablePatterns == nil { c.refreshLockablePatterns() } } // Internal function to repopulate lockable patterns // You must have locked the c.lockableMutex in the caller func (c *Client) refreshLockablePatterns() { paths := git.GetAttributePaths(c.LocalWorkingDir, c.LocalGitDir) // Always make non-nil even if empty c.lockablePatterns = make([]string, 0, len(paths)) for _, p := range paths { if p.Lockable { c.lockablePatterns = append(c.lockablePatterns, p.Path) } } c.lockableFilter = filepathfilter.New(c.lockablePatterns, nil) } // IsFileLockable returns whether a specific file path is marked as Lockable, // ie has the 'lockable' attribute in .gitattributes // Lockable patterns are cached once for performance, unless you call RefreshLockablePatterns // path should be relative to repository root func (c *Client) IsFileLockable(path string) bool { return c.getLockableFilter().Allows(path) } // FixAllLockableFileWriteFlags recursively scans the repo looking for files which // are lockable, and makes sure their write flags are set correctly based on // whether they are currently locked or unlocked. // Files which are unlocked are made read-only, files which are locked are made // writeable. // This function can be used after a clone or checkout to ensure that file // state correctly reflects the locking state func (c *Client) FixAllLockableFileWriteFlags() error { return c.fixFileWriteFlags(c.LocalWorkingDir, c.LocalWorkingDir, c.getLockableFilter(), nil) } // FixFileWriteFlagsInDir scans dir (which can either be a relative dir // from the root of the repo, or an absolute dir within the repo) looking for // files to change permissions for. // If lockablePatterns is non-nil, then any file matching those patterns will be // checked to see if it is currently locked by the current committer, and if so // it will be writeable, and if not locked it will be read-only. // If unlockablePatterns is non-nil, then any file matching those patterns will // be made writeable if it is not already. This can be used to reset files to // writeable when their 'lockable' attribute is turned off. 
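//
// A hypothetical call sketch (the directory and pattern are illustrative
// only):
//
//   err := client.FixFileWriteFlagsInDir("assets", []string{"*.psd"}, nil)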
func (c *Client) FixFileWriteFlagsInDir(dir string, lockablePatterns, unlockablePatterns []string) error { // early-out if no patterns if len(lockablePatterns) == 0 && len(unlockablePatterns) == 0 { return nil } absPath := dir if !filepath.IsAbs(dir) { absPath = filepath.Join(c.LocalWorkingDir, dir) } stat, err := os.Stat(absPath) if err != nil { return err } if !stat.IsDir() { return fmt.Errorf("%q is not a valid directory", dir) } var lockableFilter *filepathfilter.Filter var unlockableFilter *filepathfilter.Filter if lockablePatterns != nil { lockableFilter = filepathfilter.New(lockablePatterns, nil) } if unlockablePatterns != nil { unlockableFilter = filepathfilter.New(unlockablePatterns, nil) } return c.fixFileWriteFlags(absPath, c.LocalWorkingDir, lockableFilter, unlockableFilter) } // Internal implementation of fixing file write flags with precompiled filters func (c *Client) fixFileWriteFlags(absPath, workingDir string, lockable, unlockable *filepathfilter.Filter) error { var errs []error var errMux sync.Mutex addErr := func(err error) { errMux.Lock() defer errMux.Unlock() errs = append(errs, err) } tools.FastWalkGitRepo(absPath, func(parentDir string, fi os.FileInfo, err error) { if err != nil { addErr(err) return } // Skip dirs, we only need to check files if fi.IsDir() { return } abschild := filepath.Join(parentDir, fi.Name()) // This is a file, get relative to repo root relpath, err := filepath.Rel(workingDir, abschild) if err != nil { addErr(err) return } err = c.fixSingleFileWriteFlags(relpath, lockable, unlockable) if err != nil { addErr(err) } }) return errors.Combine(errs) } // FixLockableFileWriteFlags checks each file in the provided list, and for // those which are lockable, makes sure their write flags are set correctly // based on whether they are currently locked or unlocked. Files which are // unlocked are made read-only, files which are locked are made writeable. // Files which are not lockable are ignored. // This function can be used after a clone or checkout to ensure that file // state correctly reflects the locking state, and is more efficient than // FixAllLockableFileWriteFlags when you know which files changed func (c *Client) FixLockableFileWriteFlags(files []string) error { // early-out if no lockable patterns if len(c.GetLockablePatterns()) == 0 { return nil } var errs []error for _, f := range files { err := c.fixSingleFileWriteFlags(f, c.getLockableFilter(), nil) if err != nil { errs = append(errs, err) } } return errors.Combine(errs) } // fixSingleFileWriteFlags fixes write flags on a single file // If lockablePatterns is non-nil, then any file matching those patterns will be // checked to see if it is currently locked by the current committer, and if so // it will be writeable, and if not locked it will be read-only. // If unlockablePatterns is non-nil, then any file matching those patterns will // be made writeable if it is not already. This can be used to reset files to // writeable when their 'lockable' attribute is turned off. 
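//
// A rough internal usage sketch (the path is illustrative; the filters
// are precompiled by the exported callers above):
//
//   err := c.fixSingleFileWriteFlags("assets/logo.psd", lockableFilter, nil)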
func (c *Client) fixSingleFileWriteFlags(file string, lockable, unlockable *filepathfilter.Filter) error { // Convert to git-style forward slash separators if necessary // Necessary to match attributes if filepath.Separator == '\\' { file = strings.Replace(file, "\\", "/", -1) } if lockable != nil && lockable.Allows(file) { // Lockable files are writeable only if they're currently locked err := tools.SetFileWriteFlag(file, c.IsFileLockedByCurrentCommitter(file)) // Ignore not exist errors if err != nil && !os.IsNotExist(err) { return err } } else if unlockable != nil && unlockable.Allows(file) { // Unlockable files are always writeable // We only check files which match the incoming patterns to avoid // checking every file in the system all the time, and only do it // when a file has had its lockable attribute removed err := tools.SetFileWriteFlag(file, true) if err != nil && !os.IsNotExist(err) { return err } } return nil } git-lfs-2.3.4/locking/locks.go000066400000000000000000000246111317167762300161760ustar00rootroot00000000000000package locking import ( "fmt" "net/http" "os" "path/filepath" "sync" "time" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/tools" "github.com/git-lfs/git-lfs/tools/kv" "github.com/rubyist/tracerx" ) var ( // ErrNoMatchingLocks is an error returned when no matching locks were // able to be resolved ErrNoMatchingLocks = errors.New("lfs: no matching locks found") // ErrLockAmbiguous is an error returned when multiple matching locks // were found ErrLockAmbiguous = errors.New("lfs: multiple locks found; ambiguous") ) type LockCacher interface { Add(l Lock) error RemoveByPath(filePath string) error RemoveById(id string) error Locks() []Lock Clear() Save() error } // Client is the main interface object for the locking package type Client struct { Remote string client *lockClient cache LockCacher lockablePatterns []string lockableFilter *filepathfilter.Filter lockableMutex sync.Mutex LocalWorkingDir string LocalGitDir string SetLockableFilesReadOnly bool } // NewClient creates a new locking client with the given configuration // You must call the returned object's `Close` method when you are finished with // it func NewClient(remote string, lfsClient *lfsapi.Client) (*Client, error) { return &Client{ Remote: remote, client: &lockClient{Client: lfsClient}, cache: &nilLockCacher{}, }, nil } func (c *Client) SetupFileCache(path string) error { stat, err := os.Stat(path) if err != nil { return errors.Wrap(err, "init lock cache") } lockFile := path if stat.IsDir() { lockFile = filepath.Join(path, "lockcache.db") } cache, err := NewLockCache(lockFile) if err != nil { return errors.Wrap(err, "init lock cache") } c.cache = cache return nil } // Close this client instance; must be called to dispose of resources func (c *Client) Close() error { return c.cache.Save() } // LockFile attempts to lock a file on the current remote // path must be relative to the root of the repository // Returns the lock id if successful, or an error func (c *Client) LockFile(path string) (Lock, error) { lockRes, _, err := c.client.Lock(c.Remote, &lockRequest{Path: path}) if err != nil { return Lock{}, errors.Wrap(err, "api") } if len(lockRes.Message) > 0 { if len(lockRes.RequestID) > 0 { tracerx.Printf("Server Request ID: %s", lockRes.RequestID) } return Lock{}, fmt.Errorf("Server unable to create lock: %s", lockRes.Message) } lock := *lockRes.Lock if err := 
c.cache.Add(lock); err != nil { return Lock{}, errors.Wrap(err, "lock cache") } abs, err := getAbsolutePath(path) if err != nil { return Lock{}, errors.Wrap(err, "make lockpath absolute") } // Ensure writeable on return if err := tools.SetFileWriteFlag(abs, true); err != nil { return Lock{}, err } return lock, nil } // getAbsolutePath takes a repository-relative path and makes it absolute. // // For instance, given a repository in /usr/local/src/my-repo and a file called // dir/foo/bar.txt, getAbsolutePath will return: // // /usr/local/src/my-repo/dir/foo/bar.txt func getAbsolutePath(p string) (string, error) { root, err := git.RootDir() if err != nil { return "", err } return filepath.Join(root, p), nil } // UnlockFile attempts to unlock a file on the current remote // path must be relative to the root of the repository // Force causes the file to be unlocked from other users as well func (c *Client) UnlockFile(path string, force bool) error { id, err := c.lockIdFromPath(path) if err != nil { return fmt.Errorf("Unable to get lock id: %v", err) } return c.UnlockFileById(id, force) } // UnlockFileById attempts to unlock a lock with a given id on the current remote // Force causes the file to be unlocked from other users as well func (c *Client) UnlockFileById(id string, force bool) error { unlockRes, _, err := c.client.Unlock(c.Remote, id, force) if err != nil { return errors.Wrap(err, "api") } if len(unlockRes.Message) > 0 { if len(unlockRes.RequestID) > 0 { tracerx.Printf("Server Request ID: %s", unlockRes.RequestID) } return fmt.Errorf("Server unable to unlock: %s", unlockRes.Message) } if err := c.cache.RemoveById(id); err != nil { return fmt.Errorf("Error caching unlock information: %v", err) } if unlockRes.Lock != nil { abs, err := getAbsolutePath(unlockRes.Lock.Path) if err != nil { return errors.Wrap(err, "make lockpath absolute") } // Make non-writeable if required if c.SetLockableFilesReadOnly && c.IsFileLockable(unlockRes.Lock.Path) { return tools.SetFileWriteFlag(abs, false) } } return nil } // Lock is a record of a locked file type Lock struct { // Id is the unique identifier corresponding to this particular Lock. It // must be consistent with the local copy, and the server's copy. Id string `json:"id"` // Path is an absolute path to the file that is locked as a part of this // lock. Path string `json:"path"` // Owner is the identity of the user that created this lock. Owner *User `json:"owner,omitempty"` // LockedAt is the time at which this lock was acquired. 
LockedAt time.Time `json:"locked_at"` } // SearchLocks returns a list of locks which match the given name/value filter // If limit > 0 then search stops at that number of locks // If localOnly = true, don't query the server & report only own local locks func (c *Client) SearchLocks(filter map[string]string, limit int, localOnly bool) ([]Lock, error) { if localOnly { return c.searchCachedLocks(filter, limit) } else { return c.searchRemoteLocks(filter, limit) } } func (c *Client) VerifiableLocks(limit int) (ourLocks, theirLocks []Lock, err error) { ourLocks = make([]Lock, 0, limit) theirLocks = make([]Lock, 0, limit) body := &lockVerifiableRequest{ Limit: limit, } for { list, res, err := c.client.SearchVerifiable(c.Remote, body) if res != nil { switch res.StatusCode { case http.StatusNotFound, http.StatusNotImplemented: return ourLocks, theirLocks, errors.NewNotImplementedError(err) case http.StatusForbidden: return ourLocks, theirLocks, errors.NewAuthError(err) } } if err != nil { return ourLocks, theirLocks, err } if list.Message != "" { if len(list.RequestID) > 0 { tracerx.Printf("Server Request ID: %s", list.RequestID) } return ourLocks, theirLocks, fmt.Errorf("Server error searching locks: %s", list.Message) } for _, l := range list.Ours { ourLocks = append(ourLocks, l) if limit > 0 && (len(ourLocks)+len(theirLocks)) >= limit { return ourLocks, theirLocks, nil } } for _, l := range list.Theirs { theirLocks = append(theirLocks, l) if limit > 0 && (len(ourLocks)+len(theirLocks)) >= limit { return ourLocks, theirLocks, nil } } if list.NextCursor != "" { body.Cursor = list.NextCursor } else { break } } return ourLocks, theirLocks, nil } func (c *Client) searchCachedLocks(filter map[string]string, limit int) ([]Lock, error) { cachedlocks := c.cache.Locks() path, filterByPath := filter["path"] id, filterById := filter["id"] lockCount := 0 locks := make([]Lock, 0, len(cachedlocks)) for _, l := range cachedlocks { // Manually filter by Path/Id if (filterByPath && path != l.Path) || (filterById && id != l.Id) { continue } locks = append(locks, l) lockCount++ if limit > 0 && lockCount >= limit { break } } return locks, nil } func (c *Client) searchRemoteLocks(filter map[string]string, limit int) ([]Lock, error) { locks := make([]Lock, 0, limit) apifilters := make([]lockFilter, 0, len(filter)) for k, v := range filter { apifilters = append(apifilters, lockFilter{Property: k, Value: v}) } query := &lockSearchRequest{Filters: apifilters, Limit: limit} for { list, _, err := c.client.Search(c.Remote, query) if err != nil { return locks, errors.Wrap(err, "locking") } if list.Message != "" { if len(list.RequestID) > 0 { tracerx.Printf("Server Request ID: %s", list.RequestID) } return locks, fmt.Errorf("Server error searching for locks: %s", list.Message) } for _, l := range list.Locks { locks = append(locks, l) if limit > 0 && len(locks) >= limit { // Exit outer loop too return locks, nil } } if list.NextCursor != "" { query.Cursor = list.NextCursor } else { break } } return locks, nil } // lockIdFromPath makes a call to the LFS API and resolves the ID for the lock // at the given path. // // If the API call failed, an error will be returned. If multiple locks matched // the given path (should not happen during real-world usage), an error will be // returned. If no locks matched the given path, an error will be returned. // // If the API call is successful, and only one lock matches the given filepath, // then its ID will be returned, along with a value of "nil" for the error.
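//
// A speculative lookup sketch (the path is illustrative):
//
//   id, err := c.lockIdFromPath("folder/test1.dat")
//   if err == ErrNoMatchingLocks {
//       // nothing is locked at this path
//   }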
func (c *Client) lockIdFromPath(path string) (string, error) { list, _, err := c.client.Search(c.Remote, &lockSearchRequest{ Filters: []lockFilter{ {Property: "path", Value: path}, }, }) if err != nil { return "", err } switch len(list.Locks) { case 0: return "", ErrNoMatchingLocks case 1: return list.Locks[0].Id, nil default: return "", ErrLockAmbiguous } } // Fetch locked files for the current user and cache them locally // This can be used to sync up locked files when moving machines func (c *Client) refreshLockCache() error { ourLocks, _, err := c.VerifiableLocks(0) if err != nil { return err } // We're going to overwrite the entire local cache c.cache.Clear() for _, l := range ourLocks { c.cache.Add(l) } return nil } // IsFileLockedByCurrentCommitter returns whether a file is locked by the // current user, as cached locally func (c *Client) IsFileLockedByCurrentCommitter(path string) bool { filter := map[string]string{"path": path} locks, err := c.searchCachedLocks(filter, 1) if err != nil { tracerx.Printf("Error searching cached locks: %s\nForcing remote search", err) locks, _ = c.searchRemoteLocks(filter, 1) } return len(locks) > 0 } func init() { kv.RegisterTypeForStorage(&Lock{}) } type nilLockCacher struct{} func (c *nilLockCacher) Add(l Lock) error { return nil } func (c *nilLockCacher) RemoveByPath(filePath string) error { return nil } func (c *nilLockCacher) RemoveById(id string) error { return nil } func (c *nilLockCacher) Locks() []Lock { return nil } func (c *nilLockCacher) Clear() {} func (c *nilLockCacher) Save() error { return nil } git-lfs-2.3.4/locking/locks_test.go000066400000000000000000000106401317167762300172320ustar00rootroot00000000000000package locking import ( "encoding/json" "io/ioutil" "net/http" "net/http/httptest" "sort" "testing" "time" "github.com/git-lfs/git-lfs/lfsapi" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type LocksById []Lock func (a LocksById) Len() int { return len(a) } func (a LocksById) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a LocksById) Less(i, j int) bool { return a[i].Id < a[j].Id } func TestRefreshCache(t *testing.T) { var err error tempDir, err := ioutil.TempDir("", "testCacheLock") assert.Nil(t, err) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "POST", r.Method) assert.Equal(t, "/api/locks/verify", r.URL.Path) w.Header().Set("Content-Type", "application/json") err = json.NewEncoder(w).Encode(lockVerifiableList{ Theirs: []Lock{ Lock{Id: "99", Path: "folder/test3.dat", Owner: &User{Name: "Alice"}}, Lock{Id: "199", Path: "other/test1.dat", Owner: &User{Name: "Charles"}}, }, Ours: []Lock{ Lock{Id: "101", Path: "folder/test1.dat", Owner: &User{Name: "Fred"}}, Lock{Id: "102", Path: "folder/test2.dat", Owner: &User{Name: "Fred"}}, Lock{Id: "103", Path: "root.dat", Owner: &User{Name: "Fred"}}, }, }) assert.Nil(t, err) })) defer func() { srv.Close() }() lfsclient, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.url": srv.URL + "/api", "user.name": "Fred", "user.email": "fred@bloggs.com", })) require.Nil(t, err) client, err := NewClient("", lfsclient) assert.Nil(t, err) assert.Nil(t, client.SetupFileCache(tempDir)) // Should start with no cached items locks, err := client.SearchLocks(nil, 0, true) assert.Nil(t, err) assert.Empty(t, locks) // Should load from test data, just Fred's err = client.refreshLockCache() assert.Nil(t, err) locks, err = client.SearchLocks(nil, 0, true) assert.Nil(t, err) // Need to include zero 
time in structure for equal to work zeroTime := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) // Sort locks for stable comparison sort.Sort(LocksById(locks)) assert.Equal(t, []Lock{ Lock{Path: "folder/test1.dat", Id: "101", Owner: &User{Name: "Fred"}, LockedAt: zeroTime}, Lock{Path: "folder/test2.dat", Id: "102", Owner: &User{Name: "Fred"}, LockedAt: zeroTime}, Lock{Path: "root.dat", Id: "103", Owner: &User{Name: "Fred"}, LockedAt: zeroTime}, }, locks) } func TestGetVerifiableLocks(t *testing.T) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "POST", r.Method) assert.Equal(t, "/api/locks/verify", r.URL.Path) body := lockVerifiableRequest{} if assert.Nil(t, json.NewDecoder(r.Body).Decode(&body)) { w.Header().Set("Content-Type", "application/json") list := lockVerifiableList{} if body.Cursor == "1" { list.Ours = []Lock{ Lock{Path: "folder/1/test1.dat", Id: "111"}, } list.Theirs = []Lock{ Lock{Path: "folder/1/test2.dat", Id: "112"}, Lock{Path: "folder/1/test3.dat", Id: "113"}, } } else { list.Ours = []Lock{ Lock{Path: "folder/0/test1.dat", Id: "101"}, Lock{Path: "folder/0/test2.dat", Id: "102"}, } list.Theirs = []Lock{ Lock{Path: "folder/0/test3.dat", Id: "103"}, } list.NextCursor = "1" } err := json.NewEncoder(w).Encode(&list) assert.Nil(t, err) } else { w.WriteHeader(500) } })) defer srv.Close() lfsclient, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.url": srv.URL + "/api", "user.name": "Fred", "user.email": "fred@bloggs.com", })) require.Nil(t, err) client, err := NewClient("", lfsclient) assert.Nil(t, err) ourLocks, theirLocks, err := client.VerifiableLocks(0) assert.Nil(t, err) // Need to include zero time in structure for equal to work zeroTime := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) // Sort locks for stable comparison sort.Sort(LocksById(ourLocks)) assert.Equal(t, []Lock{ Lock{Path: "folder/0/test1.dat", Id: "101", LockedAt: zeroTime}, Lock{Path: "folder/0/test2.dat", Id: "102", LockedAt: zeroTime}, Lock{Path: "folder/1/test1.dat", Id: "111", LockedAt: zeroTime}, }, ourLocks) sort.Sort(LocksById(theirLocks)) assert.Equal(t, []Lock{ Lock{Path: "folder/0/test3.dat", Id: "103", LockedAt: zeroTime}, Lock{Path: "folder/1/test2.dat", Id: "112", LockedAt: zeroTime}, Lock{Path: "folder/1/test3.dat", Id: "113", LockedAt: zeroTime}, }, theirLocks) } git-lfs-2.3.4/locking/schemas/000077500000000000000000000000001317167762300161535ustar00rootroot00000000000000git-lfs-2.3.4/locking/schemas/http-lock-create-request-schema.json000066400000000000000000000003361317167762300251420ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-04/schema", "title": "Git LFS HTTPS Lock Creation API Request", "type": "object", "properties": { "path": { "type": "string" } }, "required": ["path"] } git-lfs-2.3.4/locking/schemas/http-lock-create-response-schema.json000066400000000000000000000014071317167762300253100ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-04/schema", "title": "Git LFS HTTPS Lock Creation API Response", "type": "object", "properties": { "lock": { "type": "object", "properties": { "id": { "type": "string" }, "path": { "type": "string" }, "locked_at": { "type": "string" }, "owner": { "type": "object", "properties": { "name": { "type": "string" } } } }, "required": ["id", "path"] }, "message": { "type": "string" }, "request_id": { "type": "string" }, "documentation_url": { "type": "string" } }, "required": ["lock"] } 
git-lfs-2.3.4/locking/schemas/http-lock-delete-request-schema.json000066400000000000000000000003101317167762300251310ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-04/schema", "title": "Git LFS HTTPS Lock Deletion API Request", "type": "object", "properties": { "force": { "type": "boolean" } } } git-lfs-2.3.4/locking/schemas/http-lock-list-response-schema.json000066400000000000000000000013241317167762300250160ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-04/schema", "title": "Git LFS HTTPS Lock List API Response", "type": "object", "properties": { "locks": { "type": "array", "items": { "type": "object", "properties": { "id": { "type": "string" }, "path": { "type": "string" }, "locked_at": { "type": "string" }, "owner": { "type": "object", "properties": { "name": { "type": "string" } } } } } }, "next_cursor": { "type": "string" } }, "required": ["locks"] } git-lfs-2.3.4/locking/schemas/http-lock-verify-response-schema.json000066400000000000000000000016271317167762300253550ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-04/schema", "title": "Git LFS HTTPS Lock Verify API Response", "type": "object", "definitions": { "lock": { "type": "object", "properties": { "id": { "type": "string" }, "path": { "type": "string" }, "locked_at": { "type": "string" }, "owner": { "type": "object", "properties": { "name": { "type": "string" } } } }, "required": ["id", "path"] } }, "properties": { "ours": { "type": "array", "items": { "$ref": "#/definitions/lock" } }, "theirs": { "type": "array", "items": { "$ref": "#/definitions/lock" } }, "next_cursor": { "type": "string" } }, "required": ["ours", "theirs"] } git-lfs-2.3.4/progress/000077500000000000000000000000001317167762300147465ustar00rootroot00000000000000git-lfs-2.3.4/progress/copycallback.go000066400000000000000000000043511317167762300177270ustar00rootroot00000000000000package progress import ( "bytes" "io" ) type CopyCallback func(totalSize int64, readSoFar int64, readSinceLast int) error type BodyWithCallback struct { c CopyCallback totalSize int64 readSize int64 ReadSeekCloser } func NewByteBodyWithCallback(by []byte, totalSize int64, cb CopyCallback) *BodyWithCallback { return NewBodyWithCallback(NewByteBody(by), totalSize, cb) } func NewBodyWithCallback(body ReadSeekCloser, totalSize int64, cb CopyCallback) *BodyWithCallback { return &BodyWithCallback{ c: cb, totalSize: totalSize, ReadSeekCloser: body, } } // Read wraps the underlying Reader's "Read" method. It also captures the number // of bytes read, and calls the callback. func (r *BodyWithCallback) Read(p []byte) (int, error) { n, err := r.ReadSeekCloser.Read(p) if n > 0 { r.readSize += int64(n) if (err == nil || err == io.EOF) && r.c != nil { err = r.c(r.totalSize, r.readSize, n) } } return n, err } // Seek wraps the underlying Seeker's "Seek" method, updating the number of // bytes that have been consumed by this reader. func (r *BodyWithCallback) Seek(offset int64, whence int) (int64, error) { switch whence { case io.SeekStart: r.readSize = offset case io.SeekCurrent: r.readSize += offset case io.SeekEnd: r.readSize = r.totalSize + offset } return r.ReadSeekCloser.Seek(offset, whence) } // ResetProgress calls the callback with a negative read size equal to the // total number of bytes read so far, effectively "resetting" the progress. 
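//
// A speculative retry sketch: reset the reported progress before
// rewinding the body for a second attempt:
//
//   body.ResetProgress()
//   body.Seek(0, io.SeekStart)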
func (r *BodyWithCallback) ResetProgress() error { return r.c(r.totalSize, r.readSize, -int(r.readSize)) } type CallbackReader struct { C CopyCallback TotalSize int64 ReadSize int64 io.Reader } func (w *CallbackReader) Read(p []byte) (int, error) { n, err := w.Reader.Read(p) if n > 0 { w.ReadSize += int64(n) if (err == nil || err == io.EOF) && w.C != nil { err = w.C(w.TotalSize, w.ReadSize, n) } } return n, err } // prevent import cycle type ReadSeekCloser interface { io.Seeker io.ReadCloser } func NewByteBody(by []byte) ReadSeekCloser { return &closingByteReader{Reader: bytes.NewReader(by)} } type closingByteReader struct { *bytes.Reader } func (r *closingByteReader) Close() error { return nil } git-lfs-2.3.4/progress/copycallback_test.go000066400000000000000000000037051317167762300207700ustar00rootroot00000000000000package progress import ( "io" "sync/atomic" "testing" "github.com/stretchr/testify/assert" ) func TestCopyCallbackReaderCallsCallbackUnderfilledBuffer(t *testing.T) { var ( calls uint32 actualTotalSize int64 actualReadSoFar int64 actualReadSinceLast int ) cb := func(totalSize int64, readSoFar int64, readSinceLast int) error { atomic.AddUint32(&calls, 1) actualTotalSize = totalSize actualReadSoFar = readSoFar actualReadSinceLast = readSinceLast return nil } buf := []byte{0x1} r := &CallbackReader{ C: cb, TotalSize: 3, ReadSize: 2, Reader: &EOFReader{b: buf}, } p := make([]byte, len(buf)+1) n, err := r.Read(p) assert.Equal(t, 1, n) assert.Nil(t, err) assert.EqualValues(t, 1, calls, "expected 1 call(s) to callback, got %d", calls) assert.EqualValues(t, 3, actualTotalSize) assert.EqualValues(t, 2+1, actualReadSoFar) assert.EqualValues(t, 1, actualReadSinceLast) } type EOFReader struct { b []byte i int } var _ io.Reader = (*EOFReader)(nil) func (r *EOFReader) Read(p []byte) (n int, err error) { n = copy(p, r.b[r.i:]) r.i += n if r.i == len(r.b) { err = io.EOF } return } func TestEOFReaderReturnsEOFs(t *testing.T) { r := EOFReader{[]byte{0x1}, 0} p := make([]byte, 2) n, err := r.Read(p) assert.Equal(t, 1, n) assert.Equal(t, io.EOF, err) } func TestBodyCallbackReaderCountsReads(t *testing.T) { br := NewByteBodyWithCallback([]byte{0x1, 0x2, 0x3, 0x4}, 4, nil) assert.EqualValues(t, 0, br.readSize) p := make([]byte, 8) n, err := br.Read(p) assert.Equal(t, 4, n) assert.Nil(t, err) assert.EqualValues(t, 4, br.readSize) } func TestBodyCallbackReaderUpdatesOffsetOnSeek(t *testing.T) { br := NewByteBodyWithCallback([]byte{0x1, 0x2, 0x3, 0x4}, 4, nil) br.Seek(1, io.SeekStart) assert.EqualValues(t, 1, br.readSize) br.Seek(1, io.SeekCurrent) assert.EqualValues(t, 2, br.readSize) br.Seek(-1, io.SeekEnd) assert.EqualValues(t, 3, br.readSize) } git-lfs-2.3.4/progress/logger.go000066400000000000000000000014301317167762300165520ustar00rootroot00000000000000package progress import "os" // progressLogger provides a wrapper around an os.File that can either // write to the file or ignore all writes completely. type progressLogger struct { writeData bool log *os.File } // Write will write to the file and perform a Sync() if writing succeeds. func (l *progressLogger) Write(b []byte) error { if !l.writeData { return nil } if _, err := l.log.Write(b); err != nil { return err } return l.log.Sync() } // Close will call Close() on the underlying file func (l *progressLogger) Close() error { if l.log != nil { return l.log.Close() } return nil } // Shutdown will cause the logger to ignore any further writes. It should // be used when writing causes an error. 
func (l *progressLogger) Shutdown() { l.writeData = false } git-lfs-2.3.4/progress/meter.go000066400000000000000000000160421317167762300164140ustar00rootroot00000000000000package progress import ( "fmt" "os" "path/filepath" "strings" "sync" "sync/atomic" "time" "github.com/olekukonko/ts" ) // ProgressMeter provides a progress bar type output for the TransferQueue. It // is given an estimated file count and size up front and tracks the number of // files and bytes transferred as well as the number of files and bytes that // get skipped because the transfer is unnecessary. type ProgressMeter struct { finishedFiles int64 // int64s must come first for struct alignment skippedFiles int64 transferringFiles int64 estimatedBytes int64 currentBytes int64 skippedBytes int64 started int32 estimatedFiles int32 startTime time.Time finished chan interface{} logger *progressLogger fileIndex map[string]int64 // Maps a file name to its transfer number fileIndexMutex *sync.Mutex dryRun bool } type env interface { Get(key string) (val string, ok bool) } type meterOption func(*ProgressMeter) // DryRun is an option for NewMeter() that determines whether updates should be // sent to stdout. func DryRun(dryRun bool) meterOption { return func(m *ProgressMeter) { m.dryRun = dryRun } } // WithLogFile is an option for NewMeter() that sends updates to a text file. func WithLogFile(name string) meterOption { printErr := func(err string) { fmt.Fprintf(os.Stderr, "Error creating progress logger: %s\n", err) } return func(m *ProgressMeter) { if len(name) == 0 { return } if !filepath.IsAbs(name) { printErr("GIT_LFS_PROGRESS must be an absolute path") return } cbDir := filepath.Dir(name) if err := os.MkdirAll(cbDir, 0755); err != nil { printErr(err.Error()) return } file, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) if err != nil { printErr(err.Error()) return } m.logger.writeData = true m.logger.log = file } } // WithOSEnv is an option for NewMeter() that sends updates to the text file // path specified in the OS Env. func WithOSEnv(os env) meterOption { name, _ := os.Get("GIT_LFS_PROGRESS") return WithLogFile(name) } // NewMeter creates a new ProgressMeter. func NewMeter(options ...meterOption) *ProgressMeter { m := &ProgressMeter{ logger: &progressLogger{}, startTime: time.Now(), fileIndex: make(map[string]int64), fileIndexMutex: &sync.Mutex{}, finished: make(chan interface{}), } for _, opt := range options { opt(m) } return m } // Start begins sending status updates to the optional log file, and stdout. func (p *ProgressMeter) Start() { if atomic.CompareAndSwapInt32(&p.started, 0, 1) { go p.writer() } } // Pause stops sending status updates temporarily, until Start() is called again. func (p *ProgressMeter) Pause() { if atomic.CompareAndSwapInt32(&p.started, 1, 0) { p.finished <- true } } // Add tells the progress meter that a single file of the given size will // possibly be transferred. If a file doesn't need to be transferred for some // reason, be sure to call Skip(int64) with the same size. func (p *ProgressMeter) Add(size int64) { atomic.AddInt32(&p.estimatedFiles, 1) atomic.AddInt64(&p.estimatedBytes, size) } // Skip tells the progress meter that a file of size `size` is being skipped // because the transfer is unnecessary. 
func (p *ProgressMeter) Skip(size int64) { atomic.AddInt64(&p.skippedFiles, 1) atomic.AddInt64(&p.skippedBytes, size) // Reduce bytes and files so progress is easier to parse atomic.AddInt32(&p.estimatedFiles, -1) atomic.AddInt64(&p.estimatedBytes, -size) } // StartTransfer tells the progress meter that a transferring file is being // added to the TransferQueue. func (p *ProgressMeter) StartTransfer(name string) { idx := atomic.AddInt64(&p.transferringFiles, 1) p.fileIndexMutex.Lock() p.fileIndex[name] = idx p.fileIndexMutex.Unlock() } // TransferBytes increments the number of bytes transferred func (p *ProgressMeter) TransferBytes(direction, name string, read, total int64, current int) { atomic.AddInt64(&p.currentBytes, int64(current)) p.logBytes(direction, name, read, total) } // FinishTransfer increments the finished transfer count func (p *ProgressMeter) FinishTransfer(name string) { atomic.AddInt64(&p.finishedFiles, 1) p.fileIndexMutex.Lock() delete(p.fileIndex, name) p.fileIndexMutex.Unlock() } // Finish shuts down the ProgressMeter func (p *ProgressMeter) Finish() { close(p.finished) p.update() p.logger.Close() if !p.dryRun && p.estimatedBytes > 0 { fmt.Fprintf(os.Stdout, "\n") } } func (p *ProgressMeter) logBytes(direction, name string, read, total int64) { p.fileIndexMutex.Lock() idx := p.fileIndex[name] p.fileIndexMutex.Unlock() line := fmt.Sprintf("%s %d/%d %d/%d %s\n", direction, idx, p.estimatedFiles, read, total, name) if err := p.logger.Write([]byte(line)); err != nil { p.logger.Shutdown() } } func (p *ProgressMeter) writer() { p.update() for { select { case <-p.finished: return case <-time.After(time.Millisecond * 200): p.update() } } } func (p *ProgressMeter) update() { if p.dryRun || (p.estimatedFiles == 0 && p.skippedFiles == 0) { return } // (%d of %d files, %d skipped) %f B / %f B, %f B skipped // skipped counts only show when > 0 out := fmt.Sprintf("\rGit LFS: (%d of %d files", p.finishedFiles, p.estimatedFiles) if p.skippedFiles > 0 { out += fmt.Sprintf(", %d skipped", p.skippedFiles) } out += fmt.Sprintf(") %s / %s", formatBytes(p.currentBytes), formatBytes(p.estimatedBytes)) if p.skippedBytes > 0 { out += fmt.Sprintf(", %s skipped", formatBytes(p.skippedBytes)) } fmt.Fprintf(os.Stdout, pad(out)) } func formatBytes(i int64) string { switch { case i > 1099511627776: return fmt.Sprintf("%#0.2f TB", float64(i)/1099511627776) case i > 1073741824: return fmt.Sprintf("%#0.2f GB", float64(i)/1073741824) case i > 1048576: return fmt.Sprintf("%#0.2f MB", float64(i)/1048576) case i > 1024: return fmt.Sprintf("%#0.2f KB", float64(i)/1024) } return fmt.Sprintf("%d B", i) } const defaultWidth = 80 // pad pads the given message to occupy the entire maximum width of the terminal // LFS is attached to. In doing so, this safeguards subsequent prints of shorter // messages from leaving stray characters from the previous message on the // screen by writing over them with whitespace padding. func pad(msg string) string { width := defaultWidth size, err := ts.GetSize() if err == nil { // If `ts.GetSize()` was successful, set the width to the number // of columns present in the terminal LFS is attached to. // Otherwise, fall-back to `defaultWidth`. width = size.Col() } // Pad the string with whitespace so that printing at the start of the // line removes all traces from the last print. padding := strings.Repeat(" ", maxInt(0, width-len(msg))) return msg + padding } // maxInt returns the greater of two `int`s, "a", or "b".
This function // originally comes from `github.com/git-lfs/git-lfs/tools#MaxInt`, but would // introduce an import cycle if depended on directly. func maxInt(a, b int) int { if a > b { return a } return b } git-lfs-2.3.4/progress/noop.go000066400000000000000000000014651317167762300162560ustar00rootroot00000000000000package progress func Noop() Meter { return &nonMeter{} } type nonMeter struct{} func (m *nonMeter) Start() {} func (m *nonMeter) Pause() {} func (m *nonMeter) Add(size int64) {} func (m *nonMeter) Skip(size int64) {} func (m *nonMeter) StartTransfer(name string) {} func (m *nonMeter) TransferBytes(direction, name string, read, total int64, current int) {} func (m *nonMeter) FinishTransfer(name string) {} func (m *nonMeter) Finish() {} git-lfs-2.3.4/progress/progress.go000066400000000000000000000006041317167762300171410ustar00rootroot00000000000000// Package progress provides common progress monitoring / display features // NOTE: Subject to change, do not rely on this package from outside git-lfs source package progress type Meter interface { Start() Pause() Add(int64) Skip(size int64) StartTransfer(name string) TransferBytes(direction, name string, read, total int64, current int) FinishTransfer(name string) Finish() } git-lfs-2.3.4/progress/spinner.go000066400000000000000000000022211317167762300167500ustar00rootroot00000000000000package progress import ( "fmt" "io" "runtime" ) // Indeterminate progress indicator 'spinner' type Spinner struct { stage int msg string } var spinnerChars = []byte{'|', '/', '-', '\\'} // Print a spinner (stage) to out followed by msg (no linefeed) func (s *Spinner) Print(out io.Writer, msg string) { s.msg = msg s.Spin(out) } // Just spin the spinner one more notch & use the last message func (s *Spinner) Spin(out io.Writer) { s.stage = (s.stage + 1) % len(spinnerChars) s.update(out, string(spinnerChars[s.stage]), s.msg) } // Finish the spinner with a completion message & newline func (s *Spinner) Finish(out io.Writer, finishMsg string) { s.msg = finishMsg s.stage = 0 var sym string if runtime.GOOS == "windows" { // Windows console sucks, can't do nice check mark except in ConEmu (not cmd or git bash) // So play it safe & boring sym = "*" } else { sym = fmt.Sprintf("%c", '\u2714') } s.update(out, sym, finishMsg) out.Write([]byte{'\n'}) } func (s *Spinner) update(out io.Writer, prefix, msg string) { fmt.Fprintf(out, "\r%v", pad(fmt.Sprintf("%v %v", prefix, msg))) } func NewSpinner() *Spinner { return &Spinner{} } git-lfs-2.3.4/rpm/000077500000000000000000000000001317167762300137005ustar00rootroot00000000000000git-lfs-2.3.4/rpm/INSTALL.md000066400000000000000000000060251317167762300153330ustar00rootroot00000000000000# Building RPMs # All of the code to build the RPM is stored in a SPECS/git-lfs.spec file. The source code tarball needs to be put in a SOURCES directory. The BUILD and BUILDROOT directories are used during the build process. The final RPM ends up in the RPMS directory and a source-rpm in SRPMS. In order to expedite installing all dependencies (mainly ruby-ronn and golang) and downloading any needed files, a build_rpms.bsh script is included. This is the **RECOMMENDED** way to build the rpms. It will install all yum packages needed to build the rpm. This can be especially difficult on CentOS 5 and 6, but it will build and install a suitable golang/ruby so that git-lfs can be built. Simply run: ``` ./clean.bsh ./build_rpms.bsh ``` The clean.bsh script removes previous rpms, etc... and removes the source tar.gz file.
Otherwise you might end up creating an rpm with pieces from different versions. Standard practice is to run rpmbuild as a non-root user; this prevents inadvertently installing files in the operating system. The intent is to run build_rpms.bsh as a non-root user with sudo privileges. If you have a different command for sudo, set the SUDO environment variable to the other command. When all is done, install (or distribute) RPMS/git-lfs.rpm ``` yum install RPMS/x86_64/git-lfs*.rpm ``` ### Alternative build method ### If you want to use your own ruby/golang without using the version from build_rpms.bsh, you will have to disable dependencies on the rpms. It's pretty easy: just make sure ronn and go are in the path, and run ``` NODEPS=1 ./build_rpms.bsh ``` ### Manual build method ### If you want to use your own ruby/golang without using build_rpms.bsh, it's a little more complicated. You have to make sure ronn and go are in the path, create the build structure, and download/create the tar.gz file used. This is not recommended, but it is possible. ``` mkdir -p {BUILD,BUILDROOT,SOURCES,RPMS,SRPMS} #download file to SOURCES/v{version number}.tar.gz rpmbuild --define "_topdir `pwd`" -bb SPECS/git-lfs.spec --nodeps #(and optionally) rpmbuild --define "_topdir `pwd`" -bs SPECS/git-lfs.spec --nodeps ``` ### Releases ### It is no longer necessary to update SPECS/git-lfs.spec for every version. As long as the version in config/version.go is updated, build_rpms.bsh picks the version number out of the output of ```git-lfs version``` and updates SPECS/git-lfs.spec. The version number is then used to download: https://github.com/git-lfs/git-lfs/archive/v%{version}.tar.gz This way when a new version is archived, it will get downloaded and built against. When developing, it is advantageous to use the currently checked out version to test against. In order to do that, after running ```./clean.bsh```, set the environment variable BUILD_LOCAL to 1 ``` ./clean.bsh BUILD_LOCAL=1 ./build_rpms.bsh ``` ### Troubleshooting ### **Q**) I ran build_rpms.bsh as root and now there are root-owned files in the rpm dir **A**) That happens.
Either run build_rpms.bsh as a user with sudo permissions or ```chown -R username:groupname rpm``` as root after building.git-lfs-2.3.4/rpm/SOURCES/000077500000000000000000000000001317167762300150235ustar00rootroot00000000000000git-lfs-2.3.4/rpm/SOURCES/git-lfs.repo000066400000000000000000000006551317167762300172650ustar00rootroot00000000000000[git-lfs] name=Packages for git-lfs for Enterprise Linux $releasever - $basearch baseurl=http://git-lfs.github.com/centos/$releasever/RPMS enabled=1 gpgcheck=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-GITLFS [git-lfs-source] name=Packages for git-lfs for Enterprise Linux $releasever - $basearch baseurl=http://git-lfs.github.com/centos/$releasever/SRPMS enabled=0 gpgcheck=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-GITLFS git-lfs-2.3.4/rpm/SPECS/000077500000000000000000000000001317167762300145555ustar00rootroot00000000000000git-lfs-2.3.4/rpm/SPECS/git-lfs-repo-release.spec000066400000000000000000000022401317167762300213550ustar00rootroot00000000000000Name: git-lfs-repo-release Version: 1 Release: 1%{?dist} Summary: Packages for git-lfs for Enterprise Linux repository configuration Group: System Environment/Base License: MIT %if 0%{?fedora} URL: https://git-lfs.github.com/fedora/%{fedora}/ %elseif 0%{?rhel} URL: https://git-lfs.github.com/centos/%{rhel}/ %endif Source0: RPM-GPG-KEY-GITLFS Source1: git-lfs.repo BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) BuildArch: noarch %description This package contains the git-lfs repository GPG key as well as configuration for yum. %prep %setup -q -c -T %build %install [ "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT #GPG Key install -Dpm 644 %{SOURCE0} \ $RPM_BUILD_ROOT%{_sysconfdir}/pki/rpm-gpg/RPM-GPG-KEY-GITLFS # yum install -dm 755 $RPM_BUILD_ROOT%{_sysconfdir}/yum.repos.d install -pm 644 %{SOURCE1} \ $RPM_BUILD_ROOT%{_sysconfdir}/yum.repos.d %clean [ "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT %files %defattr(-,root,root,-) %config(noreplace) /etc/yum.repos.d/* /etc/pki/rpm-gpg/* git-lfs-2.3.4/rpm/SPECS/git-lfs.spec000066400000000000000000000053201317167762300167760ustar00rootroot00000000000000Name: git-lfs Version: 2.3.4 Release: 1%{?dist} Summary: Git extension for versioning large files Group: Applications/Archiving License: MIT URL: https://git-lfs.github.com/ Source0: https://github.com/git-lfs/git-lfs/archive/v%{version}/%{name}-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) BuildRequires: perl-Digest-SHA BuildRequires: golang, tar, rubygem-ronn, which, git >= 1.8.2 Requires: git >= 1.8.2 %define debug_package %{nil} #I think this is because go links with --build-id=none for linux %description Git Large File Storage (LFS) replaces large files such as audio samples, videos, datasets, and graphics with text pointers inside Git, while storing the file contents on a remote server like GitHub.com or GitHub Enterprise.
%prep %setup -q -n %{name}-%{version} export GOPATH=`pwd` mkdir -p src/github.com/git-lfs ln -s $(pwd) src/github.com/git-lfs/%{name} %build %if 0%{?rhel} == 5 export CGO_ENABLED=0 %endif pushd src/github.com/git-lfs/%{name} %if %{_arch} == i386 GOARCH=386 ./script/bootstrap %else GOARCH=amd64 ./script/bootstrap %endif popd ./script/man %install [ "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT install -D bin/git-lfs ${RPM_BUILD_ROOT}/usr/bin/git-lfs mkdir -p -m 755 ${RPM_BUILD_ROOT}/usr/share/man/man1 mkdir -p -m 755 ${RPM_BUILD_ROOT}/usr/share/man/man5 install -D man/*.1 ${RPM_BUILD_ROOT}/usr/share/man/man1 install -D man/*.5 ${RPM_BUILD_ROOT}/usr/share/man/man5 %post git lfs install --system %preun git lfs uninstall %check export GOPATH=`pwd` export GIT_LFS_TEST_DIR=$(mktemp -d) # test/git-lfs-server-api/main.go does not compile because github.com/spf13/cobra # cannot be found in vendor, for some reason. It's not needed for installs, so # skip it. export SKIPAPITESTCOMPILE=1 pushd src/github.com/git-lfs/%{name} ./script/test go get github.com/ThomsonReutersEikon/go-ntlm/ntlm ./script/integration popd rmdir ${GIT_LFS_TEST_DIR} %clean rm -rf %{buildroot} %files %defattr(-,root,root,-) %doc LICENSE.md README.md /usr/bin/git-lfs /usr/share/man/man1/*.1.gz /usr/share/man/man5/*.5.gz %changelog * Sun Dec 6 2015 Andrew Neff - 1.1.0-1 - Added Requires and version for git back in * Sat Oct 31 2015 Andrew Neff - 1.0.3-1 - Added GIT_LFS_TEST_DIR to prevent future test race condition * Sun Aug 2 2015 Andrew Neff - 0.5.4-1 - Added tests back in * Sat Jul 18 2015 Andrew Neff - 0.5.2-1 - Changed Source0 filename * Mon May 18 2015 Andrew Neff - 0.5.1-1 - Initial Spec git-lfs-2.3.4/rpm/SPECS/ruby.spec000066400000000000000000000020231317167762300164070ustar00rootroot00000000000000Name: ruby Version: 2.2.2 Release: 1%{?dist} Summary: Ruby Programming Language Group: Applications/Programming License: BSDL URL: https://www.ruby-lang.org/ Source0: http://cache.ruby-lang.org/pub/ruby/2.2/ruby-2.2.2.tar.gz BuildRoot: %(echo %{_topdir}/BUILDROOT/%{name}-%{version}) BuildRequires: patch, libyaml-devel, glibc-headers, autoconf, gcc-c++, glibc-devel, patch, readline-devel, zlib-devel, libffi-devel, openssl-devel, automake, libtool, sqlite-devel Provides: gem = %{version}-%{release} %description A dynamic, open source programming language with a focus on simplicity and productivity. It has an elegant syntax that is natural to read and easy to write. 
%prep %setup -q %build ./configure make -j 8 %install [ "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT make install DESTDIR=${RPM_BUILD_ROOT} %clean rm -rf %{buildroot} %files %defattr(-,root,root,-) / %changelog * Tue May 19 2015 Andrew Neff - 2.2.2-1 - Initial Spec git-lfs-2.3.4/rpm/SPECS/rubygem-hpricot.spec000066400000000000000000000020731317167762300205530ustar00rootroot00000000000000#global gemdir %(ruby -rubygems -e 'puts Gem::dir' 2>/dev/null) %global gemdir %(IFS=: R=($(gem env gempath)); echo ${R[${#R[@]}-1]}) %define gem_name hpricot Name: rubygem-%{gem_name} Version: 0.8.6 Release: 1%{?dist} Summary: a swift, liberal HTML parser with a fantastic library Group: Applications/Programming License: N/A URL: https://rubygems.org/gems/%{gem_name} Source0: https://rubygems.org/downloads/%{gem_name}-%{version}.gem BuildRoot: %(echo %{_topdir}/BUILDROOT/%{gem_name}-%{version}) BuildRequires: gem Requires: ruby %description a swift, liberal HTML parser with a fantastic library %prep %setup -q -c -T gem install -V --local --force --install-dir ./%{gemdir} %{SOURCE0} #mv ./%{gemdir}/bin ./usr/local %build %install [ "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT mkdir -p ${RPM_BUILD_ROOT} cp -a ./usr ${RPM_BUILD_ROOT}/usr %clean rm -rf %{buildroot} %files %defattr(-,root,root,-) %{gemdir} %changelog * Wed May 20 2015 Andrew Neff - 2.1.8 - Initial Spec git-lfs-2.3.4/rpm/SPECS/rubygem-mustache.spec000066400000000000000000000027701317167762300207160ustar00rootroot00000000000000#global gemdir %(ruby -rubygems -e 'puts Gem::dir' 2>/dev/null) %global gemdir %(IFS=: R=($(gem env gempath)); echo ${R[${#R[@]}-1]}) %define gem_name mustache Name: rubygem-%{gem_name} Version: 1.0.1 Release: 1%{?dist} Summary: A framework-agnostic way to render logic-free views Group: Applications/Programming License: MIT URL: https://rubygems.org/gems/%{gem_name} Source0: https://rubygems.org/downloads/%{gem_name}-%{version}.gem BuildRoot: %(echo %{_topdir}/BUILDROOT/%{gem_name}-%{version}) BuildRequires: gem > 2.0 Requires: ruby > 2.0 BuildArch: noarch %description Inspired by ctemplate, Mustache is a framework-agnostic way to render logic-free views. As ctemplates says, "It emphasizes separating logic from presentation: it is impossible to embed application logic in this template language." Think of Mustache as a replacement for your views. Instead of views consisting of ERB or HAML with random helpers and arbitrary logic, your views are broken into two parts: a Ruby class and an HTML template.
%prep %setup -q -c -T gem install -V --local --force --install-dir ./%{gemdir} %{SOURCE0} mv ./%{gemdir}/bin ./usr/local %build %install [ "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT mkdir -p ${RPM_BUILD_ROOT} cp -a ./usr ${RPM_BUILD_ROOT}/usr %clean rm -rf %{buildroot} %files %defattr(-,root,root,-) %{gemdir} /usr/local/bin/%{gem_name} %changelog * Wed May 20 2015 Andrew Neff - 2.1.8 - Initial Spec git-lfs-2.3.4/rpm/SPECS/rubygem-rdiscount.spec000066400000000000000000000021271317167762300211150ustar00rootroot00000000000000#global gemdir %(ruby -rubygems -e 'puts Gem::dir' 2>/dev/null) %global gemdir %(IFS=: R=($(gem env gempath)); echo ${R[${#R[@]}-1]}) %define gem_name rdiscount Name: rubygem-%{gem_name} Version: 2.1.8 Release: 1%{?dist} Summary: Fast Implementation of Gruber's Markdown in C Group: Applications/Programming License: BSD URL: https://rubygems.org/gems/%{gem_name} Source0: https://rubygems.org/downloads/%{gem_name}-%{version}.gem BuildRoot: %(echo %{_topdir}/BUILDROOT/%{gem_name}-%{version}) BuildRequires: gem > 1.9.2 Requires: ruby > 1.9.2 %description Fast Implementation of Gruber's Markdown in C %prep %setup -q -c -T gem install -V --local --force --install-dir ./%{gemdir} %{SOURCE0} mv ./%{gemdir}/bin ./usr/local %build %install [ "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT mkdir -p ${RPM_BUILD_ROOT} cp -a ./usr ${RPM_BUILD_ROOT}/usr %clean rm -rf %{buildroot} %files %defattr(-,root,root,-) %{gemdir} /usr/local/bin/%{gem_name} %changelog * Wed May 20 2015 Andrew Neff - 2.1.8 - Initial Spec git-lfs-2.3.4/rpm/SPECS/rubygem-ronn.spec000066400000000000000000000022321317167762300200540ustar00rootroot00000000000000#global gemdir %(ruby -rubygems -e 'puts Gem::dir' 2>/dev/null) %global gemdir %(IFS=: R=($(gem env gempath)); echo ${R[${#R[@]}-1]}) %define gem_name ronn Name: rubygem-%{gem_name} Version: 0.7.3 Release: 1%{?dist} Summary: Builds manuals Group: Applications/Programming License: N/A URL: https://rubygems.org/gems/%{gem_name} Source0: https://rubygems.org/downloads/%{gem_name}-%{version}.gem BuildRoot: %(echo %{_topdir}/BUILDROOT/%{gem_name}-%{version}) BuildRequires: gem Requires: ruby Requires: rubygem-hpricot >= 0.8.2 Requires: rubygem-mustache >= 0.7.0 Requires: rubygem-rdiscount >= 1.5.8 BuildArch: noarch %description Builds Manuals %prep %setup -q -c -T gem install -V --local --force --install-dir ./%{gemdir} %{SOURCE0} mv ./%{gemdir}/bin ./usr/local %build %install [ "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT mkdir -p ${RPM_BUILD_ROOT} cp -a ./usr ${RPM_BUILD_ROOT}/usr %clean rm -rf %{buildroot} %files %defattr(-,root,root,-) %{gemdir} /usr/local/bin/%{gem_name} %changelog * Wed May 20 2015 Andrew Neff - 2.1.8 - Initial Spec git-lfs-2.3.4/rpm/build_rpms.bsh000077500000000000000000000125251317167762300165460ustar00rootroot00000000000000#!/usr/bin/env bash set -eu CURDIR=$(cd $(dirname ${BASH_SOURCE[0]}); pwd) if [ -e /etc/os-release ]; then VERSION_ID=$(source /etc/os-release; echo ${VERSION_ID}) OS_NAME=$(source /etc/os-release; echo ${NAME}) OS_NAME=${OS_NAME,,} else #Basically Centos 5/6 VERSION_ID=($(head -n 1 /etc/redhat-release | \grep -Eo '[0-9]+')) OS_NAME=$(awk '{print tolower($1)}' /etc/redhat-release) #Stupid ancient bash 3... 
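# (bash 3, as shipped on CentOS 5/6, lacks the ${var,,} lowercasing used in the /etc/os-release branch above, hence the awk tolower fallback here)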
fi case "${OS_NAME}" in centos*|red*) RPM_DIST=".el${VERSION_ID}" ;; fedora) RPM_DIST=".fc${VERSION_ID}" ;; sles) RPM_DIST=".sles${VERSION_ID}" ;; opensuse) RPM_DIST=".opensuse${VERSION_ID}" ;; *) RPM_DIST="%{nil}" ;; esac RPMBUILD=(rpmbuild --define "_topdir ${CURDIR}" --define "dist ${RPM_DIST}") if [[ ${NODEPS:-0} != 0 ]]; then RPMBUILD=("${RPMBUILD[@]}" --nodeps) fi SUDO=${SUDO=`if which sudo > /dev/null 2>&1; then echo sudo; fi`} export PATH=${PATH}:/usr/local/bin set -vx echo "Downloading/checking for some essentials..." if which git > /dev/null 2>&1; then GIT_VERSION=($(git --version)) IFS_OLD=${IFS} IFS=. GIT_VERSION=(${GIT_VERSION[2]}) IFS=${IFS_OLD} else GIT_VERSION=(0 0 0) fi SPEC=${CURDIR}/SPECS/git-lfs.spec if [[ ${VERSION_ID[0]} == 5 ]]; then if ! rpm -q epel-release > /dev/null 2>&1; then $SUDO yum install -y epel-release fi fi $SUDO yum install -y make curl which rpm-build tar bison perl-Digest-SHA mkdir -p ${CURDIR}/{BUILD,BUILDROOT,SOURCES,RPMS,SRPMS} if ( [[ ${GIT_VERSION[0]} == 1 ]] && [[ ${GIT_VERSION[1]} < 8 ]] ) || [[ ${GIT_VERSION[0]} < 1 ]]; then if [[ ${VERSION_ID[0]} != 6 ]]; then $SUDO yum install -y git else curl https://setup.ius.io/ | $SUDO bash yum install -y "git >= 1.8.2" fi fi if ! which go; then echo "Installing go... one way or another" if [[ ${VERSION_ID[0]} == 5 ]]; then $SUDO yum install -y curl.x86_64 glibc gcc ${CURDIR}/golang_patch.bsh "${RPMBUILD[@]}" -ba ${CURDIR}/SPECS/golang.spec $SUDO yum install -y --nogpgcheck ${CURDIR}/RPMS/noarch/golang-1*.rpm \ ${CURDIR}/RPMS/noarch/golang-pkg-bin-linux-amd64-1*.rpm \ ${CURDIR}/RPMS/noarch/golang-src-1*.noarch.rpm \ ${CURDIR}/RPMS/noarch/golang-pkg-linux-amd64-1*.noarch.rpm \ ${CURDIR}/RPMS/noarch/golang-pkg-linux-386-1*.noarch.rpm else $SUDO yum install -y epel-release $SUDO yum install -y golang fi fi if which ruby > /dev/null 2>&1; then IFS_OLD=${IFS} IFS=. RUBY_VERSION=($(ruby -e "print RUBY_VERSION")) IFS=${IFS_OLD} else RUBY_VERSION=(0 0 0) fi if [[ ${RUBY_VERSION[0]} < 2 ]]; then if [[ ${VERSION_ID[0]} < 7 ]]; then echo "Downloading ruby..." if ! rpm -q epel-release; then $SUDO yum install -y epel-release #Optional part of centos fi $SUDO yum install -y patch libyaml-devel glibc-headers autoconf gcc-c++ glibc-devel readline-devel zlib-devel libffi-devel openssl-devel automake libtool sqlite-devel pushd ${CURDIR}/SOURCES curl -L -O http://cache.ruby-lang.org/pub/ruby/2.2/ruby-2.2.2.tar.gz popd echo "Building ruby..." "${RPMBUILD[@]}" -ba ${CURDIR}/SPECS/ruby.spec echo "Installing ruby..." $SUDO yum install -y --nogpgcheck ${CURDIR}/RPMS/x86_64/ruby*.rpm else $SUDO yum install -y ruby ruby-devel fi fi if ! which ronn; then echo "Downloading some ruby gems..." pushd ${CURDIR}/SOURCES curl -L -O https://rubygems.org/downloads/rdiscount-2.1.8.gem curl -L -O https://rubygems.org/downloads/hpricot-0.8.6.gem curl -L -O https://rubygems.org/downloads/mustache-1.0.1.gem curl -L -O https://rubygems.org/downloads/ronn-0.7.3.gem popd echo "Building ruby gems..." "${RPMBUILD[@]}" -ba ${CURDIR}/SPECS/rubygem-rdiscount.spec "${RPMBUILD[@]}" -ba ${CURDIR}/SPECS/rubygem-mustache.spec "${RPMBUILD[@]}" -ba ${CURDIR}/SPECS/rubygem-hpricot.spec "${RPMBUILD[@]}" -ba ${CURDIR}/SPECS/rubygem-ronn.spec echo "Installing ruby gems..." $SUDO yum install -y --nogpgcheck $(ls ${CURDIR}/RPMS/noarch/rubygem-*.rpm ${CURDIR}/RPMS/x86_64/rubygem-*.rpm | grep -v debuginfo) fi pushd ${CURDIR}/.. #Yes, compile lfs before compiling lfs... 
./script/bootstrap #Use the version output to grab the version number and short sha #(that yes, I could have gotten from git myself) LFS_VERSION=$(./bin/git-lfs version | sed -r 's|.*/([0-9.]*).*|\1|') sed -i 's|\(^Version:\s*\).*|\1'"${LFS_VERSION}"'|' ${CURDIR}/SPECS/git-lfs.spec #LFS_SHA=$(git rev-parse --short HEAD) #sed -i 's|\(^commit=*\).*|\1'"${LFS_SHA}"'|' ./script/run #Not needed thanks to #549 popd #Prep the SOURCES dir for git-lfs echo "Zipping up current checkout of git-lfs..." echo "Cleaning ${CURDIR}/tmptar" rm -rf ${CURDIR}/tmptar mkdir -p ${CURDIR}/tmptar/git-lfs-${LFS_VERSION} pushd ${CURDIR}/.. #I started running out of space in the docker, so I needed to copy a little less waste tar -c . --exclude tmptar --exclude repos | tar -x -C ${CURDIR}/tmptar/git-lfs-${LFS_VERSION}/ popd pushd ${CURDIR}/tmptar tar -zcf ${CURDIR}/SOURCES/git-lfs-${LFS_VERSION}.tar.gz git-lfs-${LFS_VERSION} popd echo "Cleaning ${CURDIR}/tmptar again" rm -rf ${CURDIR}/tmptar #TODO TASK 2 #cp ${CURDIR}/../docker/public.key ${CURDIR}/SOURCES/RPM-GPG-KEY-GITLFS touch ${CURDIR}/SOURCES/RPM-GPG-KEY-GITLFS echo "Build git-lfs rpm..." #--no-deps added for now so you can compile without official rpms installed "${RPMBUILD[@]}" --nodeps -ba ${CURDIR}/SPECS/git-lfs.spec echo "All Done!" git-lfs-2.3.4/rpm/clean.bsh000077500000000000000000000005151317167762300154640ustar00rootroot00000000000000#!/usr/bin/env bash set -eu #Emulate the important parts of git clean -xdf for CentOS 6 full build CWD=$(cd $(dirname ${BASH_SOURCE[0]}); pwd) rm -rv ${CWD}/BUILD ${CWD}/BUILDROOT ${CWD}/RPMS ${CWD}/SRPMS || : find ${CWD}/SOURCES -not -name git-lfs.repo -delete || : rm ${CWD}/SPECS/golang.spec || : rm ${CWD}/build.log || : git-lfs-2.3.4/rpm/golang_patch.bsh000077500000000000000000000017671317167762300170400ustar00rootroot00000000000000#!/usr/bin/env bash set -eu cd $(dirname ${BASH_SOURCE[0]})/SOURCES #Get EPEL full list curl -L -O https://dl.fedoraproject.org/pub/epel/fullfilelist #Get latest golang src rpm curl -L -O https://dl.fedoraproject.org/pub/epel/$(grep '^6/SRPMS/golang-[0-9].*src.rpm' fullfilelist) rpm2cpio golang-*.src.rpm | cpio -diuv #Patch the spec file to patch the build to work on CentOS 5 sed -ri 's|(^%build)|\1\nsed -i '"'"'s:.*--build-id.*::'"'"' ./src/cmd/go/build.go|' golang*.spec #Make SPEC CentOS 5 compliant sed -ri 's|(^Name:.*)|\1\nGroup: Software|' golang.spec sed -ri 's|(^Name:.*)|\1\nBuildRoot: %(echo %{_topdir}/BUILDROOT/%{name}-%{version})|' golang.spec sed -ri 's|(^%package\s.*)|\1\nGroup: Software|' golang.spec sed -i 's|%ifarch %{ix86}|%if %_arch == i686|' golang.spec sed -i 's|%ifarch %{arm}|%if %_arch == armv6l|' golang.spec sed -i 's|%ifarch|%if %_arch ==|' golang.spec #The test WILL fail, so make the rpm not fail sed -ri 's;(.*run.bash.*);\1|true;' golang.spec mv golang.spec ../SPECS/ git-lfs-2.3.4/script/000077500000000000000000000000001317167762300144065ustar00rootroot00000000000000git-lfs-2.3.4/script/backport-pr000077500000000000000000000031461317167762300165640ustar00rootroot00000000000000#!/usr/bin/env bash # # Backports a PR into a release branch: # # # backport PR #1023 into release-1.1-backport-1023 # $ git checkout master # $ git pull # $ script/backport-pr 1.1 1023 relversion="v$1.x" relbranch="release-$1" pr="$2" prbranch="$relbranch-backport-$pr" pullsurl="https://api.github.com/repos/git-lfs/git-lfs/pulls" prurl="https://api.github.com/repos/git-lfs/git-lfs/pulls/$pr" prjson="$(curl -n $pullsurl/$pr 2>/dev/null)" headref="$(echo $prjson | jq -r -e ".head.ref")" [ "$?"
-ne 0 ] && { echo "PR #$pr is invalid." exit 1 } prtitle="$(echo $prjson | jq -r ".title" | sed "s/\"/'/g")" git checkout -q -f $relbranch git clean -q -fdx git pull -q git checkout -q -f -B $prbranch commit=`git log -1 --pretty=%H "--grep=Merge pull request #$pr" "--grep=Merge branch '.*$headref'" master` echo "Backporting:\n" git log -1 $commit conflicts="" git cherry-pick -x --allow-empty -m1 $commit &> /dev/null || { unmerged=$(git ls-files --unmerged --stage | cut -f 2 -d$'\t' | uniq) conflicts="\n\nConflicting files:" for file in $unmerged; do git add "$file" conflicts="$conflicts\n- $file" done git commit -q --no-edit } commitmsg="Backport $headref from #$pr to $relbranch" if [ "$conflicts" ]; then commitmsg="$commitmsg [merge conflicts]" fi git commit -q --allow-empty --amend -m "$commitmsg" git push -q -f origin $prbranch git checkout -q -f $relbranch git branch -q -D $prbranch curl -in $pullsurl -d "{ \"title\": \"Backport #$pr for $relversion: $prtitle\", \"head\": \"$prbranch\", \"base\": \"$relbranch\", \"body\": \"This backports #$pr.$conflicts\" }" 2>/dev/null git-lfs-2.3.4/script/bootstrap000077500000000000000000000014331317167762300163520ustar00rootroot00000000000000#!/usr/bin/env bash set -e if uname -s | grep -q "_NT-"; then # Tell Cygwin / MSYS to really create symbolic links. export CYGWIN="$CYGWIN winsymlinks:nativestrict" export MSYS="$MSYS winsymlinks:nativestrict" fi if [ -z "$GOPATH" ]; then export GOPATH="$(pwd)" mkdir -p src/github.com/git-lfs [ -h src/github.com/git-lfs/git-lfs ] || ln -s "$GOPATH" src/github.com/git-lfs/git-lfs fi if uname -s | grep -q "_NT-"; then echo "Installing goversioninfo to embed resources into Windows executables..." go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo export PATH=$PATH:$GOPATH/bin/windows_386 echo "Creating the resource.syso version information file..." 
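# resource.syso is produced by the goversioninfo tool installed above; the Go toolchain automatically links any .syso file found in the package directory into the resulting binary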
go generate fi script/fmt rm -rf bin/* GO15VENDOREXPERIMENT=1 go run script/*.go -cmd build "$@" git-lfs-2.3.4/script/build.go000066400000000000000000000176431317167762300160470ustar00rootroot00000000000000package main import ( "crypto/sha256" "encoding/hex" "encoding/json" "flag" "fmt" "io" "io/ioutil" "log" "os" "os/exec" "path/filepath" "runtime" "strings" "github.com/git-lfs/git-lfs/config" ) var ( BuildOS = flag.String("os", "", "OS to target: darwin,freebsd,linux,windows") BuildArch = flag.String("arch", "", "Arch to target: 386,amd64") BuildAll = flag.Bool("all", false, "Builds all architectures") BuildDwarf = flag.Bool("dwarf", false, "Includes DWARF tables in build artifacts") BuildLdFlags = flag.String("ldflags", "", "-ldflags to pass to the compiler") BuildGcFlags = flag.String("gcflags", "", "-gcflags to pass to the compiler") ShowHelp = flag.Bool("help", false, "Shows help") matrixKeys = map[string]string{ "darwin": "Mac", "freebsd": "FreeBSD", "linux": "Linux", "windows": "Windows", "amd64": "AMD64", } LdFlags []string ) func mainBuild() { if *ShowHelp { fmt.Println("usage: script/bootstrap [-os] [-arch] [-all]") flag.PrintDefaults() return } fmt.Printf("Using %s\n", runtime.Version()) genOut, err := exec.Command("go", "generate", "./commands").CombinedOutput() if err != nil { fmt.Fprintf(os.Stderr, "go generate failed:\n%v", string(genOut)) os.Exit(1) } cmd, _ := exec.Command("git", "rev-parse", "--short", "HEAD").Output() if len(cmd) > 0 { LdFlags = append(LdFlags, "-X", strings.TrimSpace( "github.com/git-lfs/git-lfs/config.GitCommit="+string(cmd), )) } if !*BuildDwarf { LdFlags = append(LdFlags, "-s", "-w") } buildMatrix := make(map[string]Release) errored := false var platforms, arches []string if len(*BuildOS) > 0 { platforms = strings.Split(*BuildOS, ",") } if len(*BuildArch) > 0 { arches = strings.Split(*BuildArch, ",") } if *BuildAll { platforms = []string{"linux", "darwin", "freebsd", "windows"} arches = []string{"amd64", "386"} } if len(platforms) < 1 || len(arches) < 1 { if err := build("", "", buildMatrix); err != nil { log.Fatalln(err) } return // skip build matrix stuff } for _, buildos := range platforms { for _, buildarch := range arches { err := build(strings.TrimSpace(buildos), strings.TrimSpace(buildarch), buildMatrix) if err != nil { errored = true } } } if errored { os.Exit(1) } by, err := json.Marshal(buildMatrix) if err != nil { log.Fatalln("Error encoding build matrix to json:", err) } file, err := os.Create("bin/releases/build_matrix.json") if err != nil { log.Fatalln("Error creating build_matrix.json:", err) } written, err := file.Write(by) file.Close() if err != nil { log.Fatalln("Error writing build_matrix.json", err) } if jsonSize := len(by); written != jsonSize { log.Fatalf("Expected to write %d bytes, actually wrote %d.\n", jsonSize, written) } } func build(buildos, buildarch string, buildMatrix map[string]Release) error { addenv := len(buildos) > 0 && len(buildarch) > 0 name := "git-lfs-" + config.Version dir := "bin" if addenv { fmt.Printf("Building for %s/%s\n", buildos, buildarch) dir = filepath.Join(dir, "releases", buildos+"-"+buildarch, name) } if err := buildCommand(dir, buildos, buildarch); err != nil { return err } if addenv { err := os.MkdirAll(dir, 0755) if err != nil { log.Println("Error setting up installer:\n", err.Error()) return err } err = setupInstaller(buildos, buildarch, dir, buildMatrix) if err != nil { log.Println("Error setting up installer:\n", err.Error()) return err } } return nil } func buildCommand(dir, buildos, 
buildarch string) error { addenv := len(buildos) > 0 && len(buildarch) > 0 bin := filepath.Join(dir, "git-lfs") cmdOS := runtime.GOOS if len(buildos) > 0 { cmdOS = buildos } if cmdOS == "windows" { bin = bin + ".exe" } args := make([]string, 1, 6) args[0] = "build" if len(*BuildLdFlags) > 0 { args = append(args, "-ldflags", *BuildLdFlags) } else if len(LdFlags) > 0 { args = append(args, "-ldflags", strings.Join(LdFlags, " ")) } if len(*BuildGcFlags) > 0 { args = append(args, "-gcflags", *BuildGcFlags) } args = append(args, "-o", bin, ".") cmd := exec.Command("go", args...) if addenv { cmd.Env = buildGoEnv(buildos, buildarch) } output, err := cmd.CombinedOutput() if len(output) > 0 { fmt.Println(string(output)) } return err } func buildGoEnv(buildos, buildarch string) []string { env := make([]string, 6, 9) env[0] = "GOOS=" + buildos env[1] = "GOARCH=" + buildarch env[2] = "GOPATH=" + os.Getenv("GOPATH") env[3] = "GOROOT=" + os.Getenv("GOROOT") env[4] = "PATH=" + os.Getenv("PATH") env[5] = "GO15VENDOREXPERIMENT=" + os.Getenv("GO15VENDOREXPERIMENT") for _, key := range []string{"TMP", "TEMP", "TEMPDIR"} { v := os.Getenv(key) if len(v) == 0 { continue } env = append(env, key+"="+v) } return env } func setupInstaller(buildos, buildarch, dir string, buildMatrix map[string]Release) error { textfiles := []string{ "README.md", "CHANGELOG.md", } if buildos == "windows" { return winInstaller(textfiles, buildos, buildarch, dir, buildMatrix) } else { return unixInstaller(textfiles, buildos, buildarch, dir, buildMatrix) } } func unixInstaller(textfiles []string, buildos, buildarch, dir string, buildMatrix map[string]Release) error { for _, filename := range textfiles { cmd := exec.Command("cp", filename, filepath.Join(dir, filename)) if err := logAndRun(cmd); err != nil { return err } } fullInstallPath := filepath.Join(dir, "install.sh") cmd := exec.Command("cp", "script/install.sh.example", fullInstallPath) if err := logAndRun(cmd); err != nil { return err } if err := os.Chmod(fullInstallPath, 0755); err != nil { return err } name := zipName(buildos, buildarch) + ".tar.gz" cmd = exec.Command("tar", "czf", "../"+name, filepath.Base(dir)) cmd.Dir = filepath.Dir(dir) if err := logAndRun(cmd); err != nil { return nil } addToMatrix(buildMatrix, buildos, buildarch, name) return nil } func winInstaller(textfiles []string, buildos, buildarch, dir string, buildMatrix map[string]Release) error { for _, filename := range textfiles { by, err := ioutil.ReadFile(filename) if err != nil { return err } winEndings := strings.Replace(string(by), "\n", "\r\n", -1) err = ioutil.WriteFile(filepath.Join(dir, filename), []byte(winEndings), 0644) if err != nil { return err } } installerPath := filepath.Dir(filepath.Dir(dir)) name := zipName(buildos, buildarch) + ".zip" full := filepath.Join(installerPath, name) matches, err := filepath.Glob(dir + "/*") if err != nil { return err } args := make([]string, len(matches)+2) args[0] = "-j" // junk the zip paths args[1] = full copy(args[2:], matches) cmd := exec.Command("zip", args...) 
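// logAndRun (defined below) echoes the full command line and working directory before running the zip command, so release build logs record exactly how each archive was produced.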
if err := logAndRun(cmd); err != nil { return err } addToMatrix(buildMatrix, buildos, buildarch, name) return nil } func addToMatrix(buildMatrix map[string]Release, buildos, buildarch, name string) { buildMatrix[fmt.Sprintf("%s-%s", buildos, buildarch)] = Release{ Label: releaseLabel(buildos, buildarch), Filename: name, SHA256: hashRelease(name), } } func hashRelease(name string) string { full := filepath.Join("bin/releases", name) file, err := os.Open(full) if err != nil { fmt.Printf("unable to open release %q: %+v\n", full, err) os.Exit(1) } defer file.Close() h := sha256.New() if _, err = io.Copy(h, file); err != nil { fmt.Printf("error reading release %q: %+v\n", full, err) os.Exit(1) } return hex.EncodeToString(h.Sum(nil)) } func logAndRun(cmd *exec.Cmd) error { fmt.Printf(" - %s\n", strings.Join(cmd.Args, " ")) if len(cmd.Dir) > 0 { fmt.Printf(" - in %s\n", cmd.Dir) } output, err := cmd.CombinedOutput() fmt.Println(string(output)) return err } func zipName(os, arch string) string { return fmt.Sprintf("git-lfs-%s-%s-%s", os, arch, config.Version) } func releaseLabel(buildos, buildarch string) string { return fmt.Sprintf("%s %s", key(buildos), key(buildarch)) } func key(k string) string { if s, ok := matrixKeys[k]; ok { return s } return k } git-lfs-2.3.4/script/changelog000077500000000000000000000034141317167762300162650ustar00rootroot00000000000000#!/usr/bin/env bash # # Interactively generates a changelog over a range of commits: commit_summary() { local hash=$1 pr=$(git show $hash | grep -o "#\([0-9]*\)" | cut -c 2-) prjson="$(curl -n https://api.github.com/repos/git-lfs/git-lfs/pulls/$pr 2>/dev/null)" title="$(echo $prjson | jq -r -e ".title")" id="$(echo $prjson | jq -r -e ".number")" author="$(echo $prjson | jq -r -e ".user.login")" # If the title begins with "Backport", then strip everything until the actual # pull-request title. if grep -q "Backport" <(echo $title); then title="$(echo $title | sed 's/^[^:]*: //g')" fi echo "* $title #$id (@$author)" } range=$1 if [ "$range" = "" ]; then echo "Usage: $0 [options] base..next" exit 1 fi features="" bugs="" misc="" for rev in $(git rev-list --merges --first-parent $range); do git show $rev processed=0 while [ $processed -eq 0 ]; do echo "Categorize this change: [f,b,m,s,?] ?" read -n 1 opt echo "" case $opt in [fbms]) processed=1 ;; ?) echo "f - mark this merge as a feature" echo "b - mark this merge as a bugfix" echo "m - mark this merge as a misc. change" echo "s - skip this merge, excluding it from the changelog" echo "? - display this help message" ;; *) echo "Unknown option: $opt, try again."
;; esac done if [ $opt != "s" ]; then summary="$(commit_summary $rev)" fi case $opt in f) features="$(printf "%s\n%s\n" "$features" "$summary")" ;; b) bugs="$(printf "%s\n%s\n" "$bugs" "$summary")" ;; m) misc="$(printf "%s\n%s\n" "$misc" "$summary")" ;; esac done echo "" >&2 cat <<- EOF ### Features $features ### Bugs $bugs ### Misc $misc EOF git-lfs-2.3.4/script/cibuild000077500000000000000000000002621317167762300157470ustar00rootroot00000000000000#!/usr/bin/env bash set -e script/test # re-run test to ensure GIT_TRACE output doesn't leak into the git package GIT_TRACE=1 script/test git VERBOSE_LOGS=1 script/integration git-lfs-2.3.4/script/compile-win-installer-unsigned.bat000066400000000000000000000001541317167762300231260ustar00rootroot00000000000000"%ProgramFiles(x86)%\Inno Setup 5\iscc.exe" /Qp "%~dp0\windows-installer\inno-setup-git-lfs-installer.iss" git-lfs-2.3.4/script/fmt000077500000000000000000000006001317167762300151160ustar00rootroot00000000000000#!/usr/bin/env bash formatter=gofmt hash goimports 2>/dev/null && { formatter=goimports } # don't run gofmt in these directories ignored=(/bin/ /docs/ /log/ /man/ /tmp/ /vendor/ /rpm/ /docker/ /debian/ /src/) for i in */ ; do if [[ ! ${ignored[*]} =~ "/$i" ]]; then $formatter -w -l "$@" "${i%?}" fi done msg=`script/lint` if [ $? -ne 0 ]; then echo "$msg" exit 1 fi git-lfs-2.3.4/script/genmakefile/000077500000000000000000000000001317167762300166555ustar00rootroot00000000000000git-lfs-2.3.4/script/genmakefile/genmakefile.go000066400000000000000000000025721317167762300214610ustar00rootroot00000000000000package main import ( "fmt" "go/build" "os" "strings" ) var packages map[string]string = make(map[string]string) func generate_target(srcdir string, pkgdir string, prefix string, ctx build.Context) string { pkg, _ := ctx.ImportDir(srcdir+pkgdir, 0) name := pkg.Name var deps []string for _, imp := range pkg.Imports { if strings.HasPrefix(imp, prefix) { imp = strings.TrimPrefix(imp, prefix) if packages[imp] == "" { packages[imp] = generate_target(srcdir, imp, prefix, ctx) } deps = append(deps, "$(LIBS_"+packages[imp]+")") } } if pkgdir != "" { fmt.Printf("SRCDIR_%s := $(SRCDIR)%s/\n", name, pkgdir) } else { fmt.Printf("SRCDIR_%s := $(SRCDIR)\n", name) } fmt.Printf("SRC_%s := $(addprefix $(SRCDIR_%s), %s)\n", name, name, strings.Join(pkg.GoFiles, " ")) fmt.Printf("DEPS_%s := %s\n", name, strings.Join(deps, " ")) if pkgdir != "" { fmt.Printf("OBJ_%s := $(LIBDIR)/%s.o\n", name, pkgdir) fmt.Printf("LIB_%s := $(LIBDIR)/%s.a\n", name, pkgdir) fmt.Printf("LIBS_%s := $(LIB_%s) $(DEPS_%s)\n", name, name, name) fmt.Printf("$(OBJ_%s) : $(SRC_%s) $(DEPS_%s)\n", name, name, name) fmt.Printf("\t@mkdir -p $(dir $@)\n") fmt.Printf("\t$(GOC) $(GOFLAGS) -c -o $@ $(SRC_%s)\n", name) } return name } func main() { srcdir := os.Args[1] prefix := os.Args[2] ctx := build.Default ctx.CgoEnabled = false generate_target(srcdir, "", prefix, ctx) } git-lfs-2.3.4/script/install-git-source000077500000000000000000000002261317167762300200610ustar00rootroot00000000000000#!/usr/bin/env bash # Builds git from a given git ref. Used for CircleCI OSX builds cd git-source git checkout "$1" make --jobs=2 make install cd .. 
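# Example invocation (hypothetical; assumes the CI setup has already cloned git into ./git-source): script/install-git-source v2.0.0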
git-lfs-2.3.4/script/install.sh.example000066400000000000000000000006051317167762300200430ustar00rootroot00000000000000#!/usr/bin/env bash set -eu prefix="/usr/local" if [ "${PREFIX:-}" != "" ] ; then prefix=${PREFIX:-} elif [ "${BOXEN_HOME:-}" != "" ] ; then prefix=${BOXEN_HOME:-} fi mkdir -p $prefix/bin rm -rf $prefix/bin/git-lfs* pushd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null for g in git*; do install $g "$prefix/bin/$g" done popd > /dev/null PATH+=:$prefix/bin git lfs install git-lfs-2.3.4/script/integration000077500000000000000000000014751317167762300166660ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testenv.sh" set -e SHUTDOWN_LFS=no SHOW_LOGS=yes atexit() { res=${1:-$?} SHUTDOWN_LFS=yes if [ "$res" = "0" ]; then SHOW_LOGS=no fi if [ "$SHOW_LOGS" = "yes" ] && [ "$VERBOSE_LOGS" = "1" ]; then if [ -s "$REMOTEDIR/gitserver.log" ]; then echo "" echo "gitserver.log:" cat "$REMOTEDIR/gitserver.log" fi echo "" echo "env:" env fi shutdown exit $res } trap "atexit" EXIT if [ -s "$LFS_URL_FILE" ]; then SHOW_LOGS=no echo "$LFS_URL_FILE still exists!" echo "Confirm other tests are done, and run:" echo " $ curl $(cat "$LFS_URL_FILE")/shutdown" exit 1 fi setup GO15VENDOREXPERIMENT=1 GIT_LFS_TEST_MAXPROCS=$GIT_LFS_TEST_MAXPROCS GIT_LFS_TEST_DIR="$GIT_LFS_TEST_DIR" SHUTDOWN_LFS="no" go run script/*.go -cmd integration "$@" git-lfs-2.3.4/script/integration.go000066400000000000000000000075731317167762300172740ustar00rootroot00000000000000package main import ( "bytes" "errors" "fmt" "os" "os/exec" "path/filepath" "regexp" "runtime" "strconv" "strings" "sync" "time" ) var ( bashPath string debugging = false erroring = false maxprocs = 4 testPattern = regexp.MustCompile(`test[/\\]test-([a-z\-]+)\.sh$`) ) func mainIntegration() { if len(os.Getenv("DEBUG")) > 0 { debugging = true } setBash() if max, _ := strconv.Atoi(os.Getenv("GIT_LFS_TEST_MAXPROCS")); max > 0 { maxprocs = max } fmt.Println("Running this maxprocs", maxprocs) files := testFiles() if len(files) == 0 { fmt.Println("no tests to run") os.Exit(1) } var wg sync.WaitGroup tests := make(chan string, len(files)) output := make(chan string, len(files)) for _, file := range files { tests <- file } go printOutput(output) for i := 0; i < maxprocs; i++ { wg.Add(1) go worker(tests, output, &wg) } close(tests) wg.Wait() close(output) printOutput(output) if erroring { os.Exit(1) } } func runTest(output chan string, testname string) { buf := &bytes.Buffer{} cmd := exec.Command(bashPath, testname) cmd.Stdout = buf cmd.Stderr = buf err := cmd.Start() if err != nil { sendTestOutput(output, testname, buf, err) return } done := make(chan error) go func() { if err := cmd.Wait(); err != nil { done <- err } close(done) }() select { case err = <-done: sendTestOutput(output, testname, buf, err) return case <-time.After(3 * time.Minute): sendTestOutput(output, testname, buf, errors.New("Timed out")) cmd.Process.Kill() return } } func sendTestOutput(output chan string, testname string, buf *bytes.Buffer, err error) { cli := strings.TrimSpace(buf.String()) if len(cli) == 0 { cli = fmt.Sprintf("", testname) } if err == nil { output <- cli } else { basetestname := filepath.Base(testname) if debugging { fmt.Printf("Error on %s: %s\n", basetestname, err) } erroring = true output <- fmt.Sprintf("error: %s => %s\n%s", basetestname, err, cli) } } func printOutput(output <-chan string) { for { select { case out, ok := <-output: if !ok { return } fmt.Println(out) } } } func worker(tests <-chan string, output chan string, wg *sync.WaitGroup) { defer wg.Done() 
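// Receive test script names until the tests channel is closed and drained; each script runs to completion before the next one is picked up.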
for { select { case testname, ok := <-tests: if !ok { return } runTest(output, testname) } } } func testFiles() []string { if len(os.Args) < 4 { return allTestFiles() } fileMap := make(map[string]bool) for _, file := range allTestFiles() { fileMap[file] = true } files := make([]string, 0, len(os.Args)-3) for _, arg := range os.Args { fullname := "test/test-" + arg + ".sh" if fileMap[fullname] { files = append(files, fullname) } } return files } func allTestFiles() []string { files := make([]string, 0, 100) filepath.Walk("test", func(path string, info os.FileInfo, err error) error { if debugging { fmt.Println("FOUND:", path) } if err != nil || info.IsDir() || !testPattern.MatchString(path) { return nil } if debugging { fmt.Println("MATCHING:", path) } files = append(files, path) return nil }) return files } func setBash() { findcmd := "which" if runtime.GOOS == "windows" { // Can't use paths returned from which even if it's on PATH in Windows // Because our Go binary is a separate Windows app & not MinGW, it // can't understand paths like '/usr/bin/bash', needs Windows version findcmd = "where" } out, err := exec.Command(findcmd, "bash").Output() if err != nil { fmt.Println("Unable to find bash:", err) os.Exit(1) } if len(out) == 0 { fmt.Printf("No output from '%s bash'\n", findcmd) os.Exit(1) } bashPath = strings.TrimSpace(strings.Split(string(out), "\n")[0]) if debugging { fmt.Println("Using", bashPath) } // Test _, err = exec.Command(bashPath, "--version").CombinedOutput() if err != nil { fmt.Println("Error calling bash:", err) os.Exit(1) } } git-lfs-2.3.4/script/lint000077500000000000000000000010221317167762300152750ustar00rootroot00000000000000#!/usr/bin/env bash deps=$(GO15VENDOREXPERIMENT=1 go list -f '{{join .Deps "\n"}}' . | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' | grep -v "github.com/git-lfs/git-lfs") # exit 0 means non-vendored deps were found if [ $? -eq 0 ]; then echo "Non vendored dependencies found:" for d in $deps; do echo "\t$d"; done echo echo "These dependencies should be tracked in 'glide.yaml'." echo "Consider running "glide update" or "glide get" to vendor a new dependency." exit 1 else echo "Looks good!" fi git-lfs-2.3.4/script/man000077500000000000000000000004451317167762300151120ustar00rootroot00000000000000#!/usr/bin/env bash ronn=`which ronn` if [ -x "$ronn" ]; then mkdir -p man rm -rf man/*.ronn rm -rf man/*.txt cp docs/man/*.ronn man cp docs/man/*.txt man $ronn man/*.ronn rm -rf man/*.ronn rm -rf man/*.txt else echo "Install the 'ronn' ruby gem to build the man pages." fi git-lfs-2.3.4/script/packagecloud.rb000066400000000000000000000060161317167762300173600ustar00rootroot00000000000000# Pushes all deb and rpm files from ./repos to PackageCloud. 
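# Requires a PACKAGECLOUD_TOKEN environment variable; PACKAGECLOUD_USER is optional and defaults to "github" (see below).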
packagecloud_user = ENV["PACKAGECLOUD_USER"] || "github" packagecloud_token = ENV["PACKAGECLOUD_TOKEN"] || begin puts "PACKAGECLOUD_TOKEN env required" exit 1 end require "json" packagecloud_ruby_minimum_version = "1.0.4" begin gem "packagecloud-ruby", ">=#{packagecloud_ruby_minimum_version}" require "packagecloud" puts "Using packagecloud-ruby:#{Gem.loaded_specs["packagecloud-ruby"].version}" rescue LoadError puts "Requires packagecloud-ruby >=#{packagecloud_ruby_minimum_version}" puts %(gem install packagecloud-ruby) exit 1 end credentials = Packagecloud::Credentials.new(packagecloud_user, packagecloud_token) $client = Packagecloud::Client.new(credentials) # matches package directories built by docker to one or more packagecloud distros # https://packagecloud.io/docs#os_distro_version $distro_name_map = { "centos/5" => %w( el/5 ), "centos/6" => %w( el/6 ), "centos/7" => %w( el/7 fedora/22 fedora/23 fedora/24 fedora/25 fedora/26 ), "debian/7" => %w( debian/wheezy ubuntu/precise ), "debian/8" => %w( debian/jessie linuxmint/rafaela linuxmint/rebecca linuxmint/rosa ubuntu/trusty ubuntu/vivid ubuntu/wily ), "debian/9" => %W( debian/stretch linuxmint/sarah linuxmint/serena linuxmint/sonya ubuntu/xenial ubuntu/yakkety ubuntu/zesty ), } # caches distro id lookups $distro_id_map = {} def distro_names_for(filename) $distro_name_map.each do |pattern, distros| return distros if filename.include?(pattern) end raise "no distro for #{filename.inspect}" end package_files = Dir.glob("repos/**/*.rpm") + Dir.glob("repos/**/*.deb") package_files.each do |full_path| next if full_path =~ /repo-release/ pkg = Packagecloud::Package.new(:file => full_path) distro_names = distro_names_for(full_path) distro_names.map do |distro_name| distro_id = $distro_id_map[distro_name] ||= $client.find_distribution_id(distro_name) if !distro_id raise "no distro id for #{distro_name.inspect}" end puts "pushing #{full_path} to #{$distro_id_map.key(distro_id).inspect}" result = $client.put_package("git-lfs", pkg, distro_id) result.succeeded || begin raise "packagecloud put_package failed, error: #{result.response}" end end end package_files.each do |full_path| next if full_path.include?("SRPM") || full_path.include?("i386") || full_path.include?("i686") next unless full_path =~ /\/git-lfs[-|_]\d/ os, distro = case full_path when /debian\/7/ then ["Debian 7", "debian/wheezy"] when /debian\/8/ then ["Debian 8", "debian/jessie"] when /debian\/9/ then ["Debian 9", "debian/stretch"] when /centos\/5/ then ["RPM RHEL 5/CentOS 5", "el/5"] when /centos\/6/ then ["RPM RHEL 6/CentOS 6", "el/6"] when /centos\/7/ then ["RPM RHEL 7/CentOS 7", "el/7"] end next unless os puts "[#{os}](https://packagecloud.io/#{packagecloud_user}/git-lfs/packages/#{distro}/#{File.basename(full_path)}/download)" end git-lfs-2.3.4/script/release000077500000000000000000000001331317167762300157510ustar00rootroot00000000000000#!/usr/bin/env bash script/fmt GO15VENDOREXPERIMENT=1 go run script/*.go -cmd release "$@" git-lfs-2.3.4/script/release.go000066400000000000000000000027751317167762300163700ustar00rootroot00000000000000package main import ( "encoding/json" "flag" "fmt" "log" "net/url" "os" "os/exec" "strings" ) var ( ReleaseId = flag.Int("id", 0, "git-lfs/git-lfs Release ID") uploadUrlFmt = "https://uploads.github.com/repos/git-lfs/git-lfs/releases/%d/assets?%s" ) func mainRelease() { if *ReleaseId < 1 { log.Println("Need a valid git-lfs/git-lfs release id.") log.Fatalln("usage: script/release -id") } file, err := os.Open("bin/releases/build_matrix.json") if err 
!= nil { log.Println("Error opening build_matrix.json:", err) log.Fatalln("Ensure `script/bootstrap -all` has completed successfully") } defer file.Close() buildMatrix := make(map[string]Release) if err := json.NewDecoder(file).Decode(&buildMatrix); err != nil { log.Fatalln("Error reading build_matrix.json:", err) } for _, rel := range buildMatrix { release(rel) fmt.Println() } fmt.Println("SHA-256 hashes:") for _, rel := range buildMatrix { fmt.Printf("**%s**\n%s\n\n", rel.Filename, rel.SHA256) } } func release(rel Release) { query := url.Values{} query.Add("name", rel.Filename) query.Add("label", rel.Label) args := []string{ "-in", "-H", "Content-Type: application/octet-stream", "-X", "POST", "--data-binary", "@bin/releases/" + rel.Filename, fmt.Sprintf(uploadUrlFmt, *ReleaseId, query.Encode()), } fmt.Println("curl", strings.Join(args, " ")) cmd := exec.Command("curl", args...) by, err := cmd.Output() if err != nil { log.Fatalln("Error running curl:", err) } fmt.Println(string(by)) } git-lfs-2.3.4/script/run000077500000000000000000000002651317167762300151430ustar00rootroot00000000000000#!/usr/bin/env bash script/fmt commit=`git rev-parse --short HEAD` GO15VENDOREXPERIMENT=1 go run -ldflags="-X github.com/git-lfs/git-lfs/config.GitCommit=$commit" ./git-lfs.go "$@" git-lfs-2.3.4/script/script.go000066400000000000000000000006221317167762300162410ustar00rootroot00000000000000package main import ( "flag" "log" ) type Release struct { Label string Filename string SHA256 string } var SubCommand = flag.String("cmd", "", "Command: build or release") func main() { flag.Parse() switch *SubCommand { case "build": mainBuild() case "release": mainRelease() case "integration": mainIntegration() default: log.Fatalln("Unknown command:", *SubCommand) } } git-lfs-2.3.4/script/test000077500000000000000000000015021317167762300153110ustar00rootroot00000000000000#!/usr/bin/env bash #/ Usage: script/test # run all non-vendored tests #/ script/test # run just a package's tests script/fmt if [ $# -gt 0 ]; then GO15VENDOREXPERIMENT=1 go test "./$@" else # The following vendor test-exclusion grep-s typically need to match the same set in # debian/rules variable DH_GOLANG_EXCLUDES, so update those when adding here. GO15VENDOREXPERIMENT=1 go test \ $(GO15VENDOREXPERIMENT=1 go list ./... \ | grep -v "github.com/kr/pty" \ | grep -v "github.com/kr/text" \ | grep -v "github.com/olekukonko/ts" \ | grep -v "github.com/pkg/errors" \ | grep -v "github.com/stretchr/testify" \ | grep -v "github.com/xeipuuv/gojsonreference" \ | grep -v "github.com/xeipuuv/gojsonschema" \ ) fi git-lfs-2.3.4/script/update-version000077500000000000000000000015651317167762300173100ustar00rootroot00000000000000#!/usr/bin/env bash VERSION_STRING=$1 VERSION_ARRAY=( ${VERSION_STRING//./ } ) VERSION_MAJOR=${VERSION_ARRAY[0]} VERSION_MINOR=${VERSION_ARRAY[1]} VERSION_PATCH=${VERSION_ARRAY[2]:-0} VERSION_BUILD=${VERSION_ARRAY[3]:-0} # Update the version number git-lfs is reporting. sed -i "s,\(Version = \"\).*\(\"\),\1$VERSION_STRING\2," config/version.go # Update the version number in the RPM package. sed -i "s,\(Version:[[:space:]]*\).*,\1$VERSION_STRING," rpm/SPECS/git-lfs.spec # Update the version numbers in the Windows installer. 
sed -i "s,\(\"Major\": \).*\,,\1$VERSION_MAJOR\,," versioninfo.json sed -i "s,\(\"Minor\": \).*\,,\1$VERSION_MINOR\,," versioninfo.json sed -i "s,\(\"Patch\": \).*\,,\1$VERSION_PATCH\,," versioninfo.json sed -i "s,\(\"Build\": \).*,\1$VERSION_BUILD," versioninfo.json sed -i "s,\(\"ProductVersion\": \"\).*\(\"\),\1$VERSION_STRING\2," versioninfo.json git-lfs-2.3.4/script/vendor000077500000000000000000000003141317167762300156270ustar00rootroot00000000000000#!/usr/bin/env bash glide update -s -u glide install -s -u rm -rf vendor/github.com/ThomsonReutersEikon/go-ntlm/utils rm -rf vendor/github.com/davecgh/go-spew rm -rf vendor/github.com/pmezard/go-difflib git-lfs-2.3.4/script/windows-installer/000077500000000000000000000000001317167762300200735ustar00rootroot00000000000000git-lfs-2.3.4/script/windows-installer/git-lfs-logo.bmp000066400000000000000000000231061317167762300231000ustar00rootroot00000000000000BMF&6(7:&  kyZa1I1I1=!1kuBU1I1I1I1=!1!1)5Ze1I1I1I1I1I)1!1!1!1!1:Is1I1I1I1I1I1I1I)5!1!1!1!1!1!1R]:I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1{}BU1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1)9Zm1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1BM{1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1ce:M1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1)1J]1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11=cq1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1JQ1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1km:Q1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1)11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1Iky1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1IRa1I1I1I1I1I1I1I1I1I)5!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I:Q1I1I1I1I1I1I1I1I1IBQ)1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IBU:I!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IJY19!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I:IJY)1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IBQBU!1!1!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I:M1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IJY:E!1!1!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1Is1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IR]1=!1!1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1IRe1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1MR])1!1!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1IBQ1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IBQJU!1!1!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1IBQ1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IJ]BI!1!1!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1IJaky1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IRa)=!1!1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1IkuRa1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I:IRa)1!1!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I:Q1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IBUJY!1!11I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IBQ1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IRaBI:M1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IRecu1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I:Qs1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1Ik}J]1I1I1I1I1I1I1I1I1I1I1I1IRe1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I:M:M1I1I1I1I1I1I1I1IcqBQ1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IBU{1I1I1I1I1IJ]1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IZiZm1I:Mky1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1IsRa1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I1I:M:Q1I1I1I1I1I1I1I1I1I1I1I1I1I
[binary bitmap data omitted: git-lfs-logo.bmp]
git-lfs-2.3.4/script/windows-installer/git-lfs-logo.ico [binary icon data omitted]
git-lfs-2.3.4/script/windows-installer/git-lfs-wizard-image.bmp
ŒԷaͦŻze׽"µl5+3öo໨MMĐƼ~Md忯_TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT‹⬏U⬏U⬏'ϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬϬǼU⬏7ϬU⬏7ϬU⬏7ϬU⬏7ϬU⬏7ϬU⬏7Ⱦ 깥GϬU⬏7Ǽ~߬ϬU⬏7Ǽ~ƕϬU⬏7Ǽ~eʱ%ϬU⬏7Ǽ~>ѲϬU⬏7Ǽ~7ͥƻ{ϬU⬏7Ǽ~ ömư!ϬU⬏7Ǽ~Ǿ۴4ϬU⬏7Ǽ~+ͥϬU⬏7Ǽ~Ț歑ϬU⬏7Ǽ~깥FϬU⬏7Ǽ~꺧IϬU⬏7Ǽ~Ȝ孑ϬU⬏7Ǽ~7ϭϬU⬏7Ǽ~鵞6ϬU⬏7Ǽ~ Îõ4ϬU⬏7Ǽ~Tǽöp2ϬU⬏7Ǽ~ҲϬU⬏7Ǽ~ҲϬU⬏7Ǽ~ѰϬU⬏7Ǽ~ Я̥U⬏7Ǽ~Яִ2U⬏7ΪΪΪΪΪΪΪΪΪΪΪΪΪΪΪΪγ.⬏7洜0ǘ⬏7ʳ+Ⱦ⬏7䳚+đ⬏7ʹ0⬏0̳-⬏ƕ⬏Ⱦ佫UHHHHHHHHHHHHHHHHHHHHHHHHɜŮ긽긽긽ŭι긽긽긽긽긽긽긽ŭϹ¹긽긽긽긽踽԰к·Ӧܷ֦Ԧܮ겷稭趻鵺ϲ޳޼ŮĨت׬賸çۨ깾຿§اѩĺ߳ıҧڨ۪ͫɨ水ӧ絻Ǧͦ鵻ا⬱Ḿר˦ᶻȦ좷uOv0]LKKKKKKKKKKM4aV|⨬ꜲOv1_!RKKKKKKKKKKKKKKKKKKKL#S4aX}ˮȦ냟/]KKKKKKKKKKKKKKKKKKKKKKKKKKKK:e⩭簵/]KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKL;f鵺ܧ!RKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK-\Ҧ궽qKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK骮⦪aKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKʦܻ²aKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKݨaKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK宲aKKKKKKKO5aIrZrnX}Fp0^LKKKKKKKaKKKKKBl눣2`KKKKKaKK@j}2`KKlO톢hMu:f0^&VMO*X2`:fRykjLꮲ쩽럵hGp0^OKKKKKKKKKKKKKKKKP4aLtpꏩ穭w%UKKKKKKKKKKKKKKKKKKKKKKKKKK0^⨬h'WKKKKKKKKKKKKKKKKKKKKKKKKKKKKKL,ZԦާIqKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKhاۧꆡKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKۧاdKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKި֦aKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKਬզaKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKᨬaKKKKKKKKL"S,[5b "' + TmpFileName + '"', '', SW_HIDE, ewWaitUntilTerminated, ResultCode ); if LoadStringFromFile(TmpFileName, ExecStdOut) then begin if not (Pos('Git\cmd', ExtractFilePath(ExecStdOut)) = 0) then begin // Proxy Git path detected Result := ExpandConstant('{pf}'); if Is64BitInstallMode then Result := Result + '\Git\mingw64\bin' else Result := Result + '\Git\mingw32\bin'; end else begin Result := ExtractFilePath(ExecStdOut); end; DeleteFile(TmpFileName); end; end; // Checks to see if we need to add the dir to the env PATH variable. function NeedsAddPath(Param: string): boolean; var OrigPath: string; ParamExpanded: string; begin //expand the setup constants like {app} from Param ParamExpanded := ExpandConstant(Param); if not RegQueryStringValue(HKEY_LOCAL_MACHINE, 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', 'Path', OrigPath) then begin Result := True; exit; end; // look for the path with leading and trailing semicolon and with or without \ ending // Pos() returns 0 if not found Result := Pos(';' + UpperCase(ParamExpanded) + ';', ';' + UpperCase(OrigPath) + ';') = 0; if Result = True then Result := Pos(';' + UpperCase(ParamExpanded) + '\;', ';' + UpperCase(OrigPath) + ';') = 0; end; // Runs the lfs initialization. procedure InstallGitLFS(); var ResultCode: integer; begin Exec( ExpandConstant('{cmd}'), ExpandConstant('/C ""{app}\git-lfs.exe" install"'), '', SW_HIDE, ewWaitUntilTerminated, ResultCode ); if not ResultCode = 1 then MsgBox( 'Git LFS was not able to automatically initialize itself. 
' + 'Please run "git lfs install" from the commandline.', mbInformation, MB_OK); end; // Event function automatically called when uninstalling: function InitializeUninstall(): Boolean; var ResultCode: integer; begin Exec( ExpandConstant('{cmd}'), ExpandConstant('/C ""{app}\git-lfs.exe" uninstall"'), '', SW_HIDE, ewWaitUntilTerminated, ResultCode ); Result := True; end; git-lfs-2.3.4/subprocess/000077500000000000000000000000001317167762300152725ustar00rootroot00000000000000git-lfs-2.3.4/subprocess/buffered_cmd.go000066400000000000000000000004021317167762300202220ustar00rootroot00000000000000package subprocess import ( "bufio" "io" ) const ( // stdoutBufSize is the size of the buffers given to a sub-process stdout stdoutBufSize = 16384 ) type BufferedCmd struct { *Cmd Stdin io.WriteCloser Stdout *bufio.Reader Stderr *bufio.Reader } git-lfs-2.3.4/subprocess/cmd.go000066400000000000000000000016021317167762300163630ustar00rootroot00000000000000package subprocess import ( "io" "os/exec" ) // Thin wrapper around exec.Cmd. Takes care of pipe shutdown by // keeping an internal reference to any created pipes. Whenever // Cmd.Wait() is called, all created pipes are closed. type Cmd struct { *exec.Cmd pipes []io.Closer } func (c *Cmd) StdoutPipe() (io.ReadCloser, error) { stdout, err := c.Cmd.StdoutPipe() c.pipes = append(c.pipes, stdout) return stdout, err } func (c *Cmd) StderrPipe() (io.ReadCloser, error) { stderr, err := c.Cmd.StderrPipe() c.pipes = append(c.pipes, stderr) return stderr, err } func (c *Cmd) StdinPipe() (io.WriteCloser, error) { stdin, err := c.Cmd.StdinPipe() c.pipes = append(c.pipes, stdin) return stdin, err } func (c *Cmd) Wait() error { for _, pipe := range c.pipes { pipe.Close() } return c.Cmd.Wait() } func newCmd(cmd *exec.Cmd) *Cmd { wrapped := &Cmd{Cmd: cmd} return wrapped } git-lfs-2.3.4/subprocess/pty.go000066400000000000000000000014461317167762300164420ustar00rootroot00000000000000package subprocess import ( "io" "os" "os/exec" ) // Tty is a convenience wrapper to allow pseudo-TTYs on *nix systems, create with NewTty() // Do not use any of the struct members directly, call the Stderr() and Stdout() methods // Remember to call Close() when finished type Tty struct { cmd *exec.Cmd outpty *os.File outtty *os.File errpty *os.File errtty *os.File } func (t *Tty) Close() { if t.outtty != nil { t.outtty.Close() t.outtty = nil } if t.errtty != nil { t.errtty.Close() t.errtty = nil } } func (t *Tty) Stdout() (io.ReadCloser, error) { if t.outpty != nil { return t.outpty, nil } else { return t.cmd.StdoutPipe() } } func (t *Tty) Stderr() (io.ReadCloser, error) { if t.errpty != nil { return t.errpty, nil } else { return t.cmd.StderrPipe() } } git-lfs-2.3.4/subprocess/pty_nix.go000066400000000000000000000012021317167762300173060ustar00rootroot00000000000000// +build !windows package subprocess import ( "os/exec" "syscall" "github.com/kr/pty" ) // NewTty creates a pseudo-TTY for a command and modifies it appropriately so // the command thinks it's a real terminal func NewTty(cmd *exec.Cmd) *Tty { tty := &Tty{} tty.cmd = cmd // Assign pty/tty so git thinks it's a real terminal tty.outpty, tty.outtty, _ = pty.Open() cmd.Stdin = tty.outtty cmd.Stdout = tty.outtty tty.errpty, tty.errtty, _ = pty.Open() cmd.Stderr = tty.errtty if cmd.SysProcAttr == nil { cmd.SysProcAttr = &syscall.SysProcAttr{} } cmd.SysProcAttr.Setctty = true cmd.SysProcAttr.Setsid = true return tty } 
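// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original archive): how a caller
// might drive the Tty wrapper from pty.go above. The command and arguments
// ("git", "status") are placeholders, and error handling is trimmed; this is
// only a sketch of the intended call sequence, not git-lfs code.
// ---------------------------------------------------------------------------
package main

import (
	"io"
	"os"
	"os/exec"

	"github.com/git-lfs/git-lfs/subprocess"
)

func main() {
	cmd := exec.Command("git", "status")
	tty := subprocess.NewTty(cmd) // allocates a pty/tty pair on *nix; plain pipes on Windows
	stdout, err := tty.Stdout()   // pty master, or cmd.StdoutPipe() as the fallback
	if err != nil {
		os.Exit(1)
	}
	go io.Copy(os.Stdout, stdout) // relay terminal-style output as it arrives
	if err := cmd.Start(); err != nil {
		os.Exit(1)
	}
	cmd.Wait()
	tty.Close() // closing the tty pair unblocks the copy; a real caller would also wait for it to drain
}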
git-lfs-2.3.4/subprocess/pty_windows.go000066400000000000000000000004321317167762300202060ustar00rootroot00000000000000package subprocess import "os/exec" // NewTty creates a pseudo-TTY for a command and modifies it appropriately so // the command thinks it's a real terminal func NewTty(cmd *exec.Cmd) *Tty { // Nothing special for Windows at this time tty := &Tty{} tty.cmd = cmd return tty } git-lfs-2.3.4/subprocess/subprocess.go000066400000000000000000000115211317167762300200110ustar00rootroot00000000000000// Package subprocess provides helper functions for forking new processes // NOTE: Subject to change, do not rely on this package from outside git-lfs source package subprocess import ( "bufio" "bytes" "fmt" "os" "os/exec" "strconv" "strings" "github.com/rubyist/tracerx" ) // BufferedExec starts up a command and creates a stdin pipe and a buffered // stdout & stderr pipes, wrapped in a BufferedCmd. The stdout buffer will be // of stdoutBufSize bytes. func BufferedExec(name string, args ...string) (*BufferedCmd, error) { cmd := ExecCommand(name, args...) stdout, err := cmd.StdoutPipe() if err != nil { return nil, err } stderr, err := cmd.StderrPipe() if err != nil { return nil, err } stdin, err := cmd.StdinPipe() if err != nil { return nil, err } if err := cmd.Start(); err != nil { return nil, err } return &BufferedCmd{ cmd, stdin, bufio.NewReaderSize(stdout, stdoutBufSize), bufio.NewReaderSize(stderr, stdoutBufSize), }, nil } // SimpleExec is a small wrapper around os/exec.Command. func SimpleExec(name string, args ...string) (string, error) { tracerx.Printf("run_command: '%s' %s", name, strings.Join(args, " ")) cmd := ExecCommand(name, args...) //start copied from Go 1.6 exec.go captureErr := cmd.Stderr == nil if captureErr { cmd.Stderr = &prefixSuffixSaver{N: 32 << 10} } //end copied from Go 1.6 exec.go output, err := cmd.Output() if exitError, ok := err.(*exec.ExitError); ok { // TODO for min Go 1.6+, replace with ExitError.Stderr errorOutput := strings.TrimSpace(string(cmd.Stderr.(*prefixSuffixSaver).Bytes())) if errorOutput == "" { // some commands might write nothing to stderr but something to stdout in error-conditions, in which case, we'll use that // in the error string errorOutput = strings.TrimSpace(string(output)) } formattedErr := fmt.Errorf("Error running %s %s: '%s' '%s'", name, args, errorOutput, strings.TrimSpace(exitError.Error())) // return "" as output in error case, for callers that don't care about errors but rely on "" returned, in-case stdout != "" return "", formattedErr } return strings.Trim(string(output), " \n"), err } // An env for an exec.Command without GIT_TRACE var env []string var traceEnv = "GIT_TRACE=" func init() { realEnv := os.Environ() env = make([]string, 0, len(realEnv)) for _, kv := range realEnv { if strings.HasPrefix(kv, traceEnv) { continue } env = append(env, kv) } } // remaining code in file copied from Go 1.6 (c4fa25f4fc8f4419d0b0707bcdae9199a745face) exec.go and can be removed if moving to Go 1.6 minimum. // go 1.6 adds ExitError.Stderr with nice prefix/suffix trimming, which could replace cmd.Stderr above //start copied from Go 1.6 exec.go // prefixSuffixSaver is an io.Writer which retains the first N bytes // and the last N bytes written to it. The Bytes() methods reconstructs // it with a pretty error message. 
type prefixSuffixSaver struct { N int // max size of prefix or suffix prefix []byte suffix []byte // ring buffer once len(suffix) == N suffixOff int // offset to write into suffix skipped int64 // TODO(bradfitz): we could keep one large []byte and use part of it for // the prefix, reserve space for the '... Omitting N bytes ...' message, // then the ring buffer suffix, and just rearrange the ring buffer // suffix when Bytes() is called, but it doesn't seem worth it for // now just for error messages. It's only ~64KB anyway. } func (w *prefixSuffixSaver) Write(p []byte) (n int, err error) { lenp := len(p) p = w.fill(&w.prefix, p) // Only keep the last w.N bytes of suffix data. if overage := len(p) - w.N; overage > 0 { p = p[overage:] w.skipped += int64(overage) } p = w.fill(&w.suffix, p) // w.suffix is full now if p is non-empty. Overwrite it in a circle. for len(p) > 0 { // 0, 1, or 2 iterations. n := copy(w.suffix[w.suffixOff:], p) p = p[n:] w.skipped += int64(n) w.suffixOff += n if w.suffixOff == w.N { w.suffixOff = 0 } } return lenp, nil } // fill appends up to len(p) bytes of p to *dst, such that *dst does not // grow larger than w.N. It returns the un-appended suffix of p. func (w *prefixSuffixSaver) fill(dst *[]byte, p []byte) (pRemain []byte) { if remain := w.N - len(*dst); remain > 0 { add := minInt(len(p), remain) *dst = append(*dst, p[:add]...) p = p[add:] } return p } func (w *prefixSuffixSaver) Bytes() []byte { if w.suffix == nil { return w.prefix } if w.skipped == 0 { return append(w.prefix, w.suffix...) } var buf bytes.Buffer buf.Grow(len(w.prefix) + len(w.suffix) + 50) buf.Write(w.prefix) buf.WriteString("\n... omitting ") buf.WriteString(strconv.FormatInt(w.skipped, 10)) buf.WriteString(" bytes ...\n") buf.Write(w.suffix[w.suffixOff:]) buf.Write(w.suffix[:w.suffixOff]) return buf.Bytes() } func minInt(a, b int) int { if a < b { return a } return b } //end copied from Go 1.6 exec.go git-lfs-2.3.4/subprocess/subprocess_nix.go000066400000000000000000000004061317167762300206670ustar00rootroot00000000000000// +build !windows package subprocess import ( "os/exec" ) // ExecCommand is a small platform specific wrapper around os/exec.Command func ExecCommand(name string, arg ...string) *Cmd { cmd := exec.Command(name, arg...) cmd.Env = env return newCmd(cmd) } git-lfs-2.3.4/subprocess/subprocess_windows.go000066400000000000000000000005121317167762300215610ustar00rootroot00000000000000// +build windows package subprocess import ( "os/exec" "syscall" ) // ExecCommand is a small platform specific wrapper around os/exec.Command func ExecCommand(name string, arg ...string) *Cmd { cmd := exec.Command(name, arg...) cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true} cmd.Env = env return newCmd(cmd) } git-lfs-2.3.4/test/000077500000000000000000000000001317167762300140615ustar00rootroot00000000000000git-lfs-2.3.4/test/README.md000066400000000000000000000115001317167762300153350ustar00rootroot00000000000000# Git LFS Tests Git LFS uses two forms of tests: unit tests for the internals written in Go, and integration tests that run `git` and `git-lfs` in a real shell environment. You can run them separately: ``` $ script/test # Tests the Go packages. $ script/integration # Tests the commands in shell scripts. ``` CI servers should always run both: ``` $ script/cibuild ``` ## Internal Package Tests The internal tests use Go's builtin [testing][t] package.
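A minimal sketch of what one of these test files looks like (the package and the assertion here are illustrative, not taken from the tree):

```
package lfs

import "testing"

func TestExample(t *testing.T) {
	// assertions are plain Go conditionals plus t.Errorf/t.Fatalf
	if got, want := 2+2, 4; got != want {
		t.Fatalf("expected %d, got %d", want, got)
	}
}
```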
You can run individual tests by passing arguments to `script/test`: ``` # test a specific Go package $ script/test lfs # pass other `go test` arguments $ script/test lfs -run TestSuccessStatus -v github.com/rubyist/tracerx github.com/git-lfs/git-lfs/git github.com/technoweenie/assert === RUN TestSuccessStatus --- PASS: TestSuccessStatus (0.00 seconds) PASS ok _/Users/rick/git-lfs/git-lfs/lfs 0.011s ``` [t]: http://golang.org/pkg/testing/ ## Integration Tests Git LFS integration tests are shell scripts that test the `git-lfs` command from the shell. Each test file can be run individually, or in parallel through `script/integration`. Some tests will change the `pwd`, so it's important that they run in separate OS processes. ``` $ test/test-happy-path.sh compile git-lfs for test/test-happy-path.sh LFSTEST_URL=/Users/rick/git-lfs/git-lfs/test/remote/url LFSTEST_DIR=/Users/rick/git-lfs/git-lfs/test/remote lfstest-gitserver test: happy path ... OK ``` 1. The integration tests should not rely on global system or git config. 2. The tests should be cross platform (Linux, Mac, Windows). 3. Tests should bootstrap an isolated, clean environment. See the Test Suite section. 4. Successful test runs should have minimal output. 5. Failing test runs should dump enough information to diagnose the bug. This includes stdout, stderr, any log files, and even the OS environment. There are a few environment variables that you can set to change the test suite behavior: * `GIT_LFS_TEST_DIR=path` - This sets the directory that is used as the current working directory of the tests. By default, this will be in your temp dir. It's recommended that this is set to a directory outside of any Git repository. * `GIT_LFS_TEST_MAXPROCS=N` - This tells `script/integration` how many tests to run in parallel. Default: 4. * `KEEPTRASH=1` - This will leave the local repository data in a `tmp` directory and the remote repository data in `test/remote`. * `SKIPCOMPILE=1` - This skips the Git LFS compilation step. Speeds up the tests when you're running the same test script multiple times without changing any Go code. Also ensure that your `noproxy` environment variable contains `127.0.0.1` host, to allow git commands to reach the local Git server `lfstest-gitserver`. ### Test Suite The `testenv.sh` script includes some global variables used in tests. This should be automatically included in every `test/test-*.sh` script and `script/integration`. `testhelpers.sh` defines some shell functions. Most are only used in the test scripts themselves. `script/integration` uses the `setup()` and `shutdown()` functions. `testlib.sh` is a [fork of a lightweight shell testing lib][testlib] that is used internally at GitHub. Only the `test/test-*.sh` scripts should include this. Tests live in this `./test` directory, and must have a unique name like: `test-{name}.sh`. All tests should start with a basic template. See `test/test-happy-path.sh` for an example. ``` #!/usr/bin/env bash . "test/testlib.sh" begin_test "template" ( set -e echo "your tests go here" ) end_test ``` The `set -e` command will bail on the test at the first command that returns a non zero exit status. Use simple shell commands like `grep` as assertions. The test suite has standard `setup` and `shutdown` functions that should be run only once, before/after running the tests. The `setup` and `shutdown` functions are run by `script/integration` and also by individual test scripts when they are executed directly. Setup does the following: * Resets temporary test directories. 
* Compiles git-lfs with the latest code changes. * Compiles Go files in `test/cmd` to `bin`, and adds them the PATH. * Spins up a test Git and Git LFS server so the entire push/pull flow can be exercised. * Sets up a git credential helper that always returns a set username and password. The test Git server writes a `test/remote/url` file when it's complete. This file is how individual test scripts detect if `script/integration` is being run. You can fake this by manually spinning up the Git server using the `lfstest-gitserver` line that is output after Git LFS is compiled. By default, temporary directories in `tmp` and the `test/remote` directory are cleared after test runs. Send the "KEEPTRASH" if you want to keep these files around for debugging failed tests. [testlib]: https://gist3.github.com/rtomayko/3877539 git-lfs-2.3.4/test/cmd/000077500000000000000000000000001317167762300146245ustar00rootroot00000000000000git-lfs-2.3.4/test/cmd/git-credential-lfsnoop.go000066400000000000000000000000631317167762300215230ustar00rootroot00000000000000// +build testtools package main func main() { } git-lfs-2.3.4/test/cmd/git-credential-lfstest.go000066400000000000000000000043351317167762300215350ustar00rootroot00000000000000// +build testtools package main import ( "bufio" "fmt" "io/ioutil" "os" "path/filepath" "strings" ) var ( commands = map[string]func(){ "get": fill, "store": noop, "erase": noop, } delim = '\n' credsDir = "" ) func init() { if len(credsDir) == 0 { credsDir = os.Getenv("CREDSDIR") } } func main() { if argsize := len(os.Args); argsize != 2 { fmt.Fprintf(os.Stderr, "wrong number of args: %d\n", argsize) os.Exit(1) } arg := os.Args[1] cmd := commands[arg] if cmd == nil { fmt.Fprintf(os.Stderr, "bad cmd: %s\n", arg) os.Exit(1) } cmd() } func fill() { scanner := bufio.NewScanner(os.Stdin) creds := map[string]string{} for scanner.Scan() { line := scanner.Text() parts := strings.SplitN(line, "=", 2) if len(parts) != 2 { fmt.Fprintf(os.Stderr, "bad line: %s\n", line) os.Exit(1) } fmt.Fprintf(os.Stderr, "CREDS RECV: %s\n", line) creds[parts[0]] = strings.TrimSpace(parts[1]) } if err := scanner.Err(); err != nil { fmt.Fprintf(os.Stderr, "reading standard input: %v", err) os.Exit(1) } hostPieces := strings.SplitN(creds["host"], ":", 2) user, pass, err := credsForHostAndPath(hostPieces[0], creds["path"]) if err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } if user != "skip" { if _, ok := creds["username"]; !ok { creds["username"] = user } if _, ok := creds["password"]; !ok { creds["password"] = pass } } for key, value := range creds { fmt.Fprintf(os.Stderr, "CREDS SEND: %s=%s\n", key, value) fmt.Fprintf(os.Stdout, "%s=%s\n", key, value) } } func credsForHostAndPath(host, path string) (string, string, error) { hostFilename := filepath.Join(credsDir, host) if len(path) > 0 { pathFilename := fmt.Sprintf("%s--%s", hostFilename, strings.Replace(path, "/", "-", -1)) u, p, err := credsFromFilename(pathFilename) if err == nil { return u, p, err } } return credsFromFilename(hostFilename) } func credsFromFilename(file string) (string, string, error) { userPass, err := ioutil.ReadFile(file) if err != nil { return "", "", fmt.Errorf("Error opening %q: %s", file, err) } credsPieces := strings.SplitN(strings.TrimSpace(string(userPass)), ":", 2) return credsPieces[0], credsPieces[1], nil } func noop() {} git-lfs-2.3.4/test/cmd/lfs-askpass.go000066400000000000000000000007161317167762300174060ustar00rootroot00000000000000// +build testtools package main import ( "fmt" "os" "strings" ) func main() 
{ prompt := strings.Join(os.Args[1:], " ") var answer string if strings.Contains(prompt, "Username") { answer = "user" if env, ok := os.LookupEnv("LFS_ASKPASS_USERNAME"); ok { answer = env } } else if strings.Contains(prompt, "Password") { answer = "pass" if env, ok := os.LookupEnv("LFS_ASKPASS_PASSWORD"); ok { answer = env } } fmt.Println(answer) } git-lfs-2.3.4/test/cmd/lfs-ssh-echo.go000066400000000000000000000027521317167762300174540ustar00rootroot00000000000000// +build testtools package main import ( "encoding/json" "fmt" "os" "strings" "time" ) type sshResponse struct { Href string `json:"href"` Header map[string]string `json:"header"` ExpiresAt time.Time `json:"expires_at,omitempty"` ExpiresIn int `json:"expires_in,omitempty"` } func main() { // expect args: // lfs-ssh-echo -p PORT -- git@127.0.0.1 git-lfs-authenticate REPO OPERATION if len(os.Args) != 6 { fmt.Fprintf(os.Stderr, "got %d args: %v", len(os.Args), os.Args) os.Exit(1) } if os.Args[1] != "-p" { fmt.Fprintf(os.Stderr, "$1 expected \"-p\", got %q", os.Args[1]) os.Exit(1) } if os.Args[3] != "--" { fmt.Fprintf(os.Stderr, "$3 expected \"--\", got %q", os.Args[3]) os.Exit(1) } if os.Args[4] != "git@127.0.0.1" { fmt.Fprintf(os.Stderr, "$4 expected \"git@127.0.0.1\", got %q", os.Args[4]) os.Exit(1) } // just "git-lfs-authenticate REPO OPERATION" authLine := strings.Split(os.Args[5], " ") if len(authLine) < 13 { fmt.Fprintf(os.Stderr, "bad git-lfs-authenticate line: %s\nargs: %v", authLine, os.Args) } repo := authLine[1] r := &sshResponse{ Href: fmt.Sprintf("http://127.0.0.1:%s/%s.git/info/lfs", os.Args[2], repo), } switch repo { case "ssh-expired-absolute": r.ExpiresAt = time.Now().Add(-5 * time.Minute) case "ssh-expired-relative": r.ExpiresIn = -5 case "ssh-expired-both": r.ExpiresAt = time.Now().Add(-5 * time.Minute) r.ExpiresIn = -5 } json.NewEncoder(os.Stdout).Encode(r) } git-lfs-2.3.4/test/cmd/lfs-ssh-proxy-test.go000066400000000000000000000001471317167762300206700ustar00rootroot00000000000000// +build testtools package main import "fmt" func main() { fmt.Println("SSH PROXY TEST called") } git-lfs-2.3.4/test/cmd/lfstest-customadapter.go000066400000000000000000000175111317167762300215150ustar00rootroot00000000000000// +build testtools package main import ( "bufio" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "os" "strconv" "strings" "time" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/progress" "github.com/git-lfs/git-lfs/tools" ) var cfg = config.New() // This test custom adapter just acts as a bridge for uploads/downloads // in order to demonstrate & test the custom transfer adapter protocols // All we actually do is relay the requests back to the normal storage URLs // of our test server for simplicity, but this proves the principle func main() { scanner := bufio.NewScanner(os.Stdin) writer := bufio.NewWriter(os.Stdout) errWriter := bufio.NewWriter(os.Stderr) apiClient, err := lfsapi.NewClient(cfg.Os, cfg.Git) if err != nil { writeToStderr("Error creating api client: "+err.Error(), errWriter) os.Exit(1) } for scanner.Scan() { line := scanner.Text() var req request if err := json.Unmarshal([]byte(line), &req); err != nil { writeToStderr(fmt.Sprintf("Unable to parse request: %v\n", line), errWriter) continue } switch req.Event { case "init": writeToStderr(fmt.Sprintf("Initialised test custom adapter for %s\n", req.Operation), errWriter) resp := &initResponse{} sendResponse(resp, writer, errWriter) case "download": writeToStderr(fmt.Sprintf("Received download 
request for %s\n", req.Oid), errWriter) performDownload(apiClient, req.Oid, req.Size, req.Action, writer, errWriter) case "upload": writeToStderr(fmt.Sprintf("Received upload request for %s\n", req.Oid), errWriter) performUpload(apiClient, req.Oid, req.Size, req.Action, req.Path, writer, errWriter) case "terminate": writeToStderr("Terminating test custom adapter gracefully.\n", errWriter) break } } } func writeToStderr(msg string, errWriter *bufio.Writer) { if !strings.HasSuffix(msg, "\n") { msg = msg + "\n" } errWriter.WriteString(msg) errWriter.Flush() } func sendResponse(r interface{}, writer, errWriter *bufio.Writer) error { b, err := json.Marshal(r) if err != nil { return err } // Line oriented JSON b = append(b, '\n') _, err = writer.Write(b) if err != nil { return err } writer.Flush() writeToStderr(fmt.Sprintf("Sent message %v", string(b)), errWriter) return nil } func sendTransferError(oid string, code int, message string, writer, errWriter *bufio.Writer) { resp := &transferResponse{"complete", oid, "", &transferError{code, message}} err := sendResponse(resp, writer, errWriter) if err != nil { writeToStderr(fmt.Sprintf("Unable to send transfer error: %v\n", err), errWriter) } } func sendProgress(oid string, bytesSoFar int64, bytesSinceLast int, writer, errWriter *bufio.Writer) { resp := &progressResponse{"progress", oid, bytesSoFar, bytesSinceLast} err := sendResponse(resp, writer, errWriter) if err != nil { writeToStderr(fmt.Sprintf("Unable to send progress update: %v\n", err), errWriter) } } func performDownload(apiClient *lfsapi.Client, oid string, size int64, a *action, writer, errWriter *bufio.Writer) { // We just use the URLs we're given, so we're just a proxy for the direct method // but this is enough to test intermediate custom adapters req, err := http.NewRequest("GET", a.Href, nil) if err != nil { sendTransferError(oid, 2, err.Error(), writer, errWriter) return } for k := range a.Header { req.Header.Set(k, a.Header[k]) } res, err := apiClient.DoWithAuth("origin", req) if err != nil { sendTransferError(oid, res.StatusCode, err.Error(), writer, errWriter) return } defer res.Body.Close() dlFile, err := ioutil.TempFile("", "lfscustomdl") if err != nil { sendTransferError(oid, 3, err.Error(), writer, errWriter) return } defer dlFile.Close() dlfilename := dlFile.Name() // Turn callback into progress messages cb := func(totalSize int64, readSoFar int64, readSinceLast int) error { sendProgress(oid, readSoFar, readSinceLast, writer, errWriter) return nil } _, err = tools.CopyWithCallback(dlFile, res.Body, res.ContentLength, cb) if err != nil { sendTransferError(oid, 4, fmt.Sprintf("cannot write data to tempfile %q: %v", dlfilename, err), writer, errWriter) os.Remove(dlfilename) return } if err := dlFile.Close(); err != nil { sendTransferError(oid, 5, fmt.Sprintf("can't close tempfile %q: %v", dlfilename, err), writer, errWriter) os.Remove(dlfilename) return } // completed complete := &transferResponse{"complete", oid, dlfilename, nil} err = sendResponse(complete, writer, errWriter) if err != nil { writeToStderr(fmt.Sprintf("Unable to send completion message: %v\n", err), errWriter) } } func performUpload(apiClient *lfsapi.Client, oid string, size int64, a *action, fromPath string, writer, errWriter *bufio.Writer) { // We just use the URLs we're given, so we're just a proxy for the direct method // but this is enough to test intermediate custom adapters req, err := http.NewRequest("PUT", a.Href, nil) if err != nil { sendTransferError(oid, 2, err.Error(), writer, errWriter) return 
} for k := range a.Header { req.Header.Set(k, a.Header[k]) } if len(req.Header.Get("Content-Type")) == 0 { req.Header.Set("Content-Type", "application/octet-stream") } if req.Header.Get("Transfer-Encoding") == "chunked" { req.TransferEncoding = []string{"chunked"} } else { req.Header.Set("Content-Length", strconv.FormatInt(size, 10)) } req.ContentLength = size f, err := os.OpenFile(fromPath, os.O_RDONLY, 0644) if err != nil { sendTransferError(oid, 3, fmt.Sprintf("Cannot read data from %q: %v", fromPath, err), writer, errWriter) return } defer f.Close() // Turn callback into progress messages cb := func(totalSize int64, readSoFar int64, readSinceLast int) error { sendProgress(oid, readSoFar, readSinceLast, writer, errWriter) return nil } req.Body = progress.NewBodyWithCallback(f, size, cb) res, err := apiClient.DoWithAuth("origin", req) if err != nil { sendTransferError(oid, res.StatusCode, fmt.Sprintf("Error uploading data for %s: %v", oid, err), writer, errWriter) return } if res.StatusCode > 299 { msg := fmt.Sprintf("Invalid status for %s %s: %d", req.Method, strings.SplitN(req.URL.String(), "?", 2)[0], res.StatusCode) sendTransferError(oid, res.StatusCode, msg, writer, errWriter) return } io.Copy(ioutil.Discard, res.Body) res.Body.Close() // completed complete := &transferResponse{"complete", oid, "", nil} err = sendResponse(complete, writer, errWriter) if err != nil { writeToStderr(fmt.Sprintf("Unable to send completion message: %v\n", err), errWriter) } } // Structs reimplemented so closer to a real external implementation type header struct { Key string `json:"key"` Value string `json:"value"` } type action struct { Href string `json:"href"` Header map[string]string `json:"header,omitempty"` ExpiresAt time.Time `json:"expires_at,omitempty"` } type transferError struct { Code int `json:"code"` Message string `json:"message"` } // Combined request struct which can accept anything type request struct { Event string `json:"event"` Operation string `json:"operation"` Concurrent bool `json:"concurrent"` ConcurrentTransfers int `json:"concurrenttransfers"` Oid string `json:"oid"` Size int64 `json:"size"` Path string `json:"path"` Action *action `json:"action"` } type initResponse struct { Error *transferError `json:"error,omitempty"` } type transferResponse struct { Event string `json:"event"` Oid string `json:"oid"` Path string `json:"path,omitempty"` // always blank for upload Error *transferError `json:"error,omitempty"` } type progressResponse struct { Event string `json:"event"` Oid string `json:"oid"` BytesSoFar int64 `json:"bytesSoFar"` BytesSinceLast int `json:"bytesSinceLast"` } git-lfs-2.3.4/test/cmd/lfstest-gitserver.go000066400000000000000000001115101317167762300206460ustar00rootroot00000000000000// +build testtools package main import ( "bufio" "bytes" "crypto/rand" "crypto/rsa" "crypto/sha256" "crypto/tls" "crypto/x509" "crypto/x509/pkix" "encoding/base64" "encoding/hex" "encoding/json" "encoding/pem" "errors" "fmt" "io" "io/ioutil" "log" "math" "math/big" "net/http" "net/http/httptest" "net/textproto" "os" "os/exec" "regexp" "sort" "strconv" "strings" "sync" "time" "github.com/ThomsonReutersEikon/go-ntlm/ntlm" ) var ( repoDir string largeObjects = newLfsStorage() server *httptest.Server serverTLS *httptest.Server serverClientCert *httptest.Server // maps OIDs to content strings. Both the LFS and Storage test servers below // see OIDs. 
oidHandlers map[string]string // These magic strings tell the test lfs server to change its behavior so the // integration tests can check those use cases. Tests will create objects with // the magic strings as the contents. // // printf "status:lfs:404" > 404.dat // contentHandlers = []string{ "status-batch-403", "status-batch-404", "status-batch-410", "status-batch-422", "status-batch-500", "status-storage-403", "status-storage-404", "status-storage-410", "status-storage-422", "status-storage-500", "status-storage-503", "status-batch-resume-206", "batch-resume-fail-fallback", "return-expired-action", "return-expired-action-forever", "return-invalid-size", "object-authenticated", "storage-download-retry", "storage-upload-retry", "unknown-oid", "send-verify-action", "send-deprecated-links", } ) func main() { repoDir = os.Getenv("LFSTEST_DIR") mux := http.NewServeMux() server = httptest.NewServer(mux) serverTLS = httptest.NewTLSServer(mux) serverClientCert = httptest.NewUnstartedServer(mux) //setup Client Cert server rootKey, rootCert := generateCARootCertificates() _, clientCertPEM, clientKeyPEM := generateClientCertificates(rootCert, rootKey) certPool := x509.NewCertPool() certPool.AddCert(rootCert) serverClientCert.TLS = &tls.Config{ Certificates: []tls.Certificate{serverTLS.TLS.Certificates[0]}, ClientAuth: tls.RequireAndVerifyClientCert, ClientCAs: certPool, } serverClientCert.StartTLS() ntlmSession, err := ntlm.CreateServerSession(ntlm.Version2, ntlm.ConnectionOrientedMode) if err != nil { fmt.Println("Error creating ntlm session:", err) os.Exit(1) } ntlmSession.SetUserInfo("ntlmuser", "ntlmpass", "NTLMDOMAIN") stopch := make(chan bool) mux.HandleFunc("/shutdown", func(w http.ResponseWriter, r *http.Request) { stopch <- true }) mux.HandleFunc("/storage/", storageHandler) mux.HandleFunc("/verify", verifyHandler) mux.HandleFunc("/redirect307/", redirect307Handler) mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { id, ok := reqId(w) if !ok { return } if strings.Contains(r.URL.Path, "/info/lfs") { if !skipIfBadAuth(w, r, id, ntlmSession) { lfsHandler(w, r, id) } return } debug(id, "git http-backend %s %s", r.Method, r.URL) gitHandler(w, r) }) urlname := writeTestStateFile([]byte(server.URL), "LFSTEST_URL", "lfstest-gitserver") defer os.RemoveAll(urlname) sslurlname := writeTestStateFile([]byte(serverTLS.URL), "LFSTEST_SSL_URL", "lfstest-gitserver-ssl") defer os.RemoveAll(sslurlname) clientCertUrlname := writeTestStateFile([]byte(serverClientCert.URL), "LFSTEST_CLIENT_CERT_URL", "lfstest-gitserver-ssl") defer os.RemoveAll(clientCertUrlname) block := &pem.Block{} block.Type = "CERTIFICATE" block.Bytes = serverTLS.TLS.Certificates[0].Certificate[0] pembytes := pem.EncodeToMemory(block) certname := writeTestStateFile(pembytes, "LFSTEST_CERT", "lfstest-gitserver-cert") defer os.RemoveAll(certname) cccertname := writeTestStateFile(clientCertPEM, "LFSTEST_CLIENT_CERT", "lfstest-gitserver-client-cert") defer os.RemoveAll(cccertname) ckcertname := writeTestStateFile(clientKeyPEM, "LFSTEST_CLIENT_KEY", "lfstest-gitserver-client-key") defer os.RemoveAll(ckcertname) debug("init", "server url: %s", server.URL) debug("init", "server tls url: %s", serverTLS.URL) debug("init", "server client cert url: %s", serverClientCert.URL) <-stopch debug("init", "git server done") } // writeTestStateFile writes contents to either the file referenced by the // environment variable envVar, or defaultFilename if that's not set.
Returns // the filename that was used func writeTestStateFile(contents []byte, envVar, defaultFilename string) string { f := os.Getenv(envVar) if len(f) == 0 { f = defaultFilename } file, err := os.Create(f) if err != nil { log.Fatalln(err) } file.Write(contents) file.Close() return f } type lfsObject struct { Oid string `json:"oid,omitempty"` Size int64 `json:"size,omitempty"` Authenticated bool `json:"authenticated,omitempty"` Actions map[string]*lfsLink `json:"actions,omitempty"` Links map[string]*lfsLink `json:"_links,omitempty"` Err *lfsError `json:"error,omitempty"` } type lfsLink struct { Href string `json:"href"` Header map[string]string `json:"header,omitempty"` ExpiresAt time.Time `json:"expires_at,omitempty"` ExpiresIn int `json:"expires_in,omitempty"` } type lfsError struct { Code int `json:"code,omitempty"` Message string `json:"message"` } func writeLFSError(w http.ResponseWriter, code int, msg string) { by, err := json.Marshal(&lfsError{Message: msg}) if err != nil { http.Error(w, "json encoding error: "+err.Error(), 500) return } w.Header().Set("Content-Type", "application/vnd.git-lfs+json") w.WriteHeader(code) w.Write(by) } // handles any requests with "{name}.server.git/info/lfs" in the path func lfsHandler(w http.ResponseWriter, r *http.Request, id string) { repo, err := repoFromLfsUrl(r.URL.Path) if err != nil { w.WriteHeader(500) w.Write([]byte(err.Error())) return } debug(id, "git lfs %s %s repo: %s", r.Method, r.URL, repo) w.Header().Set("Content-Type", "application/vnd.git-lfs+json") switch r.Method { case "POST": if strings.HasSuffix(r.URL.String(), "batch") { lfsBatchHandler(w, r, id, repo) } else { locksHandler(w, r, repo) } case "DELETE": lfsDeleteHandler(w, r, id, repo) case "GET": if strings.Contains(r.URL.String(), "/locks") { locksHandler(w, r, repo) } else { w.WriteHeader(404) w.Write([]byte("lock request")) } default: w.WriteHeader(405) } } func lfsUrl(repo, oid string) string { return server.URL + "/storage/" + oid + "?r=" + repo } var ( retries = make(map[string]uint32) retriesMu sync.Mutex ) func incrementRetriesFor(api, direction, repo, oid string, check bool) (after uint32, ok bool) { // fmtStr formats a string like "<api>-<direction>-[check-]retry", // i.e., "legacy-upload-check-retry", or "storage-download-retry".
var fmtStr string if check { fmtStr = "%s-%s-check-retry" } else { fmtStr = "%s-%s-retry" } if oidHandlers[oid] != fmt.Sprintf(fmtStr, api, direction) { return 0, false } retriesMu.Lock() defer retriesMu.Unlock() retryKey := strings.Join([]string{direction, repo, oid}, ":") retries[retryKey]++ retries := retries[retryKey] return retries, true } func lfsDeleteHandler(w http.ResponseWriter, r *http.Request, id, repo string) { parts := strings.Split(r.URL.Path, "/") oid := parts[len(parts)-1] largeObjects.Delete(repo, oid) debug(id, "DELETE:", oid) w.WriteHeader(200) } func lfsBatchHandler(w http.ResponseWriter, r *http.Request, id, repo string) { checkingObject := r.Header.Get("X-Check-Object") == "1" if !checkingObject && repo == "batchunsupported" { w.WriteHeader(404) return } if !checkingObject && repo == "badbatch" { w.WriteHeader(203) return } if repo == "netrctest" { user, pass, err := extractAuth(r.Header.Get("Authorization")) if err != nil || (user != "netrcuser" || pass != "netrcpass") { w.WriteHeader(403) return } } if missingRequiredCreds(w, r, repo) { return } type batchReq struct { Transfers []string `json:"transfers"` Operation string `json:"operation"` Objects []lfsObject `json:"objects"` } type batchResp struct { Transfer string `json:"transfer,omitempty"` Objects []lfsObject `json:"objects"` } buf := &bytes.Buffer{} tee := io.TeeReader(r.Body, buf) var objs batchReq err := json.NewDecoder(tee).Decode(&objs) io.Copy(ioutil.Discard, r.Body) r.Body.Close() debug(id, "REQUEST") debug(id, buf.String()) if err != nil { log.Fatal(err) } res := []lfsObject{} testingChunked := testingChunkedTransferEncoding(r) testingTus := testingTusUploadInBatchReq(r) testingTusInterrupt := testingTusUploadInterruptedInBatchReq(r) testingCustomTransfer := testingCustomTransfer(r) var transferChoice string var searchForTransfer string if testingTus { searchForTransfer = "tus" } else if testingCustomTransfer { searchForTransfer = "testcustom" } if len(searchForTransfer) > 0 { for _, t := range objs.Transfers { if t == searchForTransfer { transferChoice = searchForTransfer break } } } for _, obj := range objs.Objects { handler := oidHandlers[obj.Oid] action := objs.Operation o := lfsObject{ Size: obj.Size, Actions: make(map[string]*lfsLink), } // Clobber the OID if told to do so. 
if handler == "unknown-oid" { o.Oid = "unknown-oid" } else { o.Oid = obj.Oid } exists := largeObjects.Has(repo, obj.Oid) addAction := true if action == "download" { if !exists { o.Err = &lfsError{Code: 404, Message: fmt.Sprintf("Object %v does not exist", obj.Oid)} addAction = false } } else { if exists { // not an error but don't add an action addAction = false } } if handler == "object-authenticated" { o.Authenticated = true } switch handler { case "status-batch-403": o.Err = &lfsError{Code: 403, Message: "welp"} case "status-batch-404": o.Err = &lfsError{Code: 404, Message: "welp"} case "status-batch-410": o.Err = &lfsError{Code: 410, Message: "welp"} case "status-batch-422": o.Err = &lfsError{Code: 422, Message: "welp"} case "status-batch-500": o.Err = &lfsError{Code: 500, Message: "welp"} default: // regular 200 response if handler == "return-invalid-size" { o.Size = -1 } if handler == "send-deprecated-links" { o.Links = make(map[string]*lfsLink) } if addAction { a := &lfsLink{ Href: lfsUrl(repo, obj.Oid), Header: map[string]string{}, } a = serveExpired(a, repo, handler) if handler == "send-deprecated-links" { o.Links[action] = a } else { o.Actions[action] = a } } if handler == "send-verify-action" { o.Actions["verify"] = &lfsLink{ Href: server.URL + "/verify", Header: map[string]string{ "repo": repo, }, } } } if testingChunked && addAction { if handler == "send-deprecated-links" { o.Links[action].Header["Transfer-Encoding"] = "chunked" } else { o.Actions[action].Header["Transfer-Encoding"] = "chunked" } } if testingTusInterrupt && addAction { if handler == "send-deprecated-links" { o.Links[action].Header["Lfs-Tus-Interrupt"] = "true" } else { o.Actions[action].Header["Lfs-Tus-Interrupt"] = "true" } } res = append(res, o) } ores := batchResp{Transfer: transferChoice, Objects: res} by, err := json.Marshal(ores) if err != nil { log.Fatal(err) } debug(id, "RESPONSE: 200") debug(id, string(by)) w.WriteHeader(200) w.Write(by) } // emu guards expiredRepos var emu sync.Mutex // expiredRepos is a map keyed by repository name, valuing to whether or not it // has yet served an expired object. var expiredRepos = map[string]bool{} // serveExpired marks the given repo as having served an expired object, making // it unable for that same repository to return an expired object in the future, func serveExpired(a *lfsLink, repo, handler string) *lfsLink { var ( dur = -5 * time.Minute at = time.Now().Add(dur) ) if handler == "return-expired-action-forever" || (handler == "return-expired-action" && canServeExpired(repo)) { emu.Lock() expiredRepos[repo] = true emu.Unlock() a.ExpiresAt = at return a } switch repo { case "expired-absolute": a.ExpiresAt = at case "expired-relative": a.ExpiresIn = -5 case "expired-both": a.ExpiresAt = at a.ExpiresIn = -5 } return a } // canServeExpired returns whether or not a repository is capable of serving an // expired object. In other words, canServeExpired returns whether or not the // given repo has yet served an expired object. 
func canServeExpired(repo string) bool { emu.Lock() defer emu.Unlock() return !expiredRepos[repo] } // Persistent state across requests var batchResumeFailFallbackStorageAttempts = 0 var tusStorageAttempts = 0 var ( vmu sync.Mutex verifyCounts = make(map[string]int) verifyRetryRe = regexp.MustCompile(`verify-fail-(\d+)-times?$`) ) func verifyHandler(w http.ResponseWriter, r *http.Request) { repo := r.Header.Get("repo") var payload struct { Oid string `json:"oid"` Size int64 `json:"size"` } if err := json.NewDecoder(r.Body).Decode(&payload); err != nil { writeLFSError(w, http.StatusUnprocessableEntity, err.Error()) return } var max int if matches := verifyRetryRe.FindStringSubmatch(repo); len(matches) < 2 { return } else { max, _ = strconv.Atoi(matches[1]) } key := strings.Join([]string{repo, payload.Oid}, ":") vmu.Lock() verifyCounts[key] = verifyCounts[key] + 1 count := verifyCounts[key] vmu.Unlock() if count < max { writeLFSError(w, http.StatusServiceUnavailable, fmt.Sprintf( "intentionally failing verify request %d (out of %d)", count, max, )) return } } // handles any /storage/{oid} requests func storageHandler(w http.ResponseWriter, r *http.Request) { id, ok := reqId(w) if !ok { return } repo := r.URL.Query().Get("r") parts := strings.Split(r.URL.Path, "/") oid := parts[len(parts)-1] if missingRequiredCreds(w, r, repo) { return } debug(id, "storage %s %s repo: %s", r.Method, oid, repo) switch r.Method { case "PUT": switch oidHandlers[oid] { case "status-storage-403": w.WriteHeader(403) return case "status-storage-404": w.WriteHeader(404) return case "status-storage-410": w.WriteHeader(410) return case "status-storage-422": w.WriteHeader(422) return case "status-storage-500": w.WriteHeader(500) return case "status-storage-503": writeLFSError(w, 503, "LFS is temporarily unavailable") return case "object-authenticated": if len(r.Header.Get("Authorization")) > 0 { w.WriteHeader(400) w.Write([]byte("Should not send authentication")) } return case "storage-upload-retry": if retries, ok := incrementRetriesFor("storage", "upload", repo, oid, false); ok && retries < 3 { w.WriteHeader(500) w.Write([]byte("malformed content")) return } } if testingChunkedTransferEncoding(r) { valid := false for _, value := range r.TransferEncoding { if value == "chunked" { valid = true break } } if !valid { debug(id, "Chunked transfer encoding expected") } } hash := sha256.New() buf := &bytes.Buffer{} io.Copy(io.MultiWriter(hash, buf), r.Body) oid := hex.EncodeToString(hash.Sum(nil)) if !strings.HasSuffix(r.URL.Path, "/"+oid) { w.WriteHeader(403) return } largeObjects.Set(repo, oid, buf.Bytes()) case "GET": parts := strings.Split(r.URL.Path, "/") oid := parts[len(parts)-1] statusCode := 200 byteLimit := 0 resumeAt := int64(0) if by, ok := largeObjects.Get(repo, oid); ok { if len(by) == len("storage-download-retry") && string(by) == "storage-download-retry" { if retries, ok := incrementRetriesFor("storage", "download", repo, oid, false); ok && retries < 3 { statusCode = 500 by = []byte("malformed content") } } else if len(by) == len("status-batch-resume-206") && string(by) == "status-batch-resume-206" { // Resume if header includes range, otherwise deliberately interrupt if rangeHdr := r.Header.Get("Range"); rangeHdr != "" { regex := regexp.MustCompile(`bytes=(\d+)\-.*`) match := regex.FindStringSubmatch(rangeHdr) if match != nil && len(match) > 1 { statusCode = 206 resumeAt, _ = strconv.ParseInt(match[1], 10, 32) w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", resumeAt, len(by), 
resumeAt-int64(len(by)))) } } else { byteLimit = 10 } } else if len(by) == len("batch-resume-fail-fallback") && string(by) == "batch-resume-fail-fallback" { // Fail any Range: request even though we said we supported it // To make sure client can fall back if rangeHdr := r.Header.Get("Range"); rangeHdr != "" { w.WriteHeader(416) return } if batchResumeFailFallbackStorageAttempts == 0 { // Truncate output on FIRST attempt to cause resume // Second attempt (without range header) is fallback, complete successfully byteLimit = 8 batchResumeFailFallbackStorageAttempts++ } } w.WriteHeader(statusCode) if byteLimit > 0 { w.Write(by[0:byteLimit]) } else if resumeAt > 0 { w.Write(by[resumeAt:]) } else { w.Write(by) } return } w.WriteHeader(404) case "HEAD": // tus.io if !validateTusHeaders(r, id) { w.WriteHeader(400) return } parts := strings.Split(r.URL.Path, "/") oid := parts[len(parts)-1] var offset int64 if by, ok := largeObjects.GetIncomplete(repo, oid); ok { offset = int64(len(by)) } w.Header().Set("Upload-Offset", strconv.FormatInt(offset, 10)) w.WriteHeader(200) case "PATCH": // tus.io if !validateTusHeaders(r, id) { w.WriteHeader(400) return } parts := strings.Split(r.URL.Path, "/") oid := parts[len(parts)-1] offsetHdr := r.Header.Get("Upload-Offset") offset, err := strconv.ParseInt(offsetHdr, 10, 64) if err != nil { log.Fatal("Unable to parse Upload-Offset header in request: ", err) w.WriteHeader(400) return } hash := sha256.New() buf := &bytes.Buffer{} out := io.MultiWriter(hash, buf) if by, ok := largeObjects.GetIncomplete(repo, oid); ok { if offset != int64(len(by)) { log.Fatal(fmt.Sprintf("Incorrect offset in request, got %d expected %d", offset, len(by))) w.WriteHeader(400) return } _, err := out.Write(by) if err != nil { log.Fatal("Error reading incomplete bytes from store: ", err) w.WriteHeader(500) return } largeObjects.DeleteIncomplete(repo, oid) debug(id, "Resuming upload of %v at byte %d", oid, offset) } // As a test, we intentionally break the upload from byte 0 by only // reading some bytes the quitting & erroring, this forces a resume // any offset > 0 will work ok var copyErr error if r.Header.Get("Lfs-Tus-Interrupt") == "true" && offset == 0 { chdr := r.Header.Get("Content-Length") contentLen, err := strconv.ParseInt(chdr, 10, 64) if err != nil { log.Fatal(fmt.Sprintf("Invalid Content-Length %q", chdr)) w.WriteHeader(400) return } truncated := contentLen / 3 _, _ = io.CopyN(out, r.Body, truncated) r.Body.Close() copyErr = fmt.Errorf("Simulated copy error") } else { _, copyErr = io.Copy(out, r.Body) } if copyErr != nil { b := buf.Bytes() if len(b) > 0 { debug(id, "Incomplete upload of %v, %d bytes", oid, len(b)) largeObjects.SetIncomplete(repo, oid, b) } w.WriteHeader(500) } else { checkoid := hex.EncodeToString(hash.Sum(nil)) if checkoid != oid { log.Fatal(fmt.Sprintf("Incorrect oid after calculation, got %q expected %q", checkoid, oid)) w.WriteHeader(403) return } b := buf.Bytes() largeObjects.Set(repo, oid, b) w.Header().Set("Upload-Offset", strconv.FormatInt(int64(len(b)), 10)) w.WriteHeader(204) } default: w.WriteHeader(405) } } func validateTusHeaders(r *http.Request, id string) bool { if len(r.Header.Get("Tus-Resumable")) == 0 { debug(id, "Missing Tus-Resumable header in request") return false } return true } func gitHandler(w http.ResponseWriter, r *http.Request) { defer func() { io.Copy(ioutil.Discard, r.Body) r.Body.Close() }() cmd := exec.Command("git", "http-backend") cmd.Env = []string{ fmt.Sprintf("GIT_PROJECT_ROOT=%s", repoDir), 
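// The remaining variables form the minimal CGI environment that
// `git http-backend` expects; GIT_HTTP_EXPORT_ALL exports every repository
// under GIT_PROJECT_ROOT without requiring the git-daemon-export-ok
// marker file.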
fmt.Sprintf("GIT_HTTP_EXPORT_ALL="), fmt.Sprintf("PATH_INFO=%s", r.URL.Path), fmt.Sprintf("QUERY_STRING=%s", r.URL.RawQuery), fmt.Sprintf("REQUEST_METHOD=%s", r.Method), fmt.Sprintf("CONTENT_TYPE=%s", r.Header.Get("Content-Type")), } buffer := &bytes.Buffer{} cmd.Stdin = r.Body cmd.Stdout = buffer cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { log.Fatal(err) } text := textproto.NewReader(bufio.NewReader(buffer)) code, _, _ := text.ReadCodeLine(-1) if code != 0 { w.WriteHeader(code) } headers, _ := text.ReadMIMEHeader() head := w.Header() for key, values := range headers { for _, value := range values { head.Add(key, value) } } io.Copy(w, text.R) } func redirect307Handler(w http.ResponseWriter, r *http.Request) { id, ok := reqId(w) if !ok { return } // Send a redirect to info/lfs // Make it either absolute or relative depending on subpath parts := strings.Split(r.URL.Path, "/") // first element is always blank since rooted var redirectTo string if parts[2] == "rel" { redirectTo = "/" + strings.Join(parts[3:], "/") } else if parts[2] == "abs" { redirectTo = server.URL + "/" + strings.Join(parts[3:], "/") } else { debug(id, "Invalid URL for redirect: %v", r.URL) w.WriteHeader(404) return } w.Header().Set("Location", redirectTo) w.WriteHeader(307) } type User struct { Name string `json:"name"` } type Lock struct { Id string `json:"id"` Path string `json:"path"` Owner User `json:"owner"` LockedAt time.Time `json:"locked_at"` } type LockRequest struct { Path string `json:"path"` } type LockResponse struct { Lock *Lock `json:"lock"` Message string `json:"message,omitempty"` } type UnlockRequest struct { Force bool `json:"force"` } type UnlockResponse struct { Lock *Lock `json:"lock"` Message string `json:"message,omitempty"` } type LockList struct { Locks []Lock `json:"locks"` NextCursor string `json:"next_cursor,omitempty"` Message string `json:"message,omitempty"` } type VerifiableLockRequest struct { Cursor string `json:"cursor,omitempty"` Limit int `json:"limit,omitempty"` } type VerifiableLockList struct { Ours []Lock `json:"ours"` Theirs []Lock `json:"theirs"` NextCursor string `json:"next_cursor,omitempty"` Message string `json:"message,omitempty"` } var ( lmu sync.RWMutex repoLocks = map[string][]Lock{} ) func addLocks(repo string, l ...Lock) { lmu.Lock() defer lmu.Unlock() repoLocks[repo] = append(repoLocks[repo], l...) 
sort.Sort(LocksByCreatedAt(repoLocks[repo])) } func getLocks(repo string) []Lock { lmu.RLock() defer lmu.RUnlock() locks := repoLocks[repo] cp := make([]Lock, len(locks)) for i, l := range locks { cp[i] = l } return cp } func getFilteredLocks(repo, path, cursor, limit string) ([]Lock, string, error) { locks := getLocks(repo) if cursor != "" { lastSeen := -1 for i, l := range locks { if l.Id == cursor { lastSeen = i break } } if lastSeen > -1 { locks = locks[lastSeen:] } else { return nil, "", fmt.Errorf("cursor (%s) not found", cursor) } } if path != "" { var filtered []Lock for _, l := range locks { if l.Path == path { filtered = append(filtered, l) } } locks = filtered } if limit != "" { size, err := strconv.Atoi(limit) if err != nil { return nil, "", errors.New("unable to parse limit amount") } size = int(math.Min(float64(len(locks)), 3)) if size < 0 { return nil, "", nil } if size+1 < len(locks) { return locks[:size], locks[size+1].Id, nil } } return locks, "", nil } func delLock(repo string, id string) *Lock { lmu.RLock() defer lmu.RUnlock() var deleted *Lock locks := make([]Lock, 0, len(repoLocks[repo])) for _, l := range repoLocks[repo] { if l.Id == id { deleted = &l continue } locks = append(locks, l) } repoLocks[repo] = locks return deleted } type LocksByCreatedAt []Lock func (c LocksByCreatedAt) Len() int { return len(c) } func (c LocksByCreatedAt) Less(i, j int) bool { return c[i].LockedAt.Before(c[j].LockedAt) } func (c LocksByCreatedAt) Swap(i, j int) { c[i], c[j] = c[j], c[i] } var ( lockRe = regexp.MustCompile(`/locks/?$`) unlockRe = regexp.MustCompile(`locks/([^/]+)/unlock\z`) ) func locksHandler(w http.ResponseWriter, r *http.Request, repo string) { dec := json.NewDecoder(r.Body) enc := json.NewEncoder(w) switch r.Method { case "GET": if !lockRe.MatchString(r.URL.Path) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusNotFound) w.Write([]byte(`{"message":"unknown path: ` + r.URL.Path + `"}`)) return } if err := r.ParseForm(); err != nil { http.Error(w, "could not parse form values", http.StatusInternalServerError) return } ll := &LockList{} w.Header().Set("Content-Type", "application/json") locks, nextCursor, err := getFilteredLocks(repo, r.FormValue("path"), r.FormValue("cursor"), r.FormValue("limit")) if err != nil { ll.Message = err.Error() } else { ll.Locks = locks ll.NextCursor = nextCursor } enc.Encode(ll) return case "POST": w.Header().Set("Content-Type", "application/json") if strings.HasSuffix(r.URL.Path, "unlock") { var unlockRequest UnlockRequest var lockId string if matches := unlockRe.FindStringSubmatch(r.URL.Path); len(matches) > 1 { lockId = matches[1] } if len(lockId) == 0 { enc.Encode(&UnlockResponse{Message: "Invalid lock"}) } if err := dec.Decode(&unlockRequest); err != nil { enc.Encode(&UnlockResponse{Message: err.Error()}) return } if l := delLock(repo, lockId); l != nil { enc.Encode(&UnlockResponse{Lock: l}) } else { enc.Encode(&UnlockResponse{Message: "unable to find lock"}) } return } if strings.HasSuffix(r.URL.Path, "/locks/verify") { if strings.HasSuffix(repo, "verify-5xx") { w.WriteHeader(500) return } if strings.HasSuffix(repo, "verify-501") { w.WriteHeader(501) return } if strings.HasSuffix(repo, "verify-403") { w.WriteHeader(403) return } switch repo { case "pre_push_locks_verify_404": w.WriteHeader(http.StatusNotFound) w.Write([]byte(`{"message":"pre_push_locks_verify_404"}`)) return case "pre_push_locks_verify_410": w.WriteHeader(http.StatusGone) w.Write([]byte(`{"message":"pre_push_locks_verify_410"}`)) return } 
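// A sketch of the verify exchange handled below, per the structs defined
// above (field values illustrative): the client POSTs something like
//
//	{"limit":10}
//
// and receives the locks split by ownership, e.g.
//
//	{"ours":[{"id":"...","path":"ours.dat","owner":{"name":"..."},"locked_at":"..."}],
//	 "theirs":null,"next_cursor":""}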
reqBody := &VerifiableLockRequest{} if err := dec.Decode(reqBody); err != nil { w.WriteHeader(http.StatusBadRequest) enc.Encode(struct { Message string `json:"message"` }{"json decode error: " + err.Error()}) return } ll := &VerifiableLockList{} locks, nextCursor, err := getFilteredLocks(repo, "", reqBody.Cursor, strconv.Itoa(reqBody.Limit)) if err != nil { ll.Message = err.Error() } else { ll.NextCursor = nextCursor for _, l := range locks { if strings.Contains(l.Path, "theirs") { ll.Theirs = append(ll.Theirs, l) } else { ll.Ours = append(ll.Ours, l) } } } enc.Encode(ll) return } if strings.HasSuffix(r.URL.Path, "/locks") { var lockRequest LockRequest if err := dec.Decode(&lockRequest); err != nil { enc.Encode(&LockResponse{Message: err.Error()}) } for _, l := range getLocks(repo) { if l.Path == lockRequest.Path { enc.Encode(&LockResponse{Message: "lock already created"}) return } } var id [20]byte rand.Read(id[:]) lock := &Lock{ Id: fmt.Sprintf("%x", id[:]), Path: lockRequest.Path, Owner: User{Name: "Git LFS Tests"}, LockedAt: time.Now(), } addLocks(repo, *lock) // TODO(taylor): commit_needed case // TODO(taylor): err case enc.Encode(&LockResponse{ Lock: lock, }) return } } http.NotFound(w, r) } func missingRequiredCreds(w http.ResponseWriter, r *http.Request, repo string) bool { if repo != "requirecreds" { return false } auth := r.Header.Get("Authorization") user, pass, err := extractAuth(auth) if err != nil { writeLFSError(w, 403, err.Error()) return true } if user != "requirecreds" || pass != "pass" { writeLFSError(w, 403, fmt.Sprintf("Got: '%s' => '%s' : '%s'", auth, user, pass)) return true } return false } func testingChunkedTransferEncoding(r *http.Request) bool { return strings.HasPrefix(r.URL.String(), "/test-chunked-transfer-encoding") } func testingTusUploadInBatchReq(r *http.Request) bool { return strings.HasPrefix(r.URL.String(), "/test-tus-upload") } func testingTusUploadInterruptedInBatchReq(r *http.Request) bool { return strings.HasPrefix(r.URL.String(), "/test-tus-upload-interrupt") } func testingCustomTransfer(r *http.Request) bool { return strings.HasPrefix(r.URL.String(), "/test-custom-transfer") } var lfsUrlRE = regexp.MustCompile(`\A/?([^/]+)/info/lfs`) func repoFromLfsUrl(urlpath string) (string, error) { matches := lfsUrlRE.FindStringSubmatch(urlpath) if len(matches) != 2 { return "", fmt.Errorf("LFS url '%s' does not match %v", urlpath, lfsUrlRE) } repo := matches[1] if strings.HasSuffix(repo, ".git") { return repo[0 : len(repo)-4], nil } return repo, nil } type lfsStorage struct { objects map[string]map[string][]byte incomplete map[string]map[string][]byte mutex *sync.Mutex } func (s *lfsStorage) Get(repo, oid string) ([]byte, bool) { s.mutex.Lock() defer s.mutex.Unlock() repoObjects, ok := s.objects[repo] if !ok { return nil, ok } by, ok := repoObjects[oid] return by, ok } func (s *lfsStorage) Has(repo, oid string) bool { s.mutex.Lock() defer s.mutex.Unlock() repoObjects, ok := s.objects[repo] if !ok { return false } _, ok = repoObjects[oid] return ok } func (s *lfsStorage) Set(repo, oid string, by []byte) { s.mutex.Lock() defer s.mutex.Unlock() repoObjects, ok := s.objects[repo] if !ok { repoObjects = make(map[string][]byte) s.objects[repo] = repoObjects } repoObjects[oid] = by } func (s *lfsStorage) Delete(repo, oid string) { s.mutex.Lock() defer s.mutex.Unlock() repoObjects, ok := s.objects[repo] if ok { delete(repoObjects, oid) } } func (s *lfsStorage) GetIncomplete(repo, oid string) ([]byte, bool) { s.mutex.Lock() defer s.mutex.Unlock() repoObjects, ok := 
s.incomplete[repo] if !ok { return nil, ok } by, ok := repoObjects[oid] return by, ok } func (s *lfsStorage) SetIncomplete(repo, oid string, by []byte) { s.mutex.Lock() defer s.mutex.Unlock() repoObjects, ok := s.incomplete[repo] if !ok { repoObjects = make(map[string][]byte) s.incomplete[repo] = repoObjects } repoObjects[oid] = by } func (s *lfsStorage) DeleteIncomplete(repo, oid string) { s.mutex.Lock() defer s.mutex.Unlock() repoObjects, ok := s.incomplete[repo] if ok { delete(repoObjects, oid) } } func newLfsStorage() *lfsStorage { return &lfsStorage{ objects: make(map[string]map[string][]byte), incomplete: make(map[string]map[string][]byte), mutex: &sync.Mutex{}, } } func extractAuth(auth string) (string, string, error) { if strings.HasPrefix(auth, "Basic ") { decodeBy, err := base64.StdEncoding.DecodeString(auth[6:len(auth)]) decoded := string(decodeBy) if err != nil { return "", "", err } parts := strings.SplitN(decoded, ":", 2) if len(parts) == 2 { return parts[0], parts[1], nil } return "", "", nil } return "", "", nil } func skipIfBadAuth(w http.ResponseWriter, r *http.Request, id string, ntlmSession ntlm.ServerSession) bool { auth := r.Header.Get("Authorization") if strings.Contains(r.URL.Path, "ntlm") { return false } if auth == "" { w.WriteHeader(401) return true } user, pass, err := extractAuth(auth) if err != nil { w.WriteHeader(403) debug(id, "Error decoding auth: %s", err) return true } switch user { case "user": if pass == "pass" { return false } case "netrcuser", "requirecreds": return false case "path": if strings.HasPrefix(r.URL.Path, "/"+pass) { return false } debug(id, "auth attempt against: %q", r.URL.Path) } w.WriteHeader(403) debug(id, "Bad auth: %q", auth) return true } func handleNTLM(w http.ResponseWriter, r *http.Request, authHeader string, session ntlm.ServerSession) { if strings.HasPrefix(strings.ToUpper(authHeader), "BASIC ") { authHeader = "" } switch authHeader { case "": w.Header().Set("Www-Authenticate", "ntlm") w.WriteHeader(401) // ntlmNegotiateMessage from httputil pkg case "NTLM TlRMTVNTUAABAAAAB7IIogwADAAzAAAACwALACgAAAAKAAAoAAAAD1dJTExISS1NQUlOTk9SVEhBTUVSSUNB": ch, err := session.GenerateChallengeMessage() if err != nil { writeLFSError(w, 500, err.Error()) return } chMsg := base64.StdEncoding.EncodeToString(ch.Bytes()) w.Header().Set("Www-Authenticate", "ntlm "+chMsg) w.WriteHeader(401) default: if !strings.HasPrefix(strings.ToUpper(authHeader), "NTLM ") { writeLFSError(w, 500, "bad authorization header: "+authHeader) return } auth := authHeader[5:] // strip "ntlm " prefix val, err := base64.StdEncoding.DecodeString(auth) if err != nil { writeLFSError(w, 500, "base64 decode error: "+err.Error()) return } _, err = ntlm.ParseAuthenticateMessage(val, 2) if err != nil { writeLFSError(w, 500, "auth parse error: "+err.Error()) return } } } func init() { oidHandlers = make(map[string]string) for _, content := range contentHandlers { h := sha256.New() h.Write([]byte(content)) oidHandlers[hex.EncodeToString(h.Sum(nil))] = content } } func debug(reqid, msg string, args ...interface{}) { fullargs := make([]interface{}, len(args)+1) fullargs[0] = reqid for i, a := range args { fullargs[i+1] = a } log.Printf("[%s] "+msg+"\n", fullargs...) 
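// For example, the call debug(id, "storage %s %s repo: %s", r.Method, oid, repo)
// in storageHandler logs a line such as:
//
//	[9a6f5c3e-1a2b-3c4d-5e6f-0123456789ab] storage PUT <oid> repo: <repo>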
} func reqId(w http.ResponseWriter) (string, bool) { b := make([]byte, 16) _, err := rand.Read(b) if err != nil { http.Error(w, "error generating id: "+err.Error(), 500) return "", false } return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]), true } // https://ericchiang.github.io/post/go-tls/ func generateCARootCertificates() (rootKey *rsa.PrivateKey, rootCert *x509.Certificate) { // generate a new key-pair rootKey, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { log.Fatalf("generating random key: %v", err) } rootCertTmpl, err := CertTemplate() if err != nil { log.Fatalf("creating cert template: %v", err) } // describe what the certificate will be used for rootCertTmpl.IsCA = true rootCertTmpl.KeyUsage = x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature rootCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth} // rootCertTmpl.IPAddresses = []net.IP{net.ParseIP("127.0.0.1")} rootCert, _, err = CreateCert(rootCertTmpl, rootCertTmpl, &rootKey.PublicKey, rootKey) return } func generateClientCertificates(rootCert *x509.Certificate, rootKey interface{}) (clientKey *rsa.PrivateKey, clientCertPEM []byte, clientKeyPEM []byte) { // create a key-pair for the client clientKey, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { log.Fatalf("generating random key: %v", err) } // create a template for the client clientCertTmpl, err1 := CertTemplate() if err1 != nil { log.Fatalf("creating cert template: %v", err1) } clientCertTmpl.KeyUsage = x509.KeyUsageDigitalSignature clientCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} // the root cert signs the cert by again providing its private key _, clientCertPEM, err2 := CreateCert(clientCertTmpl, rootCert, &clientKey.PublicKey, rootKey) if err2 != nil { log.Fatalf("error creating cert: %v", err2) } // encode and load the cert and private key for the client clientKeyPEM = pem.EncodeToMemory(&pem.Block{ Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(clientKey), }) return } // helper function to create a cert template with a serial number and other required fields func CertTemplate() (*x509.Certificate, error) { // generate a random serial number (a real cert authority would have some logic behind this) serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { return nil, errors.New("failed to generate serial number: " + err.Error()) } tmpl := x509.Certificate{ SerialNumber: serialNumber, Subject: pkix.Name{Organization: []string{"Yhat, Inc."}}, SignatureAlgorithm: x509.SHA256WithRSA, NotBefore: time.Now(), NotAfter: time.Now().Add(time.Hour), // valid for an hour BasicConstraintsValid: true, } return &tmpl, nil } func CreateCert(template, parent *x509.Certificate, pub interface{}, parentPriv interface{}) ( cert *x509.Certificate, certPEM []byte, err error) { certDER, err := x509.CreateCertificate(rand.Reader, template, parent, pub, parentPriv) if err != nil { return } // parse the resulting certificate so we can use it again cert, err = x509.ParseCertificate(certDER) if err != nil { return } // PEM encode the certificate (this is a standard TLS encoding) b := pem.Block{Type: "CERTIFICATE", Bytes: certDER} certPEM = pem.EncodeToMemory(&b) return } git-lfs-2.3.4/test/cmd/lfstest-standalonecustomadapter.go000066400000000000000000000141731317167762300235670ustar00rootroot00000000000000// +build testtools package main import ( "bufio" "encoding/json" "fmt" "io/ioutil" 
"os" "path/filepath" "strings" "time" "github.com/git-lfs/git-lfs/tools" ) var backupDir string // This test custom adapter just copies the files to a folder. func main() { scanner := bufio.NewScanner(os.Stdin) writer := bufio.NewWriter(os.Stdout) errWriter := bufio.NewWriter(os.Stderr) backupDir = os.Getenv("TEST_STANDALONE_BACKUP_PATH") if backupDir == "" { writeToStderr("TEST_STANDALONE_BACKUP_PATH backup dir not set", errWriter) os.Exit(1) } for scanner.Scan() { line := scanner.Text() var req request if err := json.Unmarshal([]byte(line), &req); err != nil { writeToStderr(fmt.Sprintf("Unable to parse request: %v\n", line), errWriter) continue } switch req.Event { case "init": writeToStderr(fmt.Sprintf("Initialised test custom adapter for %s\n", req.Operation), errWriter) resp := &initResponse{} sendResponse(resp, writer, errWriter) case "download": writeToStderr(fmt.Sprintf("Received download request for %s\n", req.Oid), errWriter) performDownload(req.Oid, req.Size, writer, errWriter) case "upload": writeToStderr(fmt.Sprintf("Received upload request for %s\n", req.Oid), errWriter) performUpload(req.Oid, req.Size, req.Path, writer, errWriter) case "terminate": writeToStderr("Terminating test custom adapter gracefully.\n", errWriter) break } } } func writeToStderr(msg string, errWriter *bufio.Writer) { if !strings.HasSuffix(msg, "\n") { msg = msg + "\n" } errWriter.WriteString(msg) errWriter.Flush() } func sendResponse(r interface{}, writer, errWriter *bufio.Writer) error { b, err := json.Marshal(r) if err != nil { return err } // Line oriented JSON b = append(b, '\n') _, err = writer.Write(b) if err != nil { return err } writer.Flush() writeToStderr(fmt.Sprintf("Sent message %v", string(b)), errWriter) return nil } func sendTransferError(oid string, code int, message string, writer, errWriter *bufio.Writer) { resp := &transferResponse{"complete", oid, "", &transferError{code, message}} err := sendResponse(resp, writer, errWriter) if err != nil { writeToStderr(fmt.Sprintf("Unable to send transfer error: %v\n", err), errWriter) } } func sendProgress(oid string, bytesSoFar int64, bytesSinceLast int, writer, errWriter *bufio.Writer) { resp := &progressResponse{"progress", oid, bytesSoFar, bytesSinceLast} err := sendResponse(resp, writer, errWriter) if err != nil { writeToStderr(fmt.Sprintf("Unable to send progress update: %v\n", err), errWriter) } } func performCopy(oid, src, dst string, size int64, writer, errWriter *bufio.Writer) error { writeToStderr(fmt.Sprintf("Copying %s to %s\n", src, dst), errWriter) srcFile, err := os.OpenFile(src, os.O_RDONLY, 0644) if err != nil { sendTransferError(oid, 10, err.Error(), writer, errWriter) return err } defer srcFile.Close() dstFile, err := os.Create(dst) if err != nil { sendTransferError(oid, 11, err.Error(), writer, errWriter) return err } defer dstFile.Close() // Turn callback into progress messages cb := func(totalSize int64, readSoFar int64, readSinceLast int) error { sendProgress(oid, readSoFar, readSinceLast, writer, errWriter) return nil } _, err = tools.CopyWithCallback(dstFile, srcFile, size, cb) if err != nil { sendTransferError(oid, 4, fmt.Sprintf("cannot write data to dst %q: %v", dst, err), writer, errWriter) os.Remove(dst) return err } if err := dstFile.Close(); err != nil { sendTransferError(oid, 5, fmt.Sprintf("can't close dst %q: %v", dst, err), writer, errWriter) os.Remove(dst) return err } return nil } func performDownload(oid string, size int64, writer, errWriter *bufio.Writer) { dlFile, err := ioutil.TempFile("", 
"lfscustomdl") if err != nil { sendTransferError(oid, 1, err.Error(), writer, errWriter) return } if err = dlFile.Close(); err != nil { sendTransferError(oid, 2, err.Error(), writer, errWriter) return } dlfilename := dlFile.Name() backupPath := filepath.Join(backupDir, oid) if err = performCopy(oid, backupPath, dlfilename, size, writer, errWriter); err != nil { return } // completed complete := &transferResponse{"complete", oid, dlfilename, nil} if err := sendResponse(complete, writer, errWriter); err != nil { writeToStderr(fmt.Sprintf("Unable to send completion message: %v\n", err), errWriter) } } func performUpload(oid string, size int64, fromPath string, writer, errWriter *bufio.Writer) { backupPath := filepath.Join(backupDir, oid) if err := performCopy(oid, fromPath, backupPath, size, writer, errWriter); err != nil { return } // completed complete := &transferResponse{"complete", oid, "", nil} if err := sendResponse(complete, writer, errWriter); err != nil { writeToStderr(fmt.Sprintf("Unable to send completion message: %v\n", err), errWriter) } } // Structs reimplemented so closer to a real external implementation type header struct { Key string `json:"key"` Value string `json:"value"` } type action struct { Href string `json:"href"` Header map[string]string `json:"header,omitempty"` ExpiresAt time.Time `json:"expires_at,omitempty"` } type transferError struct { Code int `json:"code"` Message string `json:"message"` } // Combined request struct which can accept anything type request struct { Event string `json:"event"` Operation string `json:"operation"` Concurrent bool `json:"concurrent"` ConcurrentTransfers int `json:"concurrenttransfers"` Oid string `json:"oid"` Size int64 `json:"size"` Path string `json:"path"` Action *action `json:"action"` } type initResponse struct { Error *transferError `json:"error,omitempty"` } type transferResponse struct { Event string `json:"event"` Oid string `json:"oid"` Path string `json:"path,omitempty"` // always blank for upload Error *transferError `json:"error,omitempty"` } type progressResponse struct { Event string `json:"event"` Oid string `json:"oid"` BytesSoFar int64 `json:"bytesSoFar"` BytesSinceLast int `json:"bytesSinceLast"` } git-lfs-2.3.4/test/cmd/lfstest-testutils.go000066400000000000000000000042121317167762300206740ustar00rootroot00000000000000// +build testtools package main import ( "encoding/json" "fmt" "io/ioutil" "os" "path/filepath" "github.com/git-lfs/git-lfs/test" ) type TestUtilRepoCallback struct{} func (*TestUtilRepoCallback) Fatalf(format string, args ...interface{}) { fmt.Fprintf(os.Stderr, format, args...) os.Exit(4) } func (*TestUtilRepoCallback) Errorf(format string, args ...interface{}) { fmt.Fprintf(os.Stderr, format, args...) } func main() { commandMap := map[string]func(*test.Repo){ "addcommits": AddCommits, } if len(os.Args) < 2 { fmt.Fprintf(os.Stderr, "Command required (e.g. 
addcommits)\n") os.Exit(2) } f, ok := commandMap[os.Args[1]] if !ok { fmt.Fprintf(os.Stderr, "Unknown command: %v\n", os.Args[1]) os.Exit(2) } // Construct test repo context (note: no Cleanup() call since managed outside) // also assume we're in the same folder wd, err := os.Getwd() if err != nil { fmt.Fprintf(os.Stderr, "Problem getting working dir: %v\n", err) os.Exit(2) } // Make sure we're directly inside directory which contains .git // don't want to accidentally end up committing to some other parent git _, err = os.Stat(filepath.Join(wd, ".git")) if err != nil { fmt.Fprintf(os.Stderr, "You're in the wrong directory, should be in root of a test repo: %v\n", err) os.Exit(2) } repo := test.WrapRepo(&TestUtilRepoCallback{}, wd) f(repo) } func AddCommits(repo *test.Repo) { // Read stdin as JSON []*test.CommitInput in, err := ioutil.ReadAll(os.Stdin) if err != nil { fmt.Fprintf(os.Stderr, "addcommits: Unable to read input data: %v\n", err) os.Exit(3) } inputs := make([]*test.CommitInput, 0) err = json.Unmarshal(in, &inputs) if err != nil { fmt.Fprintf(os.Stderr, "addcommits: Unable to unmarshal JSON: %v\n%v\n", string(in), err) os.Exit(3) } outputs := repo.AddCommits(inputs) by, err := json.Marshal(outputs) if err != nil { fmt.Fprintf(os.Stderr, "addcommits: Unable to marshal output JSON: %v\n", err) os.Exit(3) } // Write response to stdout _, err = os.Stdout.Write(by) if err != nil { fmt.Fprintf(os.Stderr, "addcommits: Error writing JSON to stdout: %v\n", err) os.Exit(3) } os.Stdout.WriteString("\n") } git-lfs-2.3.4/test/git-lfs-test-server-api/000077500000000000000000000000001317167762300204565ustar00rootroot00000000000000git-lfs-2.3.4/test/git-lfs-test-server-api/.gitignore000066400000000000000000000000301317167762300224370ustar00rootroot00000000000000git-lfs-test-server-api*git-lfs-2.3.4/test/git-lfs-test-server-api/README.md000066400000000000000000000060711317167762300217410ustar00rootroot00000000000000# Git LFS Server API compliance test utility This package exists to provide automated testing of server API implementations, to ensure that they conform to the behaviour expected by the client. You can run this utility against any server that implements the Git LFS API. ## Automatic or data-driven testing This utility is primarily intended to test the API implementation, but in order to correctly test the responses, the tests have to know what objects exist on the server already and which don't. In 'automatic' mode, the tests require that both the API and the content server it links to via upload and download links are both available & free to use. The content server must be empty at the start of the tests, and the tests will upload some data as part of the tests. Therefore obviously this cannot be a production system. Alternatively, in 'data-driven' mode, the tests must be provided with a list of object IDs that already exist on the server (minimum 10), and a list of other object IDs that are known to not exist. The test will use these IDs to construct its data sets, will only call the API (not the content server), and thus will not update any data - meaning you can in theory run this against a production system. ## Calling the test tool ``` git-lfs-test-server-api [--url= | --clone=] [ ] [--save=] ``` |Argument|Purpose| |------|-------| |`--url=`|URL of the server API to call. This must point directly at the API root and not the clone URL, and must be HTTP[S]. You must supply either this argument or the `--clone` argument| |`--clone=`|The clone URL from which to derive the API URL. 
If it is HTTP[S], the test will try to find the API at `/info/lfs`; if it is an SSH URL, then the test will call git-lfs-authenticate on the server to derive the API (with auth token if needed) just like the git-lfs client does. You must supply either this argument or the `--url` argument| |` `|Optional input files for data-driven mode (both must be supplied if this is used); each must be a file with ` ` per line. The first file must be a list of oids that exist on the server, the second must be a list of oids known not to exist. If supplied, the tests will not call the content server or modify any data. If omitted, the test will generate its own list of oids and will modify the server (and expects that the server is empty of oids at the start)| |`--save=`|If specified and no input files were provided, saves generated test data in the files `_exists` and `_missing`. These can be used as parameters to subsequent runs if required, if the server content remains unchanged between runs.| ## Authentication Authentication will behave just like the git-lfs client, so for HTTP[S] URLs the git credential helper system will be used to obtain logins, and for SSH URLs, keys can be used to automate login. Otherwise you will receive prompts on the command line. git-lfs-2.3.4/test/git-lfs-test-server-api/main.go000066400000000000000000000222531317167762300217350ustar00rootroot00000000000000package main import ( "bufio" "crypto/sha256" "encoding/hex" "fmt" "math/rand" "os" "strconv" "strings" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/progress" "github.com/git-lfs/git-lfs/test" "github.com/git-lfs/git-lfs/tq" "github.com/spf13/cobra" ) type TestObject struct { Oid string Size int64 } type ServerTest struct { Name string F func(m *tq.Manifest, oidsExist, oidsMissing []TestObject) error } var ( RootCmd = &cobra.Command{ Use: "git-lfs-test-server-api [--url= | --clone=] [ ]", Short: "Test a Git LFS API server for compliance", Run: testServerApi, } apiUrl string cloneUrl string savePrefix string tests []ServerTest ) func main() { RootCmd.Execute() } func testServerApi(cmd *cobra.Command, args []string) { if (len(apiUrl) == 0 && len(cloneUrl) == 0) || (len(apiUrl) != 0 && len(cloneUrl) != 0) { exit("Must supply either --url or --clone (and not both)") } if len(args) != 0 && len(args) != 2 { exit("Must supply either no file arguments or both the exists AND missing file") } if len(args) != 0 && len(savePrefix) > 0 { exit("Cannot combine input files and --save option") } // Force loading of config before we alter it config.Config.Git.All() manifest, err := buildManifest() if err != nil { exit("error building tq.Manifest: " + err.Error()) } var oidsExist, oidsMissing []TestObject if len(args) >= 2 { fmt.Printf("Reading test data from files (no server content changes)\n") oidsExist = readTestOids(args[0]) oidsMissing = readTestOids(args[1]) } else { fmt.Printf("Creating test data (will upload to server)\n") var err error oidsExist, oidsMissing, err = buildTestData(manifest) if err != nil { exit("Failed to set up test data, aborting") } if len(savePrefix) > 0 { existFile := savePrefix + "_exists" missingFile := savePrefix + "_missing" saveTestOids(existFile, oidsExist) saveTestOids(missingFile, oidsMissing) fmt.Printf("Wrote test to %s, %s for future use\n", existFile, missingFile) } } ok := runTests(manifest, oidsExist, oidsMissing) if !ok { exit("One or more tests failed, see above") } 
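// Reaching this point means every registered test (added via addTest in
// testdownload.go and testupload.go) returned nil. Illustrative
// invocations, with placeholder URLs (the flags are declared in init()
// below):
//
//	git-lfs-test-server-api --url=https://lfs.example.com/repo/info/lfs --save=run1
//	git-lfs-test-server-api --clone=https://example.com/repo.git run1_exists run1_missing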
fmt.Println("All tests passed") } func readTestOids(filename string) []TestObject { f, err := os.OpenFile(filename, os.O_RDONLY, 0644) if err != nil { exit("Error opening file %s", filename) } defer f.Close() var ret []TestObject rdr := bufio.NewReader(f) line, err := rdr.ReadString('\n') for err == nil { fields := strings.Fields(strings.TrimSpace(line)) if len(fields) == 2 { sz, _ := strconv.ParseInt(fields[1], 10, 64) ret = append(ret, TestObject{Oid: fields[0], Size: sz}) } line, err = rdr.ReadString('\n') } return ret } type testDataCallback struct{} func (*testDataCallback) Fatalf(format string, args ...interface{}) { exit(format, args...) } func (*testDataCallback) Errorf(format string, args ...interface{}) { fmt.Printf(format, args...) } func buildManifest() (*tq.Manifest, error) { cfg := config.Config // Configure the endpoint manually finder := lfsapi.NewEndpointFinder(config.Config.Git) var endp lfsapi.Endpoint if len(cloneUrl) > 0 { endp = finder.NewEndpointFromCloneURL(cloneUrl) } else { endp = finder.NewEndpoint(apiUrl) } apiClient, err := lfsapi.NewClient(cfg.Os, cfg.Git) apiClient.Endpoints = &constantEndpoint{ e: endp, EndpointFinder: apiClient.Endpoints, } if err != nil { return nil, err } return tq.NewManifestWithClient(apiClient), nil } type constantEndpoint struct { e lfsapi.Endpoint lfsapi.EndpointFinder } func (c *constantEndpoint) NewEndpointFromCloneURL(rawurl string) lfsapi.Endpoint { return c.e } func (c *constantEndpoint) NewEndpoint(rawurl string) lfsapi.Endpoint { return c.e } func (c *constantEndpoint) Endpoint(operation, remote string) lfsapi.Endpoint { return c.e } func (c *constantEndpoint) RemoteEndpoint(operation, remote string) lfsapi.Endpoint { return c.e } func buildTestData(manifest *tq.Manifest) (oidsExist, oidsMissing []TestObject, err error) { const oidCount = 50 oidsExist = make([]TestObject, 0, oidCount) oidsMissing = make([]TestObject, 0, oidCount) meter := progress.NewMeter(progress.WithOSEnv(config.Config.Os)) // Build test data for existing files & upload // Use test repo for this to simplify the process of making sure data matches oid // We're not performing a real test at this point (although an upload fail will break it) var callback testDataCallback repo := test.NewRepo(&callback) repo.Pushd() defer repo.Cleanup() // just one commit commit := test.CommitInput{CommitterName: "A N Other", CommitterEmail: "noone@somewhere.com"} for i := 0; i < oidCount; i++ { filename := fmt.Sprintf("file%d.dat", i) sz := int64(rand.Intn(200)) + 50 commit.Files = append(commit.Files, &test.FileInput{Filename: filename, Size: sz}) meter.Add(sz) } outputs := repo.AddCommits([]*test.CommitInput{&commit}) // now upload uploadQueue := tq.NewTransferQueue(tq.Upload, manifest, "origin", tq.WithProgress(meter)) for _, f := range outputs[0].Files { oidsExist = append(oidsExist, TestObject{Oid: f.Oid, Size: f.Size}) t, err := uploadTransfer(f.Oid, "Test file") if err != nil { return nil, nil, err } uploadQueue.Add(t.Name, t.Path, t.Oid, t.Size) } uploadQueue.Wait() for _, err := range uploadQueue.Errors() { if errors.IsFatalError(err) { exit("Fatal error setting up test data: %s", err) } } // Generate SHAs for missing files, random but repeatable // No actual file content needed for these rand.Seed(int64(oidCount)) runningSha := sha256.New() for i := 0; i < oidCount; i++ { runningSha.Write([]byte{byte(rand.Intn(256))}) oid := hex.EncodeToString(runningSha.Sum(nil)) sz := int64(rand.Intn(200)) + 50 oidsMissing = append(oidsMissing, TestObject{Oid: oid, Size: sz}) } 
return oidsExist, oidsMissing, nil } func saveTestOids(filename string, objs []TestObject) { f, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) if err != nil { exit("Error opening file %s", filename) } defer f.Close() for _, o := range objs { f.WriteString(fmt.Sprintf("%s %d\n", o.Oid, o.Size)) } } func runTests(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) bool { ok := true fmt.Printf("Running %d tests...\n", len(tests)) for _, t := range tests { err := runTest(t, manifest, oidsExist, oidsMissing) if err != nil { ok = false } } return ok } func runTest(t ServerTest, manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error { const linelen = 70 line := t.Name if len(line) > linelen { line = line[:linelen] } else if len(line) < linelen { line = fmt.Sprintf("%s%s", line, strings.Repeat(" ", linelen-len(line))) } fmt.Printf("%s...\r", line) err := t.F(manifest, oidsExist, oidsMissing) if err != nil { fmt.Printf("%s FAILED\n", line) fmt.Println(err.Error()) } else { fmt.Printf("%s OK\n", line) } return err } // Exit prints a formatted message and exits. func exit(format string, args ...interface{}) { fmt.Fprintf(os.Stderr, format, args...) os.Exit(2) } func addTest(name string, f func(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error) { tests = append(tests, ServerTest{Name: name, F: f}) } func callBatchApi(manifest *tq.Manifest, dir tq.Direction, objs []TestObject) ([]*tq.Transfer, error) { apiobjs := make([]*tq.Transfer, 0, len(objs)) for _, o := range objs { apiobjs = append(apiobjs, &tq.Transfer{Oid: o.Oid, Size: o.Size}) } bres, err := tq.Batch(manifest, dir, "origin", apiobjs) if err != nil { return nil, err } return bres.Objects, nil } // Combine 2 slices into one by "randomly" interleaving // Not actually random, same sequence each time so repeatable func interleaveTestData(slice1, slice2 []TestObject) []TestObject { // Predictable sequence, mixin existing & missing semi-randomly rand.Seed(21) count := len(slice1) + len(slice2) ret := make([]TestObject, 0, count) slice1Idx := 0 slice2Idx := 0 for left := count; left > 0; { for i := rand.Intn(3) + 1; slice1Idx < len(slice1) && i > 0; i-- { obj := slice1[slice1Idx] ret = append(ret, obj) slice1Idx++ left-- } for i := rand.Intn(3) + 1; slice2Idx < len(slice2) && i > 0; i-- { obj := slice2[slice2Idx] ret = append(ret, obj) slice2Idx++ left-- } } return ret } func uploadTransfer(oid, filename string) (*tq.Transfer, error) { localMediaPath, err := lfs.LocalMediaPath(oid) if err != nil { return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) } fi, err := os.Stat(localMediaPath) if err != nil { return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) } return &tq.Transfer{ Name: filename, Path: localMediaPath, Oid: oid, Size: fi.Size(), }, nil } func init() { RootCmd.Flags().StringVarP(&apiUrl, "url", "u", "", "URL of the API (must supply this or --clone)") RootCmd.Flags().StringVarP(&cloneUrl, "clone", "c", "", "Clone URL from which to find API (must supply this or --url)") RootCmd.Flags().StringVarP(&savePrefix, "save", "s", "", "Saves generated data to _exists|missing for subsequent use") } git-lfs-2.3.4/test/git-lfs-test-server-api/testdownload.go000066400000000000000000000064351317167762300235240ustar00rootroot00000000000000package main import ( "bytes" "errors" "fmt" "github.com/git-lfs/git-lfs/tools" "github.com/git-lfs/git-lfs/tq" ) // "download" - all present func downloadAllExist(manifest *tq.Manifest, oidsExist, oidsMissing 
[]TestObject) error { retobjs, err := callBatchApi(manifest, tq.Download, oidsExist) if err != nil { return err } if len(retobjs) != len(oidsExist) { return fmt.Errorf("Incorrect number of returned objects, expected %d, got %d", len(oidsExist), len(retobjs)) } var errbuf bytes.Buffer for _, o := range retobjs { rel, _ := o.Rel("download") if rel == nil { errbuf.WriteString(fmt.Sprintf("Missing download link for %s\n", o.Oid)) } } if errbuf.Len() > 0 { return errors.New(errbuf.String()) } return nil } // "download" - all missing (test includes 404 error entry) func downloadAllMissing(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error { retobjs, err := callBatchApi(manifest, tq.Download, oidsMissing) if err != nil { return err } if len(retobjs) != len(oidsMissing) { return fmt.Errorf("Incorrect number of returned objects, expected %d, got %d", len(oidsMissing), len(retobjs)) } var errbuf bytes.Buffer for _, o := range retobjs { link, _ := o.Rel("download") if link != nil { errbuf.WriteString(fmt.Sprintf("Download link should not exist for %s, was %s\n", o.Oid, link)) } if o.Error == nil { errbuf.WriteString(fmt.Sprintf("Download should include an error for missing object %s\n", o.Oid)) } else if o.Error.Code != 404 { errbuf.WriteString(fmt.Sprintf("Download error code for missing object %s should be 404, got %d\n", o.Oid, o.Error.Code)) } } if errbuf.Len() > 0 { return errors.New(errbuf.String()) } return nil } // "download" - mixture func downloadMixed(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error { existSet := tools.NewStringSetWithCapacity(len(oidsExist)) for _, o := range oidsExist { existSet.Add(o.Oid) } missingSet := tools.NewStringSetWithCapacity(len(oidsMissing)) for _, o := range oidsMissing { missingSet.Add(o.Oid) } calloids := interleaveTestData(oidsExist, oidsMissing) retobjs, err := callBatchApi(manifest, tq.Download, calloids) if err != nil { return err } count := len(oidsExist) + len(oidsMissing) if len(retobjs) != count { return fmt.Errorf("Incorrect number of returned objects, expected %d, got %d", count, len(retobjs)) } var errbuf bytes.Buffer for _, o := range retobjs { link, _ := o.Rel("download") if missingSet.Contains(o.Oid) { if link != nil { errbuf.WriteString(fmt.Sprintf("Download link should not exist for %s, was %s\n", o.Oid, link)) } if o.Error == nil { errbuf.WriteString(fmt.Sprintf("Download should include an error for missing object %s", o.Oid)) } else if o.Error.Code != 404 { errbuf.WriteString(fmt.Sprintf("Download error code for missing object %s should be 404, got %d\n", o.Oid, o.Error.Code)) } } if existSet.Contains(o.Oid) && link == nil { errbuf.WriteString(fmt.Sprintf("Missing download link for %s\n", o.Oid)) } } if errbuf.Len() > 0 { return errors.New(errbuf.String()) } return nil } func init() { addTest("Test download: all existing", downloadAllExist) addTest("Test download: all missing", downloadAllMissing) addTest("Test download: mixed", downloadMixed) } git-lfs-2.3.4/test/git-lfs-test-server-api/testupload.go000066400000000000000000000132241317167762300231730ustar00rootroot00000000000000package main import ( "bytes" "errors" "fmt" "github.com/git-lfs/git-lfs/tools" "github.com/git-lfs/git-lfs/tq" ) // "upload" - all missing func uploadAllMissing(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error { retobjs, err := callBatchApi(manifest, tq.Upload, oidsMissing) if err != nil { return err } if len(retobjs) != len(oidsMissing) { return fmt.Errorf("Incorrect number of returned objects, expected %d, got 
%d", len(oidsMissing), len(retobjs)) } var errbuf bytes.Buffer for _, o := range retobjs { rel, _ := o.Rel("upload") if rel == nil { errbuf.WriteString(fmt.Sprintf("Missing upload link for %s\n", o.Oid)) } // verify link is optional so don't check } if errbuf.Len() > 0 { return errors.New(errbuf.String()) } return nil } // "upload" - all present func uploadAllExists(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error { retobjs, err := callBatchApi(manifest, tq.Upload, oidsExist) if err != nil { return err } if len(retobjs) != len(oidsExist) { return fmt.Errorf("Incorrect number of returned objects, expected %d, got %d", len(oidsExist), len(retobjs)) } var errbuf bytes.Buffer for _, o := range retobjs { link, _ := o.Rel("upload") if link == nil { errbuf.WriteString(fmt.Sprintf("Upload link should not exist for %s, was %s\n", o.Oid, link)) } } if errbuf.Len() > 0 { return errors.New(errbuf.String()) } return nil } // "upload" - mix of missing & present func uploadMixed(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error { existSet := tools.NewStringSetWithCapacity(len(oidsExist)) for _, o := range oidsExist { existSet.Add(o.Oid) } missingSet := tools.NewStringSetWithCapacity(len(oidsMissing)) for _, o := range oidsMissing { missingSet.Add(o.Oid) } calloids := interleaveTestData(oidsExist, oidsMissing) retobjs, err := callBatchApi(manifest, tq.Upload, calloids) if err != nil { return err } count := len(oidsExist) + len(oidsMissing) if len(retobjs) != count { return fmt.Errorf("Incorrect number of returned objects, expected %d, got %d", count, len(retobjs)) } var errbuf bytes.Buffer for _, o := range retobjs { link, _ := o.Rel("upload") if existSet.Contains(o.Oid) { if link != nil { errbuf.WriteString(fmt.Sprintf("Upload link should not exist for %s, was %s\n", o.Oid, link)) } } if missingSet.Contains(o.Oid) && link == nil { errbuf.WriteString(fmt.Sprintf("Missing upload link for %s\n", o.Oid)) } } if errbuf.Len() > 0 { return errors.New(errbuf.String()) } return nil } func uploadEdgeCases(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error { errorCases := make([]TestObject, 0, 5) errorCodeMap := make(map[string]int, 5) errorReasonMap := make(map[string]string, 5) validCases := make([]TestObject, 0, 1) validReasonMap := make(map[string]string, 5) // Invalid SHAs - code 422 // Too short sha := "a345cde" errorCases = append(errorCases, TestObject{Oid: sha, Size: 99}) errorCodeMap[sha] = 422 errorReasonMap[sha] = "SHA is too short" // Too long sha = "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" errorCases = append(errorCases, TestObject{Oid: sha, Size: 99}) errorCodeMap[sha] = 422 errorReasonMap[sha] = "SHA is too long" // Invalid characters -----!---------------------------------! 
sha = "bf3e3e2af9366a3b704ax0c31de5afa64193ebabffde2091936ad2G7510bc03a" errorCases = append(errorCases, TestObject{Oid: sha, Size: 99}) errorCodeMap[sha] = 422 errorReasonMap[sha] = "SHA contains invalid characters" // Invalid size - code 422 sha = "e3bf3e2af9366a3b704af0c31de5afa64193ebabffde2091936ad237510bc03a" errorCases = append(errorCases, TestObject{Oid: sha, Size: -1}) errorCodeMap[sha] = 422 errorReasonMap[sha] = "Negative size" sha = "d2983e2af9366a3b704af0c31de5afa64193ebabffde2091936ad237510bc03a" errorCases = append(errorCases, TestObject{Oid: sha, Size: -125}) errorCodeMap[sha] = 422 errorReasonMap[sha] = "Negative size" // Zero size - should be allowed sha = "159f6ac723b9023b704af0c31de5afa64193ebabffde2091936ad237510bc03a" validCases = append(validCases, TestObject{Oid: sha, Size: 0}) validReasonMap[sha] = "Zero size should be allowed" calloids := interleaveTestData(errorCases, validCases) retobjs, err := callBatchApi(manifest, tq.Upload, calloids) if err != nil { return err } count := len(errorCases) + len(validCases) if len(retobjs) != count { return fmt.Errorf("Incorrect number of returned objects, expected %d, got %d", count, len(retobjs)) } var errbuf bytes.Buffer for _, o := range retobjs { link, _ := o.Rel("upload") if code, iserror := errorCodeMap[o.Oid]; iserror { reason, _ := errorReasonMap[o.Oid] if link != nil { errbuf.WriteString(fmt.Sprintf("Upload link should not exist for %s, was %s, reason %s\n", o.Oid, link, reason)) } if o.Error == nil { errbuf.WriteString(fmt.Sprintf("Upload should include an error for invalid object %s, reason %s", o.Oid, reason)) } else if o.Error.Code != code { errbuf.WriteString(fmt.Sprintf("Upload error code for missing object %s should be %d, got %d, reason %s\n", o.Oid, code, o.Error.Code, reason)) } } if reason, reasonok := validReasonMap[o.Oid]; reasonok { if link == nil { errbuf.WriteString(fmt.Sprintf("Missing upload link for %s, should be present because %s\n", o.Oid, reason)) } } } if errbuf.Len() > 0 { return errors.New(errbuf.String()) } return nil } func init() { addTest("Test upload: all missing", uploadAllMissing) addTest("Test upload: all present", uploadAllExists) addTest("Test upload: mixed", uploadMixed) addTest("Test upload: edge cases", uploadEdgeCases) } git-lfs-2.3.4/test/test-askpass.sh000077500000000000000000000062331317167762300170460ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "askpass: push with GIT_ASKPASS" ( set -e reponame="askpass-with-git-environ" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "hello" > a.dat git add .gitattributes a.dat git commit -m "initial commit" # $password is defined from test/cmd/lfstest-gitserver.go (see: skipIfBadAuth) export LFS_ASKPASS_USERNAME="user" export LFS_ASKPASS_PASSWORD="pass" git config "credential.helper" "" GIT_ASKPASS="lfs-askpass" SSH_ASKPASS="dont-call-me" GIT_TRACE=1 GIT_CURL_VERBOSE=1 git push origin master 2>&1 | tee push.log GITSERVER_USER="$(printf $GITSERVER | sed -e 's/http:\/\//http:\/\/user@/')" grep "filling with GIT_ASKPASS: lfs-askpass Username for \"$GITSERVER/$reponame\"" push.log grep "filling with GIT_ASKPASS: lfs-askpass Password for \"$GITSERVER_USER/$reponame\"" push.log grep "master -> master" push.log ) end_test begin_test "askpass: push with core.askPass" ( set -e if [ ! -z "$TRAVIS" ] ; then # This test is known to be broken on Travis, so we skip it if the $TRAVIS # environment variable is set. # # See: https://github.com/git-lfs/git-lfs/pull/2500 for more. 
exit 0 fi reponame="askpass-with-config" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "hello" > a.dat git add .gitattributes a.dat git commit -m "initial commit" # $password is defined from test/cmd/lfstest-gitserver.go (see: skipIfBadAuth) export LFS_ASKPASS_PASSWORD="pass" git config "credential.helper" "" git config "core.askPass" "lfs-askpass" cat .git/config SSH_ASKPASS="dont-call-me" GIT_TRACE=1 GIT_CURL_VERBOSE=1 git push origin master 2>&1 | tee push.log GITSERVER_USER="$(printf $GITSERVER | sed -e 's/http:\/\//http:\/\/user@/')" grep "filling with GIT_ASKPASS: lfs-askpass Username for \"$GITSERVER/$reponame\"" push.log grep "filling with GIT_ASKPASS: lfs-askpass Password for \"$GITSERVER_USER/$reponame\"" push.log grep "master -> master" push.log ) end_test begin_test "askpass: push with SSH_ASKPASS" ( set -e if [ ! -z "$TRAVIS" ] ; then # This test is known to be broken on Travis, so we skip it if the $TRAVIS # environment variable is set. # # See: https://github.com/git-lfs/git-lfs/pull/2500 for more. exit 0 fi reponame="askpass-with-ssh-environ" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" echo "hello" > a.dat git add .gitattributes a.dat git commit -m "initial commit" # $password is defined from test/cmd/lfstest-gitserver.go (see: skipIfBadAuth) export LFS_ASKPASS_USERNAME="user" export LFS_ASKPASS_PASSWORD="pass" git config "credential.helper" "" SSH_ASKPASS="lfs-askpass" GIT_TRACE=1 GIT_CURL_VERBOSE=1 git push origin master 2>&1 | tee push.log GITSERVER_USER="$(printf $GITSERVER | sed -e 's/http:\/\//http:\/\/user@/')" grep "filling with GIT_ASKPASS: lfs-askpass Username for \"$GITSERVER/$reponame\"" push.log grep "filling with GIT_ASKPASS: lfs-askpass Password for \"$GITSERVER_USER/$reponame\"" push.log grep "master -> master" push.log ) end_test git-lfs-2.3.4/test/test-batch-error-handling.sh000077500000000000000000000031721317167762300213720ustar00rootroot00000000000000#!/usr/bin/env bash # This is a sample Git LFS test. See test/README.md and testhelpers.sh for # more documentation. . "test/testlib.sh" begin_test "batch error handling" ( set -e # This initializes a new bare git repository in test/remote. # These remote repositories are global to every test, so keep the names # unique. reponame="badbatch" # Server looks for the "badbatch" repo, returns a 203 status setup_remote_repo "$reponame" # Clone the repository from the test Git server. This is empty, and will be # used to test a "git pull" below. The repo is cloned to $TRASHDIR/clone clone_repo "$reponame" clone # Clone the repository again to $TRASHDIR/repo. This will be used to commit # and push objects. clone_repo "$reponame" repo # This executes Git LFS from the local repo that was just cloned. git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") printf "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log grep "master (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat a.dat)" ] # This is a small shell function that runs several git commands together. assert_pointer "master" "a.dat" "$contents_oid" 1 refute_server_object "$reponame" "$contents_oid" # This pushes to the remote repository set up at the top of the test. 
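  # The "badbatch" repo makes the server answer the batch request with an
  # unexpected 203 status, so the client is expected to fail while parsing
  # the batch response (asserted by the grep below).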
git push origin master 2>&1 | tee push.log grep "Unable to parse HTTP response" push.log ) end_test git-lfs-2.3.4/test/test-batch-retries.sh000077500000000000000000000036551317167762300201420ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "batch storage upload causes retries" ( set -e reponame="batch-storage-upload-retry" setup_remote_repo "$reponame" clone_repo "$reponame" batch-storage-repo-upload contents="storage-upload-retry" oid="$(calc_oid "$contents")" printf "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit -m "initial commit" git config --local lfs.transfer.maxretries 3 GIT_TRACE=1 git push origin master 2>&1 | tee push.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git push origin master\` to succeed ..." exit 1 fi actual_count="$(grep -c "tq: retrying object $oid: Fatal error: Server error" push.log)" [ "2" = "$actual_count" ] assert_server_object "$reponame" "$oid" ) end_test begin_test "batch storage download causes retries" ( set -e reponame="batch-storage-download-retry" setup_remote_repo "$reponame" clone_repo "$reponame" batch-storage-repo-download contents="storage-download-retry" oid="$(calc_oid "$contents")" printf "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit -m "initial commit" git push origin master assert_server_object "$reponame" "$oid" pushd .. git \ -c "filter.lfs.process=" \ -c "filter.lfs.smudge=cat" \ -c "filter.lfs.required=false" \ clone "$GITSERVER/$reponame" "$reponame-assert" cd "$reponame-assert" git config credential.helper lfstest git config --local lfs.transfer.maxretries 3 GIT_TRACE=1 git lfs pull origin master 2>&1 | tee pull.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git lfs pull origin master\` to succeed ..." exit 1 fi actual_count="$(grep -c "tq: retrying object $oid: Fatal error: Server error" pull.log)" [ "2" = "$actual_count" ] assert_local_object "$oid" "${#contents}" popd ) end_test git-lfs-2.3.4/test/test-batch-transfer.sh000077500000000000000000000072571317167762300203130ustar00rootroot00000000000000#!/usr/bin/env bash # This is a sample Git LFS test. See test/README.md and testhelpers.sh for # more documentation. . "test/testlib.sh" begin_test "batch transfer" ( set -e # This initializes a new bare git repository in test/remote. # These remote repositories are global to every test, so keep the names # unique. reponame1="$(basename "$0" ".sh")" reponame2="CAPITALLETTERS" reponame=$reponame1$reponame2 setup_remote_repo "$reponame" # Clone the repository from the test Git server. This is empty, and will be # used to test a "git pull" below. The repo is cloned to $TRASHDIR/clone clone_repo "$reponame" clone # Clone the repository again to $TRASHDIR/repo. This will be used to commit # and push objects. clone_repo "$reponame" repo # This executes Git LFS from the local repo that was just cloned. git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") printf "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log grep "master (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat a.dat)" ] # This is a small shell function that runs several git commands together. 
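  # For reference, the blob committed for a.dat is a Git LFS pointer of the
  # form below (size 1 because a.dat holds the single byte "a"), which is
  # presumably what assert_pointer inspects:
  #
  #   version https://git-lfs.github.com/spec/v1
  #   oid sha256:$contents_oid
  #   size 1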
assert_pointer "master" "a.dat" "$contents_oid" 1 refute_server_object "$reponame" "$contents_oid" # This pushes to the remote repository set up at the top of the test. git push origin master 2>&1 | tee push.log grep "(1 of 1 files)" push.log grep "master -> master" push.log assert_server_object "$reponame" "$contents_oid" # change to the clone's working directory cd ../clone git pull [ "a" = "$(cat a.dat)" ] assert_pointer "master" "a.dat" "$contents_oid" 1 ) end_test begin_test "batch transfers occur in reverse order by size" ( set -e reponame="batch-order-test" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" small_contents="small" small_oid="$(calc_oid "$small_contents")" printf "$small_contents" > small.dat bigger_contents="bigger" bigger_oid="$(calc_oid "$bigger_contents")" printf "$bigger_contents" > bigger.dat git add *.dat git commit -m "add small and large objects" GIT_CURL_VERBOSE=1 git push origin master 2>&1 | tee push.log batch="$(grep "{\"operation\":\"upload\"" push.log | head -1)" pos_small="$(substring_position "$batch" "$small_oid")" pos_large="$(substring_position "$batch" "$bigger_oid")" # Assert that the the larger object shows up earlier in the batch than the # smaller object [ "$pos_large" -lt "$pos_small" ] ) end_test begin_test "batch transfers with ssh endpoint" ( set -e reponame="batch-ssh" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" sshurl="${GITSERVER/http:\/\//ssh://git@}/$reponame" git config lfs.url "$sshurl" git lfs env contents="test" oid="$(calc_oid "$contents")" git lfs track "*.dat" printf "$contents" > test.dat git add .gitattributes test.dat git commit -m "initial commit" git push origin master 2>&1 ) end_test begin_test "batch transfers with ntlm server" ( set -e reponame="ntlmtest" setup_remote_repo "$reponame" printf "ntlmdomain\\\ntlmuser:ntlmpass" > "$CREDSDIR/127.0.0.1--$reponame" clone_repo "$reponame" "$reponame" contents="test" oid="$(calc_oid "$contents")" git lfs track "*.dat" printf "$contents" > test.dat git add .gitattributes test.dat git commit -m "initial commit" GIT_CURL_VERBOSE=1 git push origin master 2>&1 ) end_test git-lfs-2.3.4/test/test-batch-unknown-oids.sh000077500000000000000000000012621317167762300211100ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "transfer queue rejects unknown OIDs" ( set -e reponame="unknown-oids" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="unknown-oid" printf "$contents" > a.dat git add a.dat git commit -m "add objects" set +e git push origin master 2>&1 | tee push.log res="${PIPESTATUS[0]}" set -e refute_server_object "$reponame" "$(calc_oid "$contents")" if [ "0" -eq "$res" ]; then echo "push successful?" exit 1 fi grep "\[unknown-oid\] The server returned an unknown OID." push.log ) end_test git-lfs-2.3.4/test/test-checkout.sh000077500000000000000000000064571317167762300172160ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "checkout" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="something something" contentsize=19 contents_oid=$(calc_oid "$contents") echo "Same content everywhere is ok, just one object in lfs db" printf "$contents" > file1.dat printf "$contents" > file2.dat printf "$contents" > file3.dat mkdir folder1 folder2 printf "$contents" > folder1/nested.dat printf "$contents" > folder2/nested.dat git add file1.dat file2.dat file3.dat folder1/nested.dat folder2/nested.dat git add .gitattributes git commit -m "add files" [ "$contents" = "$(cat file1.dat)" ] [ "$contents" = "$(cat file2.dat)" ] [ "$contents" = "$(cat file3.dat)" ] [ "$contents" = "$(cat folder1/nested.dat)" ] [ "$contents" = "$(cat folder2/nested.dat)" ] assert_pointer "master" "file1.dat" "$contents_oid" $contentsize # Remove the working directory rm -rf file1.dat file2.dat file3.dat folder1/nested.dat folder2/nested.dat echo "checkout should replace all" git lfs checkout [ "$contents" = "$(cat file1.dat)" ] [ "$contents" = "$(cat file2.dat)" ] [ "$contents" = "$(cat file3.dat)" ] [ "$contents" = "$(cat folder1/nested.dat)" ] [ "$contents" = "$(cat folder2/nested.dat)" ] # Remove again rm -rf file1.dat file2.dat file3.dat folder1/nested.dat folder2/nested.dat echo "checkout with filters" git lfs checkout file2.dat [ "$contents" = "$(cat file2.dat)" ] [ ! -f file1.dat ] [ ! -f file3.dat ] [ ! -f folder1/nested.dat ] [ ! -f folder2/nested.dat ] echo "quotes to avoid shell globbing" git lfs checkout "file*.dat" [ "$contents" = "$(cat file1.dat)" ] [ "$contents" = "$(cat file3.dat)" ] [ ! -f folder1/nested.dat ] [ ! -f folder2/nested.dat ] echo "test subdir context" pushd folder1 git lfs checkout nested.dat [ "$contents" = "$(cat nested.dat)" ] [ ! -f ../folder2/nested.dat ] # test '.' in current dir rm nested.dat git lfs checkout . [ "$contents" = "$(cat nested.dat)" ] popd echo "test folder param" git lfs checkout folder2 [ "$contents" = "$(cat folder2/nested.dat)" ] echo "test '.' in current dir" rm -rf file1.dat file2.dat file3.dat folder1/nested.dat folder2/nested.dat git lfs checkout . [ "$contents" = "$(cat file1.dat)" ] [ "$contents" = "$(cat file2.dat)" ] [ "$contents" = "$(cat file3.dat)" ] [ "$contents" = "$(cat folder1/nested.dat)" ] [ "$contents" = "$(cat folder2/nested.dat)" ] echo "test checkout with missing data doesn't fail" git push origin master rm -rf .git/lfs/objects rm file*.dat git lfs checkout [ "$(pointer $contents_oid $contentsize)" = "$(cat file1.dat)" ] [ "$(pointer $contents_oid $contentsize)" = "$(cat file2.dat)" ] [ "$(pointer $contents_oid $contentsize)" = "$(cat file3.dat)" ] [ "$contents" = "$(cat folder1/nested.dat)" ] [ "$contents" = "$(cat folder2/nested.dat)" ] ) end_test begin_test "checkout: outside git repository" ( set +e git lfs checkout 2>&1 > checkout.log res=$? set -e if [ "$res" = "0" ]; then echo "Passes because $GIT_LFS_TEST_DIR is unset." exit 0 fi [ "$res" = "128" ] grep "Not in a git repository" checkout.log ) end_test git-lfs-2.3.4/test/test-chunked-transfer-encoding.sh000077500000000000000000000035331317167762300224300ustar00rootroot00000000000000#!/usr/bin/env bash # This is a sample Git LFS test. See test/README.md and testhelpers.sh for # more documentation. . "test/testlib.sh" begin_test "chunked transfer encoding" ( set -e # This initializes a new bare git repository in test/remote. 
# These remote repositories are global to every test, so keep the names # unique. reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" # Clone the repository from the test Git server. This is empty, and will be # used to test a "git pull" below. The repo is cloned to $TRASHDIR/clone clone_repo "$reponame" clone # Clone the repository again to $TRASHDIR/repo. This will be used to commit # and push objects. clone_repo "$reponame" repo # This executes Git LFS from the local repo that was just cloned. git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") # Regular Git commands can be used. printf "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log grep "master (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat a.dat)" ] # This is a small shell function that runs several git commands together. assert_pointer "master" "a.dat" "$contents_oid" 1 refute_server_object "$reponame" "$contents_oid" # This pushes to the remote repository set up at the top of the test. git push origin master 2>&1 | tee push.log grep "(1 of 1 files)" push.log grep "master -> master" push.log assert_server_object "$reponame" "$contents_oid" # change to the clone's working directory cd ../clone git pull 2>&1 [ "a" = "$(cat a.dat)" ] assert_pointer "master" "a.dat" "$contents_oid" 1 ) end_test git-lfs-2.3.4/test/test-clean.sh000077500000000000000000000062421317167762300164630ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" clean_setup () { mkdir "$1" cd "$1" git init } begin_test "clean simple file" ( set -e clean_setup "simple" echo "whatever" | git lfs clean | tee clean.log [ "$(pointer cd293be6cea034bd45a0352775a219ef5dc7825ce55d1f7dae9762d80ce64411 9)" = "$(cat clean.log)" ] ) end_test begin_test "clean a pointer" ( set -e clean_setup "pointer" pointer cd293be6cea034bd45a0352775a219ef5dc7825ce55d1f7dae9762d80ce64411 9 | git lfs clean | tee clean.log [ "$(pointer cd293be6cea034bd45a0352775a219ef5dc7825ce55d1f7dae9762d80ce64411 9)" = "$(cat clean.log)" ] ) end_test begin_test "clean pseudo pointer" ( set -e clean_setup "pseudo" echo "version https://git-lfs.github.com/spec/v1 oid sha256:7cd8be1d2cd0dd22cd9d229bb6b5785009a05e8b39d405615d882caac56562b5 size 1024 This is my test pointer. There are many like it, but this one is mine." 
| git lfs clean | tee clean.log [ "$(pointer f492acbebb5faa22da4c1501c022af035469f624f426631f31936575873fefe1 202)" = "$(cat clean.log)" ] ) end_test begin_test "clean pseudo pointer with extra data" ( set -e clean_setup "extra-data" # pointer includes enough extra data to fill the 'git lfs clean' buffer printf "version https://git-lfs.github.com/spec/v1 oid sha256:7cd8be1d2cd0dd22cd9d229bb6b5785009a05e8b39d405615d882caac56562b5 size 1024 \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n This is my test pointer. There are many like it, but this one is mine.\n" | git lfs clean | tee clean.log [ "$(pointer c2f909f6961bf85a92e2942ef3ed80c938a3d0ebaee6e72940692581052333be 586)" = "$(cat clean.log)" ] ) end_test begin_test "clean stdin" ( set -e # git-lfs-clean(1) writes to .git/lfs/objects, and therefore must be executed # within a repository. reponame="clean-over-stdin" git init "$reponame" cd "$reponame" base64 /dev/urandom | head -c 1024 > small.dat base64 /dev/urandom | head -c 2048 > large.dat expected_small="$(calc_oid_file "small.dat")" expected_large="$(calc_oid_file "large.dat")" actual_small="$(git lfs clean < "small.dat" | grep "oid" | cut -d ':' -f 2)" actual_large="$(git lfs clean < "large.dat" | grep "oid" | cut -d ':' -f 2)" if [ "$expected_small" != "$actual_small" ]; then echo >&2 "fatal: expected small OID of: $expected_small, got: $actual_small" exit 1 fi if [ "$expected_large" != "$actual_large" ]; then echo >&2 "fatal: expected large OID of: $expected_large, got: $actual_large" exit 1 fi ) end_test git-lfs-2.3.4/test/test-clone-deprecated.sh000077500000000000000000000007241317167762300205760ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" ensure_git_version_isnt $VERSION_LOWER "2.15.0" begin_test "clone (deprecated on new versions of Git)" ( set -e reponame="clone-deprecated-recent-versions" setup_remote_repo "$reponame" mkdir -p "$reponame" pushd "$reponame" > /dev/null git lfs clone "$GITSERVER/$reponame" 2>&1 | tee clone.log grep "WARNING: 'git lfs clone' is deprecated and will not be updated" clone.log popd > /dev/null ) end_test git-lfs-2.3.4/test/test-clone.sh000077500000000000000000000407331317167762300165040ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" ensure_git_version_isnt $VERSION_LOWER "2.2.0" begin_test "clone" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # generate some test data & commits with random LFS data echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":100}, {\"Filename\":\"file2.dat\",\"Size\":75}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":110}, {\"Filename\":\"file3.dat\",\"Size\":66}, {\"Filename\":\"file4.dat\",\"Size\":23}] }, { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":120}, {\"Filename\":\"file6.dat\",\"Size\":30}] } ]" | lfstest-testutils addcommits git push origin master # Now clone again, test specific clone dir cd "$TRASHDIR" newclonedir="testclone1" git lfs clone "$GITSERVER/$reponame" "$newclonedir" 2>&1 | tee lfsclone.log grep "Cloning into" lfsclone.log grep "Git LFS:" lfsclone.log # should be no filter errors [ ! $(grep "filter" lfsclone.log) ] [ ! $(grep "error" lfsclone.log) ] # should be cloned into location as per arg [ -d "$newclonedir" ] # check a few file sizes to make sure pulled pushd "$newclonedir" [ $(wc -c < "file1.dat") -eq 110 ] [ $(wc -c < "file2.dat") -eq 75 ] [ $(wc -c < "file3.dat") -eq 66 ] assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd # Now check clone with implied dir rm -rf "$reponame" git lfs clone "$GITSERVER/$reponame" 2>&1 | tee lfsclone.log grep "Cloning into" lfsclone.log grep "Git LFS:" lfsclone.log # should be no filter errors [ ! $(grep "filter" lfsclone.log) ] [ ! $(grep "error" lfsclone.log) ] # clone location should be implied [ -d "$reponame" ] pushd "$reponame" [ $(wc -c < "file1.dat") -eq 110 ] [ $(wc -c < "file2.dat") -eq 75 ] [ $(wc -c < "file3.dat") -eq 66 ] assert_hooks "$(dot_git_dir)" [ ! -e "lfs" ] assert_clean_status popd ) end_test begin_test "cloneSSL" ( set -e if $TRAVIS; then echo "Skipping SSL tests, Travis has weird behaviour in validating custom certs, test locally only" exit 0 fi reponame="test-cloneSSL" setup_remote_repo "$reponame" clone_repo_ssl "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # generate some test data & commits with random LFS data echo "[ { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":100}, {\"Filename\":\"file2.dat\",\"Size\":75}] }, { \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":30}] } ]" | lfstest-testutils addcommits git push origin master # Now SSL clone again with 'git lfs clone', test specific clone dir cd "$TRASHDIR" newclonedir="testcloneSSL1" git lfs clone "$SSLGITSERVER/$reponame" "$newclonedir" 2>&1 | tee lfsclone.log assert_clean_status grep "Cloning into" lfsclone.log grep "Git LFS:" lfsclone.log # should be no filter errors [ ! $(grep "filter" lfsclone.log) ] [ ! 
$(grep "error" lfsclone.log) ] # should be cloned into location as per arg [ -d "$newclonedir" ] # check a few file sizes to make sure pulled pushd "$newclonedir" [ $(wc -c < "file1.dat") -eq 100 ] [ $(wc -c < "file2.dat") -eq 75 ] [ $(wc -c < "file3.dat") -eq 30 ] assert_hooks "$(dot_git_dir)" popd # Now check SSL clone with standard 'git clone' and smudge download rm -rf "$reponame" git clone "$SSLGITSERVER/$reponame" ) end_test begin_test "clone ClientCert" ( set -e reponame="test-cloneClientCert" setup_remote_repo "$reponame" clone_repo_clientcert "$reponame" "$reponame" if [ $(grep -c "client-cert-mac-openssl" clone_client_cert.log) -gt 0 ]; then echo "Skipping due to SSL client cert bug in Git" exit 0 fi git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # generate some test data & commits with random LFS data echo "[ { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":100}, {\"Filename\":\"file2.dat\",\"Size\":75}] }, { \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":30}] } ]" | lfstest-testutils addcommits git push origin master # Now clone again with 'git lfs clone', test specific clone dir cd "$TRASHDIR" newclonedir="testcloneClietCert1" git lfs clone "$CLIENTCERTGITSERVER/$reponame" "$newclonedir" 2>&1 | tee lfsclone.log grep "Cloning into" lfsclone.log grep "Git LFS:" lfsclone.log # should be no filter errors [ ! $(grep "filter" lfsclone.log) ] [ ! $(grep "error" lfsclone.log) ] # should be cloned into location as per arg [ -d "$newclonedir" ] # check a few file sizes to make sure pulled pushd "$newclonedir" [ $(wc -c < "file1.dat") -eq 100 ] [ $(wc -c < "file2.dat") -eq 75 ] [ $(wc -c < "file3.dat") -eq 30 ] assert_hooks "$(dot_git_dir)" assert_clean_status popd # Now check SSL clone with standard 'git clone' and smudge download rm -rf "$reponame" git clone "$CLIENTCERTGITSERVER/$reponame" ) end_test begin_test "clone with flags" ( set -e reponame="$(basename "$0" ".sh")-flags" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # generate some test data & commits with random LFS data echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":100}, {\"Filename\":\"file2.dat\",\"Size\":75}] }, { \"CommitDate\":\"$(get_date -7d)\", \"NewBranch\":\"branch2\", \"Files\":[ {\"Filename\":\"fileonbranch2.dat\",\"Size\":66}] }, { \"CommitDate\":\"$(get_date -3d)\", \"ParentBranches\":[\"master\"], \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":120}, {\"Filename\":\"file4.dat\",\"Size\":30}] } ]" | lfstest-testutils addcommits git push origin master branch2 # Now clone again, test specific clone dir cd "$TRASHDIR" mkdir "$TRASHDIR/templatedir" newclonedir="testflagsclone1" # many of these flags won't do anything but make sure they're not rejected git lfs clone --template "$TRASHDIR/templatedir" --local --no-hardlinks --shared --verbose --progress --recursive "$GITSERVER/$reponame" "$newclonedir" rm -rf "$newclonedir" # specific test for --no-checkout git lfs clone --quiet --no-checkout "$GITSERVER/$reponame" "$newclonedir" if [ -e "$newclonedir/file1.dat" ]; then exit 1 fi rm -rf "$newclonedir" # specific test for --branch and --origin git lfs clone --branch branch2 --recurse-submodules --origin differentorigin "$GITSERVER/$reponame" "$newclonedir" pushd "$newclonedir" # this file is only on branch2 [ -e "fileonbranch2.dat" ] # confirm remote is called 
differentorigin git remote get-url differentorigin assert_hooks "$(dot_git_dir)" popd rm -rf "$newclonedir" # specific test for --separate-git-dir gitdir="$TRASHDIR/separategitdir" git lfs clone --separate-git-dir "$gitdir" "$GITSERVER/$reponame" "$newclonedir" # .git should be a file not dir if [ -d "$newclonedir/.git" ]; then exit 1 fi [ -e "$newclonedir/.git" ] [ -d "$gitdir/objects" ] assert_hooks "$gitdir" rm -rf "$newclonedir" rm -rf "$gitdir" # specific test for --bare git lfs clone --bare "$GITSERVER/$reponame" "$newclonedir" [ -d "$newclonedir/objects" ] rm -rf "$newclonedir" # short flags git lfs clone -l -v -n -s -b branch2 "$GITSERVER/$reponame" "$newclonedir" rm -rf "$newclonedir" ) end_test begin_test "clone (with include/exclude args)" ( set -e reponame="clone_include_exclude" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents_a="a" contents_a_oid=$(calc_oid "$contents_a") printf "$contents_a" > "a.dat" printf "$contents_a" > "a-dupe.dat" printf "$contents_a" > "dupe-a.dat" contents_b="b" contents_b_oid=$(calc_oid "$contents_b") printf "$contents_b" > "b.dat" git add *.dat .gitattributes git commit -m "add a.dat, b.dat" 2>&1 | tee commit.log grep "master (root-commit)" commit.log grep "5 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 a-dupe.dat" commit.log grep "create mode 100644 dupe-a.dat" commit.log grep "create mode 100644 b.dat" commit.log grep "create mode 100644 .gitattributes" commit.log git push origin master 2>&1 | tee push.log grep "master -> master" push.log grep "Git LFS: (2 of 2 files)" push.log cd "$TRASHDIR" local_reponame="clone_with_includes" git lfs clone "$GITSERVER/$reponame" "$local_reponame" -I "a*.dat" pushd "$local_reponame" assert_local_object "$contents_a_oid" 1 refute_local_object "$contents_b_oid" [ "a" = "$(cat a.dat)" ] [ "a" = "$(cat a-dupe.dat)" ] [ "$(pointer $contents_a_oid 1)" = "$(cat dupe-a.dat)" ] [ "$(pointer $contents_b_oid 1)" = "$(cat b.dat)" ] assert_hooks "$(dot_git_dir)" popd local_reponame="clone_with_excludes" git lfs clone "$GITSERVER/$reponame" "$local_reponame" -I "b.dat" -X "a.dat" pushd "$local_reponame" assert_local_object "$contents_b_oid" 1 refute_local_object "$contents_a_oid" [ "$(pointer $contents_a_oid 1)" = "$(cat a.dat)" ] [ "b" = "$(cat b.dat)" ] assert_hooks "$(dot_git_dir)" popd ) end_test begin_test "clone (with .lfsconfig)" ( set -e reponame="clone_with_lfsconfig" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents_a="a" contents_a_oid=$(calc_oid "$contents_a") printf "$contents_a" > "a.dat" contents_b="b" contents_b_oid=$(calc_oid "$contents_b") printf "$contents_b" > "b.dat" git add a.dat b.dat .gitattributes git commit -m "add a.dat, b.dat" 2>&1 | tee commit.log grep "master (root-commit)" commit.log grep "3 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 b.dat" commit.log grep "create mode 100644 .gitattributes" commit.log git config -f ".lfsconfig" "lfs.fetchinclude" "a*" git add ".lfsconfig" git commit -m "config lfs.fetchinclude a*" 2>&1 | tee commit.log grep "master" commit.log grep "1 file changed" commit.log grep "create mode 100644 .lfsconfig" commit.log git push origin master 2>&1 | tee push.log grep "master -> master" push.log grep "Git LFS: (2 of 2 files)" push.log pushd "$TRASHDIR" echo "test: clone 
with lfs.fetchinclude in .lfsconfig" local_reponame="clone_with_config_include" set +x git lfs clone "$GITSERVER/$reponame" "$local_reponame" ok="$?" set -x if [ "0" -ne "$ok" ]; then # TEMP: used to catch transient failure from above `clone` command, as in: # https://github.com/git-lfs/git-lfs/pull/1782#issuecomment-267678319 echo >&2 "[!] \`git lfs clone $GITSERVER/$reponame $local_reponame\` failed" git lfs logs last exit 1 fi pushd "$local_reponame" assert_local_object "$contents_a_oid" 1 refute_local_object "$contents_b_oid" assert_hooks "$(dot_git_dir)" popd echo "test: clone with lfs.fetchinclude in .lfsconfig, and args" local_reponame="clone_with_config_include_and_args" git lfs clone "$GITSERVER/$reponame" "$local_reponame" -I "b.dat" pushd "$local_reponame" refute_local_object "$contents_a_oid" assert_local_object "$contents_b_oid" 1 assert_hooks "$(dot_git_dir)" popd popd git config -f ".lfsconfig" "lfs.fetchinclude" "b*" git config -f ".lfsconfig" "lfs.fetchexclude" "a*" git add .lfsconfig git commit -m "config lfs.fetchinclude a*" 2>&1 | tee commit.log grep "master" commit.log grep "1 file changed" commit.log git push origin master 2>&1 | tee push.log grep "master -> master" push.log pushd "$TRASHDIR" echo "test: clone with lfs.fetchexclude in .lfsconfig" local_reponame="clone_with_config_exclude" git lfs clone "$GITSERVER/$reponame" "$local_reponame" pushd "$local_reponame" cat ".lfsconfig" assert_local_object "$contents_b_oid" 1 refute_local_object "$contents_a_oid" assert_hooks "$(dot_git_dir)" popd echo "test: clone with lfs.fetchexclude in .lfsconfig, and args" local_reponame="clone_with_config_exclude_and_args" git lfs clone "$GITSERVER/$reponame" "$local_reponame" -I "a.dat" -X "b.dat" pushd "$local_reponame" assert_local_object "$contents_a_oid" 1 refute_local_object "$contents_b_oid" assert_hooks "$(dot_git_dir)" popd popd ) end_test begin_test "clone with submodules" ( set -e # set up a doubly nested submodule, each with LFS content reponame="submod-root" submodname1="submod-level1" submodname2="submod-level2" setup_remote_repo "$reponame" setup_remote_repo "$submodname1" setup_remote_repo "$submodname2" clone_repo "$submodname2" submod2 git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents_sub2="Inception. Now, before you bother telling me it's impossible..." contents_sub2_oid=$(calc_oid "$contents_sub2") printf "$contents_sub2" > "sub2.dat" git add sub2.dat .gitattributes git commit -m "Nested submodule level 2" git push origin master clone_repo "$submodname1" submod1 git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents_sub1="We're dreaming?" contents_sub1_oid=$(calc_oid "$contents_sub1") printf "$contents_sub1" > "sub1.dat" # add submodule2 as submodule of submodule1 git submodule add "$GITSERVER/$submodname2" sub2 git submodule update git add sub2 sub1.dat .gitattributes git commit -m "Nested submodule level 1" git push origin master clone_repo "$reponame" rootrepo git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents_root="Downwards is the only way forwards." 
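# calc_oid (from test/testhelpers.sh) derives an object ID the same way Git
# LFS names objects: a SHA-256 digest of the raw content. A minimal sketch,
# assuming `shasum` is on PATH (the real helper may differ):
#
#   calc_oid () {
#     printf "$1" | shasum -a 256 | cut -f 1 -d " "
#   }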
contents_root_oid=$(calc_oid "$contents_root") printf "$contents_root" > "root.dat" # add submodule1 as submodule of root git submodule add "$GITSERVER/$submodname1" sub1 git submodule update git add sub1 root.dat .gitattributes git commit -m "Root repo" git push origin master pushd "$TRASHDIR" local_reponame="submod-clone" git lfs clone --recursive "$GITSERVER/$reponame" "$local_reponame" # check everything is where it should be cd $local_reponame assert_hooks "$(dot_git_dir)" # check LFS store and working copy assert_local_object "$contents_root_oid" "${#contents_root}" [ $(wc -c < "root.dat") -eq ${#contents_root} ] # and so on for nested subs cd sub1 assert_local_object "$contents_sub1_oid" "${#contents_sub1}" [ $(wc -c < "sub1.dat") -eq ${#contents_sub1} ] cd sub2 assert_local_object "$contents_sub2_oid" "${#contents_sub2}" [ $(wc -c < "sub2.dat") -eq ${#contents_sub2} ] popd ) end_test begin_test "clone in current directory" ( set -e reponame="clone_in_current_dir" setup_remote_repo "$reponame" clone_repo "$reponame" $reponame git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="contents" contents_oid="$(calc_oid "$contents")" printf "$contents" > a.dat git add .gitattributes a.dat git commit -m "initial commit" 2>&1 | tee commit.log grep "master (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log git push origin master 2>&1 | tee push.log pushd $TRASHDIR mkdir "$reponame-clone" cd "$reponame-clone" git lfs clone $GITSERVER/$reponame "." 2>&1 | grep "Git LFS" assert_local_object "$contents_oid" 8 assert_hooks "$(dot_git_dir)" [ ! -f ./lfs ] popd ) end_test begin_test "clone empty repository" ( set -e reponame="clone_empty" setup_remote_repo "$reponame" cd "$TRASHDIR" git lfs clone "$GITSERVER/$reponame" "$reponame" 2>&1 | tee clone.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected clone to succeed ..." exit 1 fi ) end_test begin_test "clone bare empty repository" ( set -e reponame="clone_bare_empty" setup_remote_repo "$reponame" cd "$TRASHDIR" git lfs clone "$GITSERVER/$reponame" "$reponame" --bare 2>&1 | tee clone.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected clone to succeed ..." exit 1 fi ) end_test git-lfs-2.3.4/test/test-commit-delete-push.sh000077500000000000000000000030021317167762300210750ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "commit, delete, then push" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" deleted_oid=$(calc_oid "deleted\n") echo "deleted" > deleted.dat git add deleted.dat .gitattributes git commit -m "add deleted file" git lfs push origin master --dry-run | grep "push ee31ef227442936872744b50d3297385c08b40ffc7baeaf34a39e6d81d6cd9ee => deleted.dat" assert_pointer "master" "deleted.dat" "$deleted_oid" 8 added_oid=$(calc_oid "added\n") echo "added" > added.dat git add added.dat git commit -m "add file" git lfs push origin master --dry-run | tee dryrun.log grep "push ee31ef227442936872744b50d3297385c08b40ffc7baeaf34a39e6d81d6cd9ee => deleted.dat" dryrun.log grep "push 3428719b7688c78a0cc8ba4b9e80b4e464c815fbccfd4b20695a15ffcefc22af => added.dat" dryrun.log git rm deleted.dat git commit -m "did not need deleted.dat after all" git lfs push origin master --dry-run 2>&1 | tee dryrun.log grep "push ee31ef227442936872744b50d3297385c08b40ffc7baeaf34a39e6d81d6cd9ee => deleted.dat" dryrun.log grep "push 3428719b7688c78a0cc8ba4b9e80b4e464c815fbccfd4b20695a15ffcefc22af => added.dat" dryrun.log git log git push origin master 2>&1 > push.log || { cat push.log git lfs logs last exit 1 } grep "(2 of 2 files)" push.log | cat push.log assert_server_object "$reponame" "$deleted_oid" assert_server_object "$reponame" "$added_oid" ) end_test git-lfs-2.3.4/test/test-config.sh000077500000000000000000000102331317167762300166410ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "default config" ( set -e reponame="default-config" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/$reponame" git lfs env | tee env.log grep "Endpoint=$GITSERVER/$reponame.git/info/lfs (auth=none)" env.log git config --file=.gitconfig lfs.url http://gitconfig-file-ignored git config --file=.lfsconfig lfs.url http://lfsconfig-file git config --file=.lfsconfig lfs.http://lfsconfig-file.access lfsconfig git lfs env | tee env.log grep "Endpoint=http://lfsconfig-file (auth=lfsconfig)" env.log git config --file=.lfsconfig --unset lfs.url git config --file=.lfsconfig --unset lfs.http://lfsconfig-file.access # new endpoint url from local git config # access setting no longer applied git config lfs.url http://local-lfsconfig git lfs env | tee env.log grep "Endpoint=http://local-lfsconfig (auth=none)" env.log # add the access setting to lfsconfig git config --file=.lfsconfig lfs.http://local-lfsconfig.access lfsconfig git lfs env | tee env.log grep "Endpoint=http://local-lfsconfig (auth=lfsconfig)" env.log git config --file=.lfsconfig --unset lfs.http://local-lfsconfig.access # add the access setting to git config git config lfs.http://local-lfsconfig.access gitconfig git lfs env | tee env.log grep "Endpoint=http://local-lfsconfig (auth=gitconfig)" env.log ) end_test begin_test "extension config" ( set -e git config --global lfs.extension.env-test.clean "env-test-clean" git config --global lfs.extension.env-test.smudge "env-test-smudge" git config --global lfs.extension.env-test.priority 0 reponame="extension-config" mkdir $reponame cd $reponame git init expected0="Extension: env-test clean = env-test-clean smudge = env-test-smudge priority = 0" [ "$expected0" = "$(git lfs ext)" ] # any git config takes precedence over .lfsconfig git config --global --unset lfs.extension.env-test.priority git config --file=.lfsconfig lfs.extension.env-test.clean "file-env-test-clean" git config --file=.lfsconfig 
lfs.extension.env-test.smudge "file-env-test-smudge" git config --file=.lfsconfig lfs.extension.env-test.priority 1 cat .lfsconfig expected1="Extension: env-test clean = env-test-clean smudge = env-test-smudge priority = 1" [ "$expected1" = "$(GIT_TRACE=5 git lfs ext)" ] git config lfs.extension.env-test.clean "local-env-test-clean" git config lfs.extension.env-test.smudge "local-env-test-smudge" git config lfs.extension.env-test.priority 2 expected2="Extension: env-test clean = local-env-test-clean smudge = local-env-test-smudge priority = 2" [ "$expected2" = "$(git lfs ext)" ] ) end_test begin_test "url alias config" ( set -e mkdir url-alias cd url-alias git init # When more than one insteadOf strings match a given URL, the longest match is used. git config url."http://wrong-url/".insteadOf alias git config url."http://actual-url/".insteadOf alias: git config lfs.url alias:rest git lfs env | tee env.log grep "Endpoint=http://actual-url/rest (auth=none)" env.log ) end_test begin_test "ambiguous url alias" ( set -e mkdir url-alias-ambiguous cd url-alias-ambiguous git init git config url."http://actual-url/".insteadOf alias: git config url."http://dupe-url".insteadOf alias: git config lfs.url alias:rest git config -l | grep url git lfs env 2>&1 | tee env2.log grep "WARNING: Multiple 'url.*.insteadof'" env2.log ) end_test begin_test "url alias must be prefix" ( set -e mkdir url-alias-bad cd url-alias-bad git init git config url."http://actual-url/".insteadOf alias: git config lfs.url badalias:rest git lfs env | tee env.log grep "Endpoint=badalias:rest (auth=none)" env.log ) end_test begin_test "config: ignoring unsafe lfsconfig keys" ( set -e reponame="config-unsafe-lfsconfig-keys" git init "$reponame" cd "$reponame" # Insert an 'unsafe' key into this repository's '.lfsconfig'. git config --file=.lfsconfig core.askpass unsafe git lfs status 2>&1 | tee status.log grep "WARNING: These unsafe lfsconfig keys were ignored:" status.log grep " core.askpass" status.log ) end_test git-lfs-2.3.4/test/test-credentials-no-prompt.sh000077500000000000000000000014001317167762300216160ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" # these tests rely on GIT_TERMINAL_PROMPT to test properly ensure_git_version_isnt $VERSION_LOWER "2.3.0" begin_test "attempt private access without credential helper" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" without-creds git lfs track "*.dat" echo "hi" > hi.dat git add hi.dat git add .gitattributes git commit -m "initial commit" git config --global credential.helper lfsnoop git config credential.helper lfsnoop git config -l GIT_TERMINAL_PROMPT=0 git push origin master 2>&1 | tee push.log grep "Authorization error: $GITSERVER/$reponame" push.log || grep "Git credentials for $GITSERVER/$reponame not found" push.log ) end_test git-lfs-2.3.4/test/test-credentials.sh000077500000000000000000000154611317167762300177010ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" ensure_git_version_isnt $VERSION_LOWER "2.3.0" begin_test "credentials without useHttpPath, with bad path password" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" printf "path:wrong" > "$CREDSDIR/127.0.0.1--$reponame" clone_repo "$reponame" without-path git config credential.useHttpPath false git checkout -b without-path git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") printf "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" git push origin without-path 2>&1 | tee push.log grep "(1 of 1 files)" push.log ) end_test begin_test "credentials with useHttpPath, with wrong password" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" printf "path:wrong" > "$CREDSDIR/127.0.0.1--$reponame" clone_repo "$reponame" with-path-wrong-pass git checkout -b with-path-wrong-pass git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") printf "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" git push origin with-path-wrong-pass 2>&1 | tee push.log [ "0" = "$(grep -c "(1 of 1 files)" push.log)" ] ) end_test begin_test "credentials with useHttpPath, with correct password" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" printf "path:$reponame" > "$CREDSDIR/127.0.0.1--$reponame" clone_repo "$reponame" with-path-correct-pass git checkout -b with-path-correct-pass git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # creating new branch does not re-send any objects existing on other # remote branches anymore, generate new object, different from prev tests contents="b" contents_oid=$(calc_oid "$contents") printf "$contents" > b.dat git add b.dat git add .gitattributes git commit -m "add b.dat" git push origin with-path-correct-pass 2>&1 | tee push.log grep "(1 of 1 files)" push.log ) end_test begin_test "git credential" ( set -e printf "git:server" > "$CREDSDIR/credential-test.com" printf "git:path" > "$CREDSDIR/credential-test.com--some-path" mkdir empty cd empty git init echo "protocol=http host=credential-test.com path=some/path" | GIT_TERMINAL_PROMPT=0 git credential fill > cred.log cat cred.log expected="protocol=http host=credential-test.com path=some/path username=git password=path" [ "$expected" = "$(cat cred.log)" ] git config credential.useHttpPath false echo "protocol=http host=credential-test.com" | GIT_TERMINAL_PROMPT=0 git credential fill > cred.log cat cred.log expected="protocol=http host=credential-test.com username=git password=server" [ "$expected" = "$(cat cred.log)" ] echo "protocol=http host=credential-test.com path=some/path" | GIT_TERMINAL_PROMPT=0 git credential fill > cred.log cat cred.log expected="protocol=http host=credential-test.com username=git password=server" [ "$expected" = "$(cat cred.log)" ] ) end_test if [[ $(uname) == *"MINGW"* ]]; then NETRCFILE="$HOME/_netrc" else NETRCFILE="$HOME/.netrc" fi begin_test "credentials from netrc" ( set -e printf "machine localhost\nlogin netrcuser\npassword netrcpass\n" >> "$NETRCFILE" echo $HOME echo "GITSERVER $GITSERVER" cat $NETRCFILE # prevent prompts on Windows particularly export SSH_ASKPASS= reponame="netrctest" setup_remote_repo "$reponame" clone_repo "$reponame" repo # Need a remote named "localhost" or 127.0.0.1 in netrc will interfere with the other auth git remote add "netrc" "$(echo $GITSERVER | sed 
s/127.0.0.1/localhost/)/netrctest" git lfs env git lfs track "*.dat" echo "push a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git lfs push netrc master 2>&1 | tee push.log grep "(1 of 1 files)" push.log ) end_test begin_test "credentials from netrc with bad password" ( set -e printf "machine localhost\nlogin netrcuser\npassword badpass\n" >> "$NETRCFILE" echo $HOME echo "GITSERVER $GITSERVER" cat $NETRCFILE # prevent prompts on Windows particularly export SSH_ASKPASS= reponame="netrctest" setup_remote_repo "$reponame" clone_repo "$reponame" repo2 # Need a remote named "localhost" or 127.0.0.1 in netrc will interfere with the other auth git remote add "netrc" "$(echo $GITSERVER | sed s/127.0.0.1/localhost/)/netrctest" git lfs env git lfs track "*.dat" echo "push a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git push netrc master 2>&1 | tee push.log [ "0" = "$(grep -c "(1 of 1 files)" push.log)" ] ) end_test begin_test "credentials from lfs.url" ( set -e reponame="requirecreds" setup_remote_repo "$reponame" clone_repo "$reponame" requirecreds-lfsurl git lfs track "*.dat" echo "push a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" echo "bad push" git lfs env git lfs push origin master 2>&1 | tee push.log grep "(0 of 1 files)" push.log echo "good push" gitserverhost=$(echo "$GITSERVER" | cut -d'/' -f3) git config lfs.url http://requirecreds:pass@$gitserverhost/$reponame.git/info/lfs git lfs env git lfs push origin master 2>&1 | tee push.log grep "(1 of 1 files)" push.log echo "bad fetch" rm -rf .git/lfs/objects git config lfs.url http://$gitserverhost/$reponame.git/info/lfs git lfs env git lfs fetch --all 2>&1 | tee fetch.log grep "(0 of 1 files)" fetch.log echo "good fetch" rm -rf .git/lfs/objects git config lfs.url http://requirecreds:pass@$gitserverhost/$reponame.git/info/lfs git lfs env git lfs fetch --all 2>&1 | tee fetch.log grep "(1 of 1 files)" fetch.log ) end_test begin_test "credentials from remote.origin.url" ( set -e reponame="requirecreds" setup_remote_repo "$reponame" clone_repo "$reponame" requirecreds-remoteurl git lfs track "*.dat" echo "push b" > b.dat git add .gitattributes b.dat git commit -m "add b.dat" echo "bad push" git lfs env git lfs push origin master 2>&1 | tee push.log grep "(0 of 1 files)" push.log echo "good push" gitserverhost=$(echo "$GITSERVER" | cut -d'/' -f3) git config remote.origin.url http://requirecreds:pass@$gitserverhost/$reponame.git git lfs env git lfs push origin master 2>&1 | tee push.log grep "(1 of 1 files)" push.log echo "bad fetch" rm -rf .git/lfs/objects git config remote.origin.url http://$gitserverhost/$reponame.git git lfs env git lfs fetch --all 2>&1 | tee fetch.log grep "(0 of 1 files)" fetch.log echo "good fetch" rm -rf .git/lfs/objects git config remote.origin.url http://requirecreds:pass@$gitserverhost/$reponame.git git lfs env git lfs fetch --all 2>&1 | tee fetch.log grep "(1 of 1 files)" fetch.log ) end_test git-lfs-2.3.4/test/test-custom-transfers.sh000077500000000000000000000221321317167762300207140ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "custom-transfer-wrong-path" ( set -e # this repo name is the indicator to the server to support custom transfer reponame="test-custom-transfer-fail" setup_remote_repo "$reponame" clone_repo "$reponame" $reponame # deliberately incorrect path git config lfs.customtransfer.testcustom.path path-to-nothing git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="jksgdfljkgsdlkjafg lsjdgf alkjgsd lkfjag sldjkgf alkjsgdflkjagsd kljfg asdjgf kalsd" contents_oid=$(calc_oid "$contents") printf "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin master 2>&1 | tee pushcustom.log # use PIPESTATUS otherwise we get exit code from tee res=${PIPESTATUS[0]} grep "xfer: adapter \"testcustom\" Begin()" pushcustom.log grep "Failed to start custom transfer command" pushcustom.log if [ "$res" = "0" ]; then echo "Push should have failed because of an incorrect custom transfer path." exit 1 fi ) end_test begin_test "custom-transfer-upload-download" ( set -e # this repo name is the indicator to the server to support custom transfer reponame="test-custom-transfer-1" setup_remote_repo "$reponame" clone_repo "$reponame" $reponame # set up custom transfer adapter git config lfs.customtransfer.testcustom.path lfstest-customadapter git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Tracking" # set up a decent amount of data so that there's work for multiple concurrent adapters echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"verify.dat\",\"Size\":18,\"Data\":\"send-verify-action\"}, {\"Filename\":\"file1.dat\",\"Size\":1024}, {\"Filename\":\"file2.dat\",\"Size\":750}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":1050}, {\"Filename\":\"file3.dat\",\"Size\":660}, {\"Filename\":\"file4.dat\",\"Size\":230}] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":1200}, {\"Filename\":\"file6.dat\",\"Size\":300}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":120}, {\"Filename\":\"file5.dat\",\"Size\":450}, {\"Filename\":\"file7.dat\",\"Size\":520}, {\"Filename\":\"file8.dat\",\"Size\":2048}] } ]" | lfstest-testutils addcommits GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin master 2>&1 | tee pushcustom.log # use PIPESTATUS otherwise we get exit code from tee [ ${PIPESTATUS[0]} = "0" ] grep "xfer: started custom adapter process" pushcustom.log grep "xfer\[lfstest-customadapter\]:" pushcustom.log grep "12 of 12 files" pushcustom.log rm -rf .git/lfs/objects GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git lfs fetch --all 2>&1 | tee fetchcustom.log [ ${PIPESTATUS[0]} = "0" ] grep "xfer: started custom adapter process" fetchcustom.log grep "xfer\[lfstest-customadapter\]:" fetchcustom.log grep "12 of 12 files" fetchcustom.log grep "Terminating test custom adapter gracefully" fetchcustom.log objectlist=`find .git/lfs/objects -type f` [ "$(echo "$objectlist" | wc -l)" -eq 12 ] ) end_test begin_test "custom-transfer-standalone" ( set -e # setup a git repo to be used as a local repo, not remote reponame="test-custom-transfer-standalone" setup_remote_repo "$reponame" # clone directly, not through lfstest-gitserver clone_repo_url "$REMOTEDIR/$reponame.git" $reponame # set up custom transfer adapter to use a specific transfer agent git config 
lfs.customtransfer.testcustom.path lfstest-standalonecustomadapter git config lfs.customtransfer.testcustom.concurrent false git config lfs.standalonetransferagent testcustom export TEST_STANDALONE_BACKUP_PATH="$(pwd)/test-custom-transfer-standalone-backup" mkdir -p $TEST_STANDALONE_BACKUP_PATH rm -rf $TEST_STANDALONE_BACKUP_PATH/* git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Tracking" # set up a decent amount of data so that there's work for multiple concurrent adapters echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"verify.dat\",\"Size\":18,\"Data\":\"send-verify-action\"}, {\"Filename\":\"file1.dat\",\"Size\":1024}, {\"Filename\":\"file2.dat\",\"Size\":750}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":1050}, {\"Filename\":\"file3.dat\",\"Size\":660}, {\"Filename\":\"file4.dat\",\"Size\":230}] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":1200}, {\"Filename\":\"file6.dat\",\"Size\":300}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":120}, {\"Filename\":\"file5.dat\",\"Size\":450}, {\"Filename\":\"file7.dat\",\"Size\":520}, {\"Filename\":\"file8.dat\",\"Size\":2048}] } ]" | lfstest-testutils addcommits GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin master 2>&1 | tee pushcustom.log # use PIPESTATUS otherwise we get exit code from tee [ ${PIPESTATUS[0]} = "0" ] # Make sure the lock verification is not attempted. grep "locks/verify$" pushcustom.log && false grep "xfer: started custom adapter process" pushcustom.log grep "xfer\[lfstest-standalonecustomadapter\]:" pushcustom.log grep "12 of 12 files" pushcustom.log rm -rf .git/lfs/objects GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git lfs fetch --all 2>&1 | tee fetchcustom.log [ ${PIPESTATUS[0]} = "0" ] grep "xfer: started custom adapter process" fetchcustom.log grep "xfer\[lfstest-standalonecustomadapter\]:" fetchcustom.log grep "12 of 12 files" fetchcustom.log grep "Terminating test custom adapter gracefully" fetchcustom.log objectlist=`find .git/lfs/objects -type f` [ "$(echo "$objectlist" | wc -l)" -eq 12 ] ) end_test begin_test "custom-transfer-standalone-urlmatch" ( set -e # setup a git repo to be used as a local repo, not remote reponame="test-custom-transfer-standalone-urlmatch" setup_remote_repo "$reponame" # clone directly, not through lfstest-gitserver clone_repo_url "$REMOTEDIR/$reponame.git" $reponame # set up custom transfer adapter to use a specific transfer agent, using a URL prefix match git config lfs.customtransfer.testcustom.path lfstest-standalonecustomadapter git config lfs.customtransfer.testcustom.concurrent false git config remote.origin.lfsurl https://git.example.com/example/path/to/repo git config lfs.https://git.example.com/example/path/.standalonetransferagent testcustom git config lfs.standalonetransferagent invalid-agent # git config lfs.standalonetransferagent testcustom export TEST_STANDALONE_BACKUP_PATH="$(pwd)/test-custom-transfer-standalone-urlmatch-backup" mkdir -p $TEST_STANDALONE_BACKUP_PATH rm -rf $TEST_STANDALONE_BACKUP_PATH/* git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log git add .gitattributes git commit -m "Tracking" # set up a decent amount of data so that there's work for multiple concurrent adapters echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"verify.dat\",\"Size\":18,\"Data\":\"send-verify-action\"}, 
{\"Filename\":\"file1.dat\",\"Size\":1024}, {\"Filename\":\"file2.dat\",\"Size\":750}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":1050}, {\"Filename\":\"file3.dat\",\"Size\":660}, {\"Filename\":\"file4.dat\",\"Size\":230}] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Size\":1200}, {\"Filename\":\"file6.dat\",\"Size\":300}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":120}, {\"Filename\":\"file5.dat\",\"Size\":450}, {\"Filename\":\"file7.dat\",\"Size\":520}, {\"Filename\":\"file8.dat\",\"Size\":2048}] } ]" | lfstest-testutils addcommits GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin master 2>&1 | tee pushcustom.log # use PIPESTATUS otherwise we get exit code from tee [ ${PIPESTATUS[0]} = "0" ] # Make sure the lock verification is not attempted. grep "locks/verify$" pushcustom.log && false grep "xfer: started custom adapter process" pushcustom.log grep "xfer\[lfstest-standalonecustomadapter\]:" pushcustom.log grep "12 of 12 files" pushcustom.log rm -rf .git/lfs/objects GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git lfs fetch --all 2>&1 | tee fetchcustom.log [ ${PIPESTATUS[0]} = "0" ] grep "xfer: started custom adapter process" fetchcustom.log grep "xfer\[lfstest-standalonecustomadapter\]:" fetchcustom.log grep "12 of 12 files" fetchcustom.log grep "Terminating test custom adapter gracefully" fetchcustom.log objectlist=`find .git/lfs/objects -type f` [ "$(echo "$objectlist" | wc -l)" -eq 12 ] ) end_test git-lfs-2.3.4/test/test-duplicate-oids.sh000077500000000000000000000027021317167762300203040ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "multiple revs with same OID get pushed once" ( set -e reponame="mutliple-revs-one-oid" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="contents" contents_oid="$(calc_oid "$contents")" # Stash the contents of the file that we want to commit in .git/lfs/objects. object_dir="$(echo $contents_oid \ | awk '{ print substr($0, 0, 2) "/" substr($0, 3, 2) }')" mkdir -p ".git/lfs/objects/$object_dir" printf "$contents" > ".git/lfs/objects/$object_dir/$contents_oid" # Create a pointer with the old "http://git-media.io" spec legacy_pointer="$(pointer $contents_oid 8 http://git-media.io/v/2)" # Create a pointer with the latest spec to create a modification, but leave # the OID untouched. latest_pointer="$(pointer $contents_oid 8)" # Commit the legacy pointer printf "$legacy_pointer" > a.dat git add a.dat git commit -m "commit legacy" # Commit the new pointer, causing a diff on a.dat, but leaving the OID # unchanged. printf "$latest_pointer" > a.dat git add a.dat git commit -m "commit latest" # Delay the push until here, so the server doesn't have a copy of the OID that # we're trying to push. git push origin master 2>&1 | tee push.log grep "Git LFS: (1 of 1 files)" push.log assert_server_object "$reponame" "$contents_oid" ) end_test git-lfs-2.3.4/test/test-env.sh000077500000000000000000000564121317167762300161750ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" envInitConfig='git config filter.lfs.process = "git-lfs filter-process" git config filter.lfs.smudge = "git-lfs smudge -- %f" git config filter.lfs.clean = "git-lfs clean -- %f"' begin_test "env with no remote" ( set -e reponame="env-no-remote" mkdir $reponame cd $reponame git init localwd=$(native_path "$TRASHDIR/$reponame") localgit=$(native_path "$TRASHDIR/$reponame/.git") localgitstore=$(native_path "$TRASHDIR/$reponame/.git") lfsstorage=$(native_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(native_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(native_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT")") expected=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env) contains_same_elements "$expected" "$actual" ) end_test begin_test "env with origin remote" ( set -e reponame="env-origin-remote" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/env-origin-remote" endpoint="$GITSERVER/$reponame.git/info/lfs (auth=none)" localwd=$(native_path "$TRASHDIR/$reponame") localgit=$(native_path "$TRASHDIR/$reponame/.git") localgitstore=$(native_path "$TRASHDIR/$reponame/.git") lfsstorage=$(native_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(native_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(native_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT")") expected=$(printf '%s %s Endpoint=%s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic %s %s ' "$(git lfs version)" "$(git version)" "$endpoint" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env) contains_same_elements "$expected" "$actual" cd .git expected2=$(echo "$expected" | sed -e 's/LocalWorkingDir=.*/LocalWorkingDir=/') actual2=$(git lfs env) contains_same_elements "$expected2" "$actual2" ) end_test begin_test "env with multiple remotes" ( set -e reponame="env-multiple-remotes" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/env-origin-remote" git remote add other "$GITSERVER/env-other-remote" endpoint="$GITSERVER/env-origin-remote.git/info/lfs (auth=none)" endpoint2="$GITSERVER/env-other-remote.git/info/lfs (auth=none)" localwd=$(native_path "$TRASHDIR/$reponame") localgit=$(native_path "$TRASHDIR/$reponame/.git") localgitstore=$(native_path "$TRASHDIR/$reponame/.git") lfsstorage=$(native_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(native_path "$TRASHDIR/$reponame/.git/lfs/objects") 
tempdir=$(native_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT")") expected=$(printf '%s %s Endpoint=%s Endpoint (other)=%s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic %s %s ' "$(git lfs version)" "$(git version)" "$endpoint" "$endpoint2" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env) contains_same_elements "$expected" "$actual" cd .git expected2=$(echo "$expected" | sed -e 's/LocalWorkingDir=.*/LocalWorkingDir=/') actual2=$(git lfs env) contains_same_elements "$expected2" "$actual2" ) end_test begin_test "env with other remote" ( set -e reponame="env-other-remote" mkdir $reponame cd $reponame git init git remote add other "$GITSERVER/env-other-remote" endpoint="$GITSERVER/env-other-remote.git/info/lfs (auth=none)" localwd=$(native_path "$TRASHDIR/$reponame") localgit=$(native_path "$TRASHDIR/$reponame/.git") localgitstore=$(native_path "$TRASHDIR/$reponame/.git") lfsstorage=$(native_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(native_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(native_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT")") expected=$(printf '%s %s Endpoint (other)=%s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic %s %s ' "$(git lfs version)" "$(git version)" "$endpoint" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env) contains_same_elements "$expected" "$actual" cd .git expected2=$(echo "$expected" | sed -e 's/LocalWorkingDir=.*/LocalWorkingDir=/') actual2=$(git lfs env) contains_same_elements "$expected2" "$actual2" ) end_test begin_test "env with multiple remotes and lfs.url config" ( set -e reponame="env-multiple-remotes-with-lfs-url" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/env-origin-remote" git remote add other "$GITSERVER/env-other-remote" git config lfs.url "http://foo/bar" endpoint="$GITSERVER/env-other-remote.git/info/lfs (auth=none)" localwd=$(native_path "$TRASHDIR/$reponame") localgit=$(native_path "$TRASHDIR/$reponame/.git") localgitstore=$(native_path "$TRASHDIR/$reponame/.git") lfsstorage=$(native_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(native_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(native_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT")") expected=$(printf '%s %s Endpoint=http://foo/bar (auth=none) Endpoint (other)=%s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false 
SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic %s %s ' "$(git lfs version)" "$(git version)" "$endpoint" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env) contains_same_elements "$expected" "$actual" cd .git expected2=$(echo "$expected" | sed -e 's/LocalWorkingDir=.*/LocalWorkingDir=/') actual2=$(git lfs env) contains_same_elements "$expected2" "$actual2" ) end_test begin_test "env with multiple remotes and lfs configs" ( set -e reponame="env-multiple-remotes-lfs-configs" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/env-origin-remote" git remote add other "$GITSERVER/env-other-remote" git config lfs.url "http://foo/bar" git config remote.origin.lfsurl "http://custom/origin" git config remote.other.lfsurl "http://custom/other" localwd=$(native_path "$TRASHDIR/$reponame") localgit=$(native_path "$TRASHDIR/$reponame/.git") localgitstore=$(native_path "$TRASHDIR/$reponame/.git") lfsstorage=$(native_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(native_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(native_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT")") expected=$(printf '%s %s Endpoint=http://foo/bar (auth=none) Endpoint (other)=http://custom/other (auth=none) LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env) contains_same_elements "$expected" "$actual" cd .git expected2=$(echo "$expected" | sed -e 's/LocalWorkingDir=.*/LocalWorkingDir=/') actual2=$(git lfs env) contains_same_elements "$expected2" "$actual2" ) end_test begin_test "env with multiple remotes and lfs url and batch configs" ( set -e reponame="env-multiple-remotes-lfs-batch-configs" mkdir $reponame cd $reponame git init git remote add origin "$GITSERVER/env-origin-remote" git remote add other "$GITSERVER/env-other-remote" git config lfs.url "http://foo/bar" git config lfs.concurrenttransfers 5 git config remote.origin.lfsurl "http://custom/origin" git config remote.other.lfsurl "http://custom/other" localwd=$(native_path "$TRASHDIR/$reponame") localgit=$(native_path "$TRASHDIR/$reponame/.git") localgitstore=$(native_path "$TRASHDIR/$reponame/.git") lfsstorage=$(native_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(native_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(native_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT")") expected=$(printf '%s %s Endpoint=http://foo/bar (auth=none) Endpoint (other)=http://custom/other (auth=none) LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=5 TusTransfers=false 
BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env) contains_same_elements "$expected" "$actual" cd .git expected2=$(echo "$expected" | sed -e 's/LocalWorkingDir=.*/LocalWorkingDir=/') actual2=$(git lfs env) contains_same_elements "$expected2" "$actual2" ) end_test begin_test "env with .lfsconfig" ( set -e reponame="env-with-lfsconfig" git init $reponame cd $reponame git remote add origin "$GITSERVER/env-origin-remote" echo '[remote "origin"] lfsurl = http://foobar:8080/ [lfs] batch = false concurrenttransfers = 5 ' > .lfsconfig echo '[remote "origin"] lfsurl = http://foobar:5050/ [lfs] batch = true concurrenttransfers = 50 ' > .gitconfig localwd=$(native_path "$TRASHDIR/$reponame") localgit=$(native_path "$TRASHDIR/$reponame/.git") localgitstore=$(native_path "$TRASHDIR/$reponame/.git") lfsstorage=$(native_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(native_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(native_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT")") expected=$(printf '%s %s Endpoint=http://foobar:8080/ (auth=none) LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env) contains_same_elements "$expected" "$actual" mkdir a cd a actual2=$(git lfs env) contains_same_elements "$expected" "$actual2" ) end_test begin_test "env with environment variables" ( set -e reponame="env-with-envvars" git init $reponame mkdir -p $reponame/a/b/c gitDir=$(native_path "$TRASHDIR/$reponame/.git") workTree=$(native_path "$TRASHDIR/$reponame/a/b") localwd=$(native_path "$TRASHDIR/$reponame/a/b") localgit=$(native_path "$TRASHDIR/$reponame/.git") localgitstore=$(native_path "$TRASHDIR/$reponame/.git") lfsstorage=$(native_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(native_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(native_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars="$(GIT_DIR=$gitDir GIT_WORK_TREE=$workTree env | grep "^GIT" | sort)" expected=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" 
"$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(GIT_DIR=$gitDir GIT_WORK_TREE=$workTree git lfs env) contains_same_elements "$expected" "$actual" cd $TRASHDIR/$reponame actual2=$(GIT_DIR=$gitDir GIT_WORK_TREE=$workTree git lfs env) contains_same_elements "$expected" "$actual2" cd $TRASHDIR/$reponame/.git actual3=$(GIT_DIR=$gitDir GIT_WORK_TREE=$workTree git lfs env) contains_same_elements "$expected" "$actual3" cd $TRASHDIR/$reponame/a/b/c actual4=$(GIT_DIR=$gitDir GIT_WORK_TREE=$workTree git lfs env) contains_same_elements "$expected" "$actual4" envVars="$(GIT_DIR=$gitDir GIT_WORK_TREE=a/b env | grep "^GIT" | sort)" expected5=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars") actual5=$(GIT_DIR=$gitDir GIT_WORK_TREE=a/b git lfs env) contains_same_elements "$expected5" "$actual5" cd $TRASHDIR/$reponame/a/b envVars="$(GIT_DIR=$gitDir env | grep "^GIT" | sort)" expected7=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual7=$(GIT_DIR=$gitDir git lfs env) contains_same_elements "$expected7" "$actual7" cd $TRASHDIR/$reponame/a envVars="$(GIT_WORK_TREE=$workTree env | grep "^GIT" | sort)" expected8=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual8=$(GIT_WORK_TREE=$workTree git lfs env) contains_same_elements "$expected8" "$actual8" ) end_test begin_test "env with bare repo" ( set -e reponame="env-with-bare-repo" git init --bare $reponame cd $reponame localgit=$(native_path "$TRASHDIR/$reponame") localgitstore=$(native_path "$TRASHDIR/$reponame") lfsstorage=$(native_path "$TRASHDIR/$reponame/lfs") localmedia=$(native_path "$TRASHDIR/$reponame/lfs/objects") tempdir=$(native_path "$TRASHDIR/$reponame/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT")") expected=$(printf "%s\n%s\n LocalWorkingDir= LocalGitDir=%s LocalGitStorageDir=%s 
LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic %s %s " "$(git lfs version)" "$(git version)" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env) contains_same_elements "$expected" "$actual" ) end_test begin_test "env with multiple ssh remotes" ( set -e reponame="env-with-ssh" mkdir $reponame cd $reponame git init git remote add origin git@git-server.com:user/repo.git git remote add other git@other-git-server.com:user/repo.git expected='Endpoint=https://git-server.com/user/repo.git/info/lfs (auth=none) SSH=git@git-server.com:user/repo.git Endpoint (other)=https://other-git-server.com/user/repo.git/info/lfs (auth=none) SSH=git@other-git-server.com:user/repo.git GIT_SSH=lfs-ssh-echo' contains_same_elements "$expected" "$(git lfs env | grep -e "Endpoint" -e "SSH=")" ) end_test begin_test "env with skip download errors" ( set -e reponame="env-with-skip-dl" git init $reponame cd $reponame git config lfs.skipdownloaderrors 1 localgit=$(native_path "$TRASHDIR/$reponame") localgitstore=$(native_path "$TRASHDIR/$reponame") lfsstorage=$(native_path "$TRASHDIR/$reponame/lfs") localmedia=$(native_path "$TRASHDIR/$reponame/lfs/objects") tempdir=$(native_path "$TRASHDIR/$reponame/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT")") localwd=$(native_path "$TRASHDIR/$reponame") localgit=$(native_path "$TRASHDIR/$reponame/.git") localgitstore=$(native_path "$TRASHDIR/$reponame/.git") lfsstorage=$(native_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(native_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(native_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT")") expectedenabled=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=true FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env) contains_same_elements "$expectedenabled" "$actual" git config --unset lfs.skipdownloaderrors # prove it's usually off expecteddisabled=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env) contains_same_elements
"$expecteddisabled" "$actual" # now enable via env var actual=$(GIT_LFS_SKIP_DOWNLOAD_ERRORS=1 git lfs env) contains_same_elements "$expectedenabled" "$actual" ) end_test begin_test "env with extra transfer methods" ( set -e reponame="env-with-transfers" git init $reponame cd $reponame git config lfs.tustransfers true git config lfs.customtransfer.supertransfer.path /path/to/something localgit=$(native_path "$TRASHDIR/$reponame") localgitstore=$(native_path "$TRASHDIR/$reponame") lfsstorage=$(native_path "$TRASHDIR/$reponame/lfs") localmedia=$(native_path "$TRASHDIR/$reponame/lfs/objects") tempdir=$(native_path "$TRASHDIR/$reponame/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT")") localwd=$(native_path "$TRASHDIR/$reponame") localgit=$(native_path "$TRASHDIR/$reponame/.git") localgitstore=$(native_path "$TRASHDIR/$reponame/.git") lfsstorage=$(native_path "$TRASHDIR/$reponame/.git/lfs") localmedia=$(native_path "$TRASHDIR/$reponame/.git/lfs/objects") tempdir=$(native_path "$TRASHDIR/$reponame/.git/lfs/tmp") envVars=$(printf "%s" "$(env | grep "^GIT")") expectedenabled=$(printf '%s %s LocalWorkingDir=%s LocalGitDir=%s LocalGitStorageDir=%s LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 TusTransfers=true BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=%s AccessDownload=none AccessUpload=none DownloadTransfers=basic,supertransfer UploadTransfers=basic,supertransfer,tus %s %s ' "$(git lfs version)" "$(git version)" "$localwd" "$localgit" "$localgitstore" "$localmedia" "$tempdir" "$lfsstorage" "$envVars" "$envInitConfig") actual=$(git lfs env) contains_same_elements "$expectedenabled" "$actual" ) end_test git-lfs-2.3.4/test/test-expired.sh000077500000000000000000000027121317167762300170370ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" declare -a expiration_types=("absolute" "relative" "both") for typ in "${expiration_types[@]}"; do begin_test "expired action ($typ time)" ( set -e reponame="expired-$typ" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="contents" contents_oid="$(calc_oid "$contents")" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" printf "$contents" > a.dat git add a.dat git commit -m "add a.dat" GIT_TRACE=1 git push origin master 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected push to fail, didn't" exit 1 fi refute_server_object "$reponame" "$contents_oid" ) end_test done for typ in "${expiration_types[@]}"; do begin_test "ssh expired ($typ time)" ( set -e reponame="ssh-expired-$typ" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" sshurl="${GITSERVER/http:\/\//ssh://git@}/$reponame" git config lfs.url "$sshurl" git config lfs.cachecredentials "true" contents="contents" contents_oid="$(calc_oid "$contents")" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" printf "$contents" > a.dat git add a.dat git commit -m "add a.dat" GIT_TRACE=1 git push origin master 2>&1 | tee push.log grep "ssh cache expired" push.log ) end_test done git-lfs-2.3.4/test/test-ext.sh000077500000000000000000000026761317167762300162100ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "ext" ( set -e # no need to setup a remote repo, since this test does not need to push or pull mkdir ext cd ext git init git config lfs.extension.foo.clean "foo-clean %f" git config lfs.extension.foo.smudge "foo-smudge %f" git config lfs.extension.foo.priority 0 git config lfs.extension.bar.clean "bar-clean %f" git config lfs.extension.bar.smudge "bar-smudge %f" git config lfs.extension.bar.priority 1 git config lfs.extension.baz.clean "baz-clean %f" git config lfs.extension.baz.smudge "baz-smudge %f" git config lfs.extension.baz.priority 2 fooExpected="Extension: foo clean = foo-clean %f smudge = foo-smudge %f priority = 0" barExpected="Extension: bar clean = bar-clean %f smudge = bar-smudge %f priority = 1" bazExpected="Extension: baz clean = baz-clean %f smudge = baz-smudge %f priority = 2" actual=$(git lfs ext list foo) [ "$actual" = "$fooExpected" ] actual=$(git lfs ext list bar) [ "$actual" = "$barExpected" ] actual=$(git lfs ext list baz) [ "$actual" = "$bazExpected" ] actual=$(git lfs ext list foo bar) expected=$(printf "%s\n%s" "$fooExpected" "$barExpected") [ "$actual" = "$expected" ] actual=$(git lfs ext list) expected=$(printf "%s\n%s\n%s" "$fooExpected" "$barExpected" "$bazExpected") [ "$actual" = "$expected" ] actual=$(git lfs ext) [ "$actual" = "$expected" ] ) end_test git-lfs-2.3.4/test/test-extra-header.sh000077500000000000000000000011531317167762300177460ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "http..extraHeader" ( set -e reponame="copy-headers" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" url="$(git config remote.origin.url).git/info/lfs" git config --add "http.$url.extraHeader" "X-Foo: bar" git config --add "http.$url.extraHeader" "X-Foo: baz" git lfs track "*.dat" printf "contents" > a.dat git add .gitattributes a.dat git commit -m "initial commit" GIT_CURL_VERBOSE=1 GIT_TRACE=1 git push origin master 2>&1 | tee curl.log grep "> X-Foo: bar" curl.log grep "> X-Foo: baz" curl.log ) end_test git-lfs-2.3.4/test/test-fetch-include.sh000077500000000000000000000034001317167762300201040ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" reponame="$(basename "$0" ".sh")" contents="big file" contents_oid=$(calc_oid "$contents") begin_test "fetch: setup for include test" ( set -e setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.big" mkdir -p big/a mkdir -p big/b printf "$contents" > big/a/a1.big printf "$contents" > big/b/b1.big contents2="big file 2" printf "$contents2" > big/big1.big printf "$contents2" > big/big2.big printf "$contents2" > big/big3.big git add .gitattributes big git commit -m "commit" | tee commit.log grep "6 files changed" commit.log grep "create mode 100644 .gitattributes" commit.log grep "create mode 100644 big/a/a1.big" commit.log grep "create mode 100644 big/b/b1.big" commit.log grep "create mode 100644 big/big1.big" commit.log grep "create mode 100644 big/big2.big" commit.log grep "create mode 100644 big/big3.big" commit.log git push origin master | tee push.log grep "2 of 2 files" push.log assert_server_object "$reponame" "$contents_oid" ) end_test begin_test "fetch: include first matching file" ( set -e mkdir clone-1 cd clone-1 git init git lfs install --local --skip-smudge git remote add origin $GITSERVER/$reponame git pull origin master refute_local_object "$contents_oid" git lfs ls-files git lfs fetch --include=big/a assert_local_object "$contents_oid" "8" ) end_test begin_test "fetch: include second matching file" ( set -e mkdir clone-2 cd clone-2 git init git lfs install --local --skip-smudge git remote add origin $GITSERVER/$reponame git pull origin master refute_local_object "$contents_oid" git lfs ls-files git lfs fetch --include=big/b assert_local_object "$contents_oid" "8" ) end_test git-lfs-2.3.4/test/test-fetch-paths.sh000077500000000000000000000036141317167762300176070ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" reponame="$(basename "$0" ".sh")" contents="a" contents_oid=$(calc_oid "$contents") begin_test "init fetch unclean paths" ( set -e setup_remote_repo $reponame clone_repo $reponame repo git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log mkdir dir printf "$contents" > dir/a.dat git add dir/a.dat git add .gitattributes git commit -m "add dir/a.dat" 2>&1 | tee commit.log grep "master (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 dir/a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat dir/a.dat)" ] assert_local_object "$contents_oid" 1 refute_server_object "$contents_oid" git push origin master 2>&1 | tee push.log grep "(1 of 1 files)" push.log grep "master -> master" push.log assert_server_object "$reponame" "$contents_oid" # This clone is used for subsequent tests clone_repo "$reponame" clone ) end_test begin_test "fetch unclean paths with include filter in gitconfig" ( set -e cd clone rm -rf .git/lfs/objects git config "lfs.fetchinclude" "dir/" git lfs fetch assert_local_object "$contents_oid" 1 ) end_test begin_test "fetch unclean paths with exclude filter in gitconfig" ( set -e cd clone rm -rf .git/lfs/objects git config --unset "lfs.fetchinclude" git config "lfs.fetchexclude" "dir/" git lfs fetch refute_local_object "$contents_oid" ) end_test begin_test "fetch unclean paths with include filter in cli" ( set -e cd clone rm -rf .git/lfs/objects git config --unset "lfs.fetchexclude" rm -rf .git/lfs/objects git lfs fetch -I="dir/" assert_local_object "$contents_oid" 1 ) end_test begin_test "fetch unclean paths with exclude filter in cli" ( set -e cd clone rm -rf .git/lfs/objects git lfs fetch -X="dir/" refute_local_object "$contents_oid" ) end_test git-lfs-2.3.4/test/test-fetch-recent.sh000077500000000000000000000122541317167762300177500ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" reponame="fetch-recent" # generate content we'll use content0="filecontent0" content1="filecontent1" content2="filecontent2" content3="filecontent3" content4="filecontent4" content5="filecontent5" oid0=$(calc_oid "$content0") oid1=$(calc_oid "$content1") oid2=$(calc_oid "$content2") oid3=$(calc_oid "$content3") oid4=$(calc_oid "$content4") oid5=$(calc_oid "$content5") begin_test "init fetch-recent" ( set -e setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log echo "[ { \"CommitDate\":\"$(get_date -18d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content0}, \"Data\":\"$content0\"}, {\"Filename\":\"file3.dat\",\"Size\":${#content5}, \"Data\":\"$content5\"}] }, { \"CommitDate\":\"$(get_date -14d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content1}, \"Data\":\"$content1\"}] }, { \"CommitDate\":\"$(get_date -5d)\", \"NewBranch\":\"other_branch\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content4}, \"Data\":\"$content4\"}] }, { \"CommitDate\":\"$(get_date -1d)\", \"ParentBranches\":[\"master\"], \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content2}, \"Data\":\"$content2\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content3}, \"Data\":\"$content3\"}] } ]" | lfstest-testutils addcommits git push origin master git push origin other_branch assert_server_object "$reponame" "$oid0" assert_server_object "$reponame" "$oid1" assert_server_object "$reponame" "$oid2" assert_server_object "$reponame" "$oid3" assert_server_object "$reponame" "$oid4" # This clone is used for subsequent tests clone_repo "$reponame" clone git checkout other_branch git checkout master ) end_test begin_test "fetch-recent normal" ( set -e cd clone rm -rf .git/lfs/objects git config lfs.fetchrecentalways false git config lfs.fetchrecentrefsdays 0 git config lfs.fetchrecentremoterefs false git config lfs.fetchrecentcommitsdays 7 # fetch normally, should just get the last state for file1/2 git lfs fetch origin master assert_local_object "$oid2" "${#content2}" assert_local_object "$oid3" "${#content3}" assert_local_object "$oid5" "${#content5}" refute_local_object "$oid0" refute_local_object "$oid1" refute_local_object "$oid4" ) end_test begin_test "fetch-recent commits" ( set -e cd clone rm -rf .git/lfs/objects # now fetch recent - just commits for now git config lfs.fetchrecentrefsdays 0 git config lfs.fetchrecentremoterefs false git config lfs.fetchrecentcommitsdays 7 git lfs fetch --recent origin # that should have fetched master plus previous state needed within 7 days # current state assert_local_object "$oid2" "${#content2}" assert_local_object "$oid3" "${#content3}" # previous state is the 'before' state of any commits made in last 7 days # ie you can check out anything in last 7 days (may have non-LFS commits in between) assert_local_object "$oid1" "${#content1}" refute_local_object "$oid0" refute_local_object "$oid4" ) end_test begin_test "fetch-recent days" ( set -e cd clone rm -rf .git/lfs/objects # now fetch other_branch as well git config lfs.fetchrecentrefsdays 6 git config lfs.fetchrecentremoterefs false git config lfs.fetchrecentcommitsdays 7 git lfs fetch --recent origin # that should have fetched master plus previous state needed within 7 days # current state PLUS refs within 6 days (& their commits within 7) assert_local_object "$oid2" "${#content2}" assert_local_object "$oid3" "${#content3}" assert_local_object "$oid1" "${#content1}" assert_local_object "$oid4" 
"${#content4}" # still omits oid0 since that's at best 13 days prior to other_branch tip refute_local_object "$oid0" ) end_test begin_test "fetch-recent older commits" ( set -e cd clone # now test that a 14 day limit picks oid0 up from other_branch # because other_branch was itself 5 days ago, 5+14=19 day search limit git config lfs.fetchrecentcommitsdays 14 rm -rf .git/lfs/objects git lfs fetch --recent origin assert_local_object "$oid0" "${#content0}" ) end_test begin_test "fetch-recent remote branch" ( set -e cd "$reponame" # push branch & test remote branch recent git push origin other_branch cd ../clone git branch -D other_branch rm -rf .git/lfs/objects git config lfs.fetchrecentcommitsdays 0 git config lfs.fetchrecentremoterefs false git config lfs.fetchrecentrefsdays 6 git lfs fetch --recent origin # should miss #4 until we include remote branches (#1 will always be missing commitdays=0) assert_local_object "$oid2" "${#content2}" assert_local_object "$oid3" "${#content3}" refute_local_object "$oid1" refute_local_object "$oid0" refute_local_object "$oid4" ) end_test begin_test "fetch-recent remote refs" ( set -e cd clone rm -rf .git/lfs/objects # pick up just snapshot at remote ref, ie #4 git config lfs.fetchrecentremoterefs true git lfs fetch --recent origin assert_local_object "$oid4" "${#content4}" refute_local_object "$oid0" refute_local_object "$oid1" ) end_test git-lfs-2.3.4/test/test-fetch.sh000077500000000000000000000312151317167762300164700ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" contents="a" contents_oid=$(calc_oid "$contents") b="b" b_oid=$(calc_oid "$b") reponame="$(basename "$0" ".sh")" begin_test "init for fetch tests" ( set -e setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log printf "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log grep "master (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat a.dat)" ] assert_local_object "$contents_oid" 1 refute_server_object "$reponame" "$contents_oid" git push origin master 2>&1 | tee push.log grep "(1 of 1 files)" push.log grep "master -> master" push.log assert_server_object "$reponame" "$contents_oid" # Add a file in a different branch git checkout -b newbranch printf "$b" > b.dat git add b.dat git commit -m "add b.dat" assert_local_object "$b_oid" 1 git push origin newbranch assert_server_object "$reponame" "$b_oid" # This clone is used for subsequent tests clone_repo "$reponame" clone ) end_test begin_test "fetch" ( set -e cd clone rm -rf .git/lfs/objects git lfs fetch 2>&1 | grep "(1 of 1 files)" assert_local_object "$contents_oid" 1 ) end_test begin_test "fetch with remote" ( set -e cd clone rm -rf .git/lfs/objects git lfs fetch origin 2>&1 | grep "(1 of 1 files)" assert_local_object "$contents_oid" 1 refute_local_object "$b_oid" 1 ) end_test begin_test "fetch with remote and branches" ( set -e cd clone git checkout newbranch git checkout master rm -rf .git/lfs/objects git lfs fetch origin master newbranch assert_local_object "$contents_oid" 1 assert_local_object "$b_oid" 1 ) end_test begin_test "fetch with master commit sha1" ( set -e cd clone rm -rf .git/lfs/objects master_sha1=$(git rev-parse master) git lfs fetch origin "$master_sha1" assert_local_object "$contents_oid" 1 refute_local_object "$b_oid" 1 ) end_test begin_test "fetch with 
newbranch commit sha1" ( set -e cd clone rm -rf .git/lfs/objects newbranch_sha1=$(git rev-parse newbranch) git lfs fetch origin "$newbranch_sha1" assert_local_object "$contents_oid" 1 assert_local_object "$b_oid" 1 ) end_test begin_test "fetch with include filters in gitconfig" ( set -e cd clone rm -rf .git/lfs/objects git config "lfs.fetchinclude" "a*" git lfs fetch origin master newbranch assert_local_object "$contents_oid" 1 refute_local_object "$b_oid" ) end_test begin_test "fetch with exclude filters in gitconfig" ( set -e cd clone git config --unset "lfs.fetchinclude" rm -rf .git/lfs/objects git config "lfs.fetchexclude" "a*" git lfs fetch origin master newbranch refute_local_object "$contents_oid" assert_local_object "$b_oid" 1 ) end_test begin_test "fetch with include/exclude filters in gitconfig" ( set -e cd clone rm -rf .git/lfs/objects git config --unset "lfs.fetchexclude" git config "lfs.fetchinclude" "a*,b*" git config "lfs.fetchexclude" "c*,d*" git lfs fetch origin master newbranch assert_local_object "$contents_oid" 1 assert_local_object "$b_oid" 1 rm -rf .git/lfs/objects git config "lfs.fetchinclude" "c*,d*" git config "lfs.fetchexclude" "a*,b*" git lfs fetch origin master newbranch refute_local_object "$contents_oid" refute_local_object "$b_oid" ) end_test begin_test "fetch with include filter in cli" ( set -e cd clone git config --unset "lfs.fetchinclude" git config --unset "lfs.fetchexclude" rm -rf .git/lfs/objects git lfs fetch --include="a*" origin master newbranch assert_local_object "$contents_oid" 1 refute_local_object "$b_oid" ) end_test begin_test "fetch with exclude filter in cli" ( set -e cd clone rm -rf .git/lfs/objects git lfs fetch --exclude="a*" origin master newbranch refute_local_object "$contents_oid" assert_local_object "$b_oid" 1 ) end_test begin_test "fetch with include/exclude filters in cli" ( set -e cd clone rm -rf .git/lfs/objects git lfs fetch -I "a*,b*" -X "c*,d*" origin master newbranch assert_local_object "$contents_oid" 1 assert_local_object "$b_oid" 1 rm -rf .git/lfs/objects git lfs fetch --include="c*,d*" --exclude="a*,b*" origin master newbranch refute_local_object "$contents_oid" refute_local_object "$b_oid" ) end_test begin_test "fetch with include filter overriding exclude filter" ( set -e cd clone rm -rf .git/lfs/objects git config lfs.fetchexclude "b*" git lfs fetch -I "b.dat" -X "" origin master newbranch assert_local_object "$b_oid" "1" ) end_test begin_test "fetch with missing object" ( set -e cd clone git config --unset lfs.fetchexclude rm -rf .git/lfs/objects delete_server_object "$reponame" "$b_oid" refute_server_object "$reponame" "$b_oid" # should return non-zero, but should also download all the other valid files too set +e git lfs fetch origin master newbranch fetch_exit=$? 
set -e [ "$fetch_exit" != "0" ] assert_local_object "$contents_oid" 1 refute_local_object "$b_oid" ) end_test begin_test "fetch-all" ( set -e reponame="fetch-all" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log NUMFILES=12 # generate content we'll use for ((a=0; a < NUMFILES ; a++)) do content[$a]="filecontent$a" oid[$a]=$(calc_oid "${content[$a]}") done echo "[ { \"CommitDate\":\"$(get_date -180d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[0]}, \"Data\":\"${content[0]}\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content[1]}, \"Data\":\"${content[1]}\"}] }, { \"NewBranch\":\"branch1\", \"CommitDate\":\"$(get_date -140d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":${#content[2]}, \"Data\":\"${content[2]}\"}] }, { \"ParentBranches\":[\"master\"], \"CommitDate\":\"$(get_date -100d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[3]}, \"Data\":\"${content[3]}\"}] }, { \"NewBranch\":\"remote_branch_only\", \"CommitDate\":\"$(get_date -80d)\", \"Files\":[ {\"Filename\":\"file2.dat\",\"Size\":${#content[4]}, \"Data\":\"${content[4]}\"}] }, { \"ParentBranches\":[\"master\"], \"CommitDate\":\"$(get_date -75d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[5]}, \"Data\":\"${content[5]}\"}] }, { \"NewBranch\":\"tag_only\", \"Tags\":[\"tag1\"], \"CommitDate\":\"$(get_date -70d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[6]}, \"Data\":\"${content[6]}\"}] }, { \"ParentBranches\":[\"master\"], \"CommitDate\":\"$(get_date -60d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[7]}, \"Data\":\"${content[7]}\"}] }, { \"NewBranch\":\"branch3\", \"CommitDate\":\"$(get_date -50d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[8]}, \"Data\":\"${content[8]}\"}] }, { \"CommitDate\":\"$(get_date -40d)\", \"ParentBranches\":[\"master\"], \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[9]}, \"Data\":\"${content[9]}\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content[10]}, \"Data\":\"${content[10]}\"}] }, { \"ParentBranches\":[\"master\"], \"CommitDate\":\"$(get_date -30d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[11]}, \"Data\":\"${content[11]}\"}] } ]" | lfstest-testutils addcommits git push origin master git push origin branch1 git push origin branch3 git push origin remote_branch_only git push origin tag_only for ((a=0; a < NUMFILES ; a++)) do assert_server_object "$reponame" "${oid[$a]}" done # delete remote_branch_only and make sure that objects are downloaded even # though not checked out to a local branch (full backup always) git branch -D remote_branch_only # delete tag_only to make sure objects are downloaded when only reachable from tag git branch -D tag_only rm -rf .git/lfs/objects git lfs fetch --all origin for ((a=0; a < NUMFILES ; a++)) do assert_local_object "${oid[$a]}" "${#content[$a]}" done # Make a bare clone of the repository cd .. git clone --bare "$GITSERVER/$reponame" "$reponame-bare" cd "$reponame-bare" # Preform the same assertion as above, on the same data git lfs fetch --all origin for ((a=0; a < NUMFILES ; a++)); do assert_local_object "${oid[$a]}" "${#content[$a]}" done ) end_test begin_test "fetch: outside git repository" ( set +e git lfs fetch 2>&1 > fetch.log res=$? set -e if [ "$res" = "0" ]; then echo "Passes because $GIT_LFS_TEST_DIR is unset." 
exit 0 fi [ "$res" = "128" ] grep "Not in a git repository" fetch.log ) end_test begin_test "fetch with no origin remote" ( set -e reponame="fetch-no-remote" setup_remote_repo "$reponame" clone_repo "$reponame" no-remote-clone clone_repo "$reponame" no-remote-repo git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") printf "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log grep "master (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat a.dat)" ] assert_local_object "$contents_oid" 1 refute_server_object "$reponame" "$contents_oid" git push origin master 2>&1 | tee push.log grep "(1 of 1 files)" push.log grep "master -> master" push.log # change to the clone's working directory cd ../no-remote-clone # pull commits & lfs git pull 2>&1 assert_local_object "$contents_oid" 1 # now checkout detached HEAD so we're not tracking anything on remote git checkout --detach # delete lfs rm -rf .git/lfs # rename remote from 'origin' to 'something' git remote rename origin something # fetch should still pick this remote as in the case of no tracked remote, # and no origin, but only 1 remote, should pick the only one as default git lfs fetch assert_local_object "$contents_oid" 1 # delete again, now add a second remote, also non-origin rm -rf .git/lfs git remote add something2 "$GITSERVER/$reponame" git lfs fetch 2>&1 | grep "No default remote" refute_local_object "$contents_oid" ) end_test begin_test "fetch --prune" ( set -e reponame="fetch_prune" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log content_head="HEAD content" content_commit2="Content for commit 2 (prune)" content_commit1="Content for commit 1 (prune)" oid_head=$(calc_oid "$content_head") oid_commit2=$(calc_oid "$content_commit2") oid_commit1=$(calc_oid "$content_commit1") echo "[ { \"CommitDate\":\"$(get_date -50d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_commit1}, \"Data\":\"$content_commit1\"}] }, { \"CommitDate\":\"$(get_date -35d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_commit2}, \"Data\":\"$content_commit2\"}] }, { \"CommitDate\":\"$(get_date -25d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_head}, \"Data\":\"$content_head\"}] } ]" | lfstest-testutils addcommits # push all so no unpushed reason to not prune git push origin master # set no recents so max ability to prune git config lfs.fetchrecentrefsdays 0 git config lfs.fetchrecentcommitsdays 0 # delete HEAD object to prove that we still download something # also prune at the same time which will remove anything other than HEAD delete_local_object "$oid_head" git lfs fetch --prune assert_local_object "$oid_head" "${#content_head}" refute_local_object "$oid_commit1" refute_local_object "$oid_commit2" ) end_test begin_test "fetch raw remote url" ( set -e mkdir raw cd raw git init git lfs install --local --skip-smudge git remote add origin $GITSERVER/test-fetch git pull origin master # LFS object not downloaded, pointer in working directory refute_local_object "$contents_oid" grep "$content_oid" a.dat git lfs fetch "$GITSERVER/test-fetch" # LFS object downloaded, pointer still in working directory assert_local_object "$contents_oid" 1 grep "$content_oid" a.dat ) end_test 
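# NOTE: the "fetch raw remote url" test above exercises fetching LFS objects
# with an explicit URL instead of a configured remote. A minimal sketch of
# that usage pattern (the "scratch" directory name is hypothetical; $GITSERVER
# is provided by test/testlib.sh):
#
#   git init scratch && cd scratch
#   git lfs install --local --skip-smudge     # check out pointers, not blobs
#   git remote add origin "$GITSERVER/test-fetch"
#   git pull origin master                    # pointer files only, no objects
#   git lfs fetch "$GITSERVER/test-fetch"     # objects fetched by raw URL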
begin_test "fetch with invalid remote" ( set -e cd repo git lfs fetch not-a-remote 2>&1 | tee fetch.log grep "Invalid remote name" fetch.log ) end_test git-lfs-2.3.4/test/test-filter-branch.sh000077500000000000000000000016201317167762300201140ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "filter-branch (git-lfs/git-lfs#1773)" ( set -e reponame="filter-branch" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents_a="contents (a)" printf "$contents_a" > a.dat git add a.dat git commit -m "add a.dat" contents_b="contents (b)" printf "$contents_b" > b.dat git add b.dat git commit -m "add b.dat" contents_c="contents (c)" printf "$contents_c" > c.dat git add c.dat git commit -m "add c.dat" git filter-branch -f --prune-empty \ --tree-filter ' echo >&2 "---" git rm --cached -r -q . git lfs track "*.dat" git add . ' --tag-name-filter cat -- --all assert_pointer "master" "a.dat" "$(calc_oid "$contents_a")" 12 assert_pointer "master" "b.dat" "$(calc_oid "$contents_b")" 12 assert_pointer "master" "c.dat" "$(calc_oid "$contents_c")" 12 ) end_test git-lfs-2.3.4/test/test-filter-process.sh000077500000000000000000000047161317167762300203460ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" # HACK(taylor): git uses ".g" in the version name to signal that it is # from the "next" branch, which is the only (current) version of Git that has # support for the filter protocol. # ensure_git_version_isnt $VERSION_LOWER "2.11.0" begin_test "filter process: checking out a branch" ( set -e reponame="filter_process_checkout" setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents_a="contents_a" contents_a_oid="$(calc_oid $contents_a)" printf "$contents_a" > a.dat git add a.dat git commit -m "add a.dat" git checkout -b b contents_b="contents_b" contents_b_oid="$(calc_oid $contents_b)" printf "$contents_b" > b.dat git add b.dat git commit -m "add b.dat" git push origin --all pushd .. # Git will choose filter.lfs.process over `filter.lfs.clean` and # `filter.lfs.smudge` GIT_TRACE_PACKET=1 git \ -c "filter.lfs.process=git-lfs filter-process" \ -c "filter.lfs.clean=false"\ -c "filter.lfs.smudge=false" \ -c "filter.lfs.required=true" \ clone "$GITSERVER/$reponame" "$reponame-assert" cd "$reponame-assert" # Assert that we are on the "master" branch, and have a.dat [ "master" = "$(git rev-parse --abbrev-ref HEAD)" ] [ "$contents_a" = "$(cat a.dat)" ] assert_pointer "master" "a.dat" "$contents_a_oid" 10 git checkout b # Assert that we are on the "b" branch, and have b.dat [ "b" = "$(git rev-parse --abbrev-ref HEAD)" ] [ "$contents_b" = "$(cat b.dat)" ] assert_pointer "b" "b.dat" "$contents_b_oid" 10 popd ) end_test begin_test "filter process: adding a file" ( set -e reponame="filter_process_add" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="contents" contents_oid="$(calc_oid "$contents")" printf "$contents" > a.dat git add a.dat expected="$(pointer "$contents_oid" "${#contents}")" got="$(git cat-file -p :a.dat)" diff -u <(echo "$expected") <(echo "$got") ) end_test # https://github.com/git-lfs/git-lfs/issues/1697 begin_test "filter process: add a file with 1024 bytes" ( set -e mkdir repo-issue-1697 cd repo-issue-1697 git init git lfs track "*.dat" dd if=/dev/zero of=first.dat bs=1024 count=1 printf "any contents" > second.dat git add . 
) end_test git-lfs-2.3.4/test/test-fsck.sh000077500000000000000000000056471317167762300163350ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "fsck default" ( set -e reponame="fsck-default" git init $reponame cd $reponame # Create a commit with some files tracked by git-lfs git lfs track "*.dat" echo "test data" > a.dat echo "test data 2" > b.dat git add .gitattributes *.dat git commit -m "first commit" [ "Git LFS fsck OK" = "$(git lfs fsck)" ] aOid=$(git log --patch a.dat | grep "^+oid" | cut -d ":" -f 2) aOid12=$(echo $aOid | cut -b 1-2) aOid34=$(echo $aOid | cut -b 3-4) if [ "$aOid" != "$(calc_oid_file .git/lfs/objects/$aOid12/$aOid34/$aOid)" ]; then echo "oid for a.dat does not match" exit 1 fi bOid=$(git log --patch b.dat | grep "^+oid" | cut -d ":" -f 2) bOid12=$(echo $bOid | cut -b 1-2) bOid34=$(echo $bOid | cut -b 3-4) if [ "$bOid" != "$(calc_oid_file .git/lfs/objects/$bOid12/$bOid34/$bOid)" ]; then echo "oid for b.dat does not match" exit 1 fi echo "CORRUPTION" >> .git/lfs/objects/$aOid12/$aOid34/$aOid moved=$(native_path "$TRASHDIR/$reponame/.git/lfs/bad") expected="$(printf 'Object a.dat (%s) is corrupt Moving corrupt objects to %s' "$aOid" "$moved")" [ "$expected" = "$(git lfs fsck)" ] [ -e ".git/lfs/bad/$aOid" ] [ ! -e ".git/lfs/objects/$aOid12/$aOid34/$aOid" ] [ "$bOid" = "$(calc_oid_file .git/lfs/objects/$bOid12/$bOid34/$bOid)" ] ) end_test begin_test "fsck dry run" ( set -e reponame="fsck-dry-run" git init $reponame cd $reponame # Create a commit with some files tracked by git-lfs git lfs track "*.dat" echo "test data" > a.dat echo "test data 2" > b.dat git add .gitattributes *.dat git commit -m "first commit" [ "Git LFS fsck OK" = "$(git lfs fsck --dry-run)" ] aOid=$(git log --patch a.dat | grep "^+oid" | cut -d ":" -f 2) aOid12=$(echo $aOid | cut -b 1-2) aOid34=$(echo $aOid | cut -b 3-4) if [ "$aOid" != "$(calc_oid_file .git/lfs/objects/$aOid12/$aOid34/$aOid)" ]; then echo "oid for a.dat does not match" exit 1 fi bOid=$(git log --patch b.dat | grep "^+oid" | cut -d ":" -f 2) bOid12=$(echo $bOid | cut -b 1-2) bOid34=$(echo $bOid | cut -b 3-4) if [ "$bOid" != "$(calc_oid_file .git/lfs/objects/$bOid12/$bOid34/$bOid)" ]; then echo "oid for b.dat does not match" exit 1 fi echo "CORRUPTION" >> .git/lfs/objects/$aOid12/$aOid34/$aOid [ "Object a.dat ($aOid) is corrupt" = "$(git lfs fsck --dry-run)" ] if [ "$aOid" = "$(calc_oid_file .git/lfs/objects/$aOid12/$aOid34/$aOid)" ]; then echo "oid for a.dat still matches" exit 1 fi if [ "$bOid" != "$(calc_oid_file .git/lfs/objects/$bOid12/$bOid34/$bOid)" ]; then echo "oid for b.dat does not match" exit 1 fi ) end_test begin_test "fsck: outside git repository" ( set +e git lfs fsck > fsck.log 2>&1 res=$? set -e if [ "$res" = "0" ]; then echo "Passes because $GIT_LFS_TEST_DIR is unset." exit 0 fi [ "$res" = "128" ] grep "Not in a git repository" fsck.log ) end_test git-lfs-2.3.4/test/test-happy-path.sh000077500000000000000000000063041317167762300174530ustar00rootroot00000000000000#!/usr/bin/env bash # This is a sample Git LFS test. See test/README.md and testhelpers.sh for # more documentation. . "test/testlib.sh" begin_test "happy path" ( set -e # This initializes a new bare git repository in test/remote. # These remote repositories are global to every test, so keep the names # unique. reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" # Clone the repository from the test Git server. This is empty, and will be # used to test a "git pull" below.
The repo is cloned to $TRASHDIR/clone clone_repo "$reponame" clone # Clone the repository again to $TRASHDIR/repo. This will be used to commit # and push objects. clone_repo "$reponame" repo # This executes Git LFS from the local repo that was just cloned. git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") # Regular Git commands can be used. printf "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log grep "master (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat a.dat)" ] # This is a small shell function that runs several git commands together. assert_pointer "master" "a.dat" "$contents_oid" 1 refute_server_object "$reponame" "$contents_oid" # This pushes to the remote repository set up at the top of the test. git push origin master 2>&1 | tee push.log grep "(1 of 1 files)" push.log grep "master -> master" push.log assert_server_object "$reponame" "$contents_oid" # change to the clone's working directory cd ../clone git pull [ "a" = "$(cat a.dat)" ] assert_pointer "master" "a.dat" "$contents_oid" 1 ) end_test begin_test "clears local temp objects" ( set -e mkdir repo-temp-objects cd repo-temp-objects git init # abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz01 mkdir -p .git/lfs/objects/go/od mkdir -p .git/lfs/tmp/objects touch .git/lfs/objects/go/od/goodabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwx touch .git/lfs/tmp/objects/goodabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwx-rand123 touch .git/lfs/tmp/objects/goodabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwx-rand456 touch .git/lfs/tmp/objects/badabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxy-rand123 touch .git/lfs/tmp/objects/badabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxy-rand456 GIT_TRACE=5 git lfs env # object file exists [ -e ".git/lfs/objects/go/od/goodabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwx" ] # newer tmp files exist [ -e ".git/lfs/tmp/objects/badabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxy-rand123" ] [ -e ".git/lfs/tmp/objects/badabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxy-rand456" ] # existing tmp files were cleaned up [ ! -e ".git/lfs/tmp/objects/goodabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwx-rand123" ] [ ! -e ".git/lfs/tmp/objects/goodabcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwx-rand456" ] ) end_test git-lfs-2.3.4/test/test-install-custom-hooks-path-unsupported.sh000077500000000000000000000012121317167762300250100ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" # These tests rely on behavior found in Git versions less than 2.9.0 to perform # themselves, specifically: # - lack of core.hooksPath support ensure_git_version_isnt $VERSION_HIGHER "2.9.0" begin_test "install with unsupported core.hooksPath" ( set -e repo_name="unsupported-custom-hooks-path" git init "$repo_name" cd "$repo_name" hooks_dir="custom_hooks_dir" mkdir -p "$hooks_dir" git config --local core.hooksPath "$hooks_dir" git lfs install 2>&1 | tee install.log grep "Updated git hooks" install.log [ ! -e "$hooks_dir/pre-push" ] [ -e ".git/hooks/pre-push" ] ) end_test git-lfs-2.3.4/test/test-install-custom-hooks-path.sh000077500000000000000000000014421317167762300224270ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" # These tests rely on behavior found in 2.9.0 to perform themselves, # specifically: # - core.hooksPath support ensure_git_version_isnt $VERSION_LOWER "2.9.0" begin_test "install with supported core.hooksPath" ( set -e repo_name="supported-custom-hooks-path" git init "$repo_name" cd "$repo_name" hooks_dir="custom_hooks_dir" mkdir -p "$hooks_dir" git config --local core.hooksPath "$hooks_dir" git lfs install 2>&1 | tee install.log grep "Updated git hooks" install.log [ -e "$hooks_dir/pre-push" ] [ ! -e ".git/pre-push" ] [ -e "$hooks_dir/post-checkout" ] [ ! -e ".git/post-checkout" ] [ -e "$hooks_dir/post-commit" ] [ ! -e ".git/post-commit" ] [ -e "$hooks_dir/post-merge" ] [ ! -e ".git/post-merge" ] ) end_test git-lfs-2.3.4/test/test-install.sh000077500000000000000000000207411317167762300170470ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "install again" ( set -e smudge="$(git config filter.lfs.smudge)" clean="$(git config filter.lfs.clean)" filter="$(git config filter.lfs.process)" printf "$smudge" | grep "git-lfs smudge" printf "$clean" | grep "git-lfs clean" printf "$filter" | grep "git-lfs filter-process" git lfs install [ "$smudge" = "$(git config filter.lfs.smudge)" ] [ "$clean" = "$(git config filter.lfs.clean)" ] [ "$filter" = "$(git config filter.lfs.process)" ] ) end_test begin_test "install with old (non-upgradeable) settings" ( set -e git config --global filter.lfs.smudge "git-lfs smudge --something %f" git config --global filter.lfs.clean "git-lfs clean --something %f" git lfs install | tee install.log [ "${PIPESTATUS[0]}" = 0 ] grep -E "(clean|smudge)\" attribute should be" install.log [ `grep -c "(MISSING)" install.log` = "0" ] [ "git-lfs smudge --something %f" = "$(git config --global filter.lfs.smudge)" ] [ "git-lfs clean --something %f" = "$(git config --global filter.lfs.clean)" ] git lfs install --force [ "git-lfs smudge -- %f" = "$(git config --global filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config --global filter.lfs.clean)" ] ) end_test begin_test "install with upgradeable settings" ( set -e git config --global filter.lfs.smudge "git-lfs smudge %f" git config --global filter.lfs.clean "git-lfs clean %f" # should not need force, should upgrade this old style git lfs install [ "git-lfs smudge -- %f" = "$(git config --global filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config --global filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config --global filter.lfs.process)" ] ) end_test begin_test "install updates repo hooks" ( set -e mkdir install-repo-hooks cd install-repo-hooks git init pre_push_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/pre-push.\\n\"; exit 2; } git lfs pre-push \"\$@\"" post_checkout_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/post-checkout.\\n\"; exit 2; } git lfs post-checkout \"\$@\"" post_commit_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. 
If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/post-commit.\\n\"; exit 2; } git lfs post-commit \"\$@\"" post_merge_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/post-merge.\\n\"; exit 2; } git lfs post-merge \"\$@\"" [ "Updated git hooks. Git LFS initialized." = "$(git lfs install)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] [ "$post_checkout_hook" = "$(cat .git/hooks/post-checkout)" ] [ "$post_commit_hook" = "$(cat .git/hooks/post-commit)" ] [ "$post_merge_hook" = "$(cat .git/hooks/post-merge)" ] # replace old hook # more-comprehensive hook update tests are in test-update.sh echo "#!/bin/sh git lfs push --stdin \$*" > .git/hooks/pre-push [ "Updated git hooks. Git LFS initialized." = "$(git lfs install)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] # don't replace unexpected hook expected="Hook already exists: pre-push test To resolve this, either: 1: run \`git lfs update --manual\` for instructions on how to merge hooks. 2: run \`git lfs update --force\` to overwrite your hook." echo "test" > .git/hooks/pre-push echo "test" > .git/hooks/post-checkout echo "test" > .git/hooks/post-commit echo "test" > .git/hooks/post-merge [ "test" = "$(cat .git/hooks/pre-push)" ] [ "$expected" = "$(git lfs install 2>&1)" ] [ "test" = "$(cat .git/hooks/pre-push)" ] [ "test" = "$(cat .git/hooks/post-checkout)" ] [ "test" = "$(cat .git/hooks/post-commit)" ] [ "test" = "$(cat .git/hooks/post-merge)" ] # Make sure returns non-zero set +e git lfs install if [ $? -eq 0 ] then exit 1 fi set -e # force replace unexpected hook [ "Updated git hooks. Git LFS initialized." = "$(git lfs install --force)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] [ "$post_checkout_hook" = "$(cat .git/hooks/post-checkout)" ] [ "$post_commit_hook" = "$(cat .git/hooks/post-commit)" ] [ "$post_merge_hook" = "$(cat .git/hooks/post-merge)" ] has_test_dir || exit 0 echo "test with bare repository" cd .. git clone --mirror install-repo-hooks bare-install-repo-hooks cd bare-install-repo-hooks git lfs env git lfs install ls -al hooks [ "$pre_push_hook" = "$(cat hooks/pre-push)" ] ) end_test begin_test "install outside repository directory" ( set -e if [ -d "hooks" ]; then ls -al echo "hooks dir exists" exit 1 fi git lfs install 2>&1 > check.log if [ -d "hooks" ]; then ls -al echo "hooks dir exists" exit 1 fi cat check.log # doesn't print this because being in a git repo is not necessary for install [ "$(grep -c "Not in a git repository" check.log)" = "0" ] ) end_test begin_test "install --skip-smudge" ( set -e mkdir install-skip-smudge-test cd install-skip-smudge-test git lfs install [ "git-lfs clean -- %f" = "$(git config --global filter.lfs.clean)" ] [ "git-lfs smudge -- %f" = "$(git config --global filter.lfs.smudge)" ] [ "git-lfs filter-process" = "$(git config --global filter.lfs.process)" ] git lfs install --skip-smudge [ "git-lfs clean -- %f" = "$(git config --global filter.lfs.clean)" ] [ "git-lfs smudge --skip -- %f" = "$(git config --global filter.lfs.smudge)" ] [ "git-lfs filter-process --skip" = "$(git config --global filter.lfs.process)" ] git lfs install [ "git-lfs clean -- %f" = "$(git config --global filter.lfs.clean)" ] [ "git-lfs smudge -- %f" = "$(git config --global filter.lfs.smudge)" ] [ "git-lfs filter-process" = "$(git config --global filter.lfs.process)" ] [ ! 
-e "lfs" ] ) end_test begin_test "install --local" ( set -e # old values that should be ignored by `install --local` git config --global filter.lfs.smudge "git lfs smudge %f" git config --global filter.lfs.clean "git lfs clean %f" mkdir install-local-repo cd install-local-repo git init git lfs install --local [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] [ "git-lfs clean -- %f" = "$(git config --local filter.lfs.clean)" ] [ "git lfs clean %f" = "$(git config --global filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ] [ "git-lfs filter-process" = "$(git config --local filter.lfs.process)" ] ) end_test begin_test "install --local outside repository" ( # If run inside the git-lfs source dir this will update its .git/config & cause issues if [ "$GIT_LFS_TEST_DIR" == "" ]; then echo "Skipping install --local because GIT_LFS_TEST_DIR is not set" exit 0 fi set +e has_test_dir || exit 0 git lfs install --local 2> err.log res=$? [ "Not in a git repository." = "$(cat err.log)" ] [ "0" != "$res" ] ) end_test begin_test "install in directory without access to .git/lfs" ( set -e mkdir not-a-repo cd not-a-repo mkdir .git touch .git/lfs touch lfs git config --global filter.lfs.clean whatevs [ "whatevs" = "$(git config filter.lfs.clean)" ] git lfs install --force [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] ) end_test begin_test "install in repo without changing hooks" ( set -e git init non-lfs-repo cd non-lfs-repo git lfs install --skip-repo # should not install hooks [ ! -f .git/hooks/pre-push ] [ ! -f .git/hooks/post-checkout ] [ ! -f .git/hooks/post-merge ] [ ! -f .git/hooks/post-commit ] # filters should still be installed [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ] [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ] [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ] ) end_test begin_test "can install when multiple global values registered" ( set -e git config --global filter.lfs.smudge "git-lfs smudge --something %f" git config --global --add filter.lfs.smudge "git-lfs smudge --something-else %f" git lfs install --force ) end_test git-lfs-2.3.4/test/test-lock.sh000077500000000000000000000060751317167762300163350ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "creating a lock" ( set -e reponame="lock_create_simple" setup_remote_repo_with_file "$reponame" "a.dat" git lfs lock --json "a.dat" | tee lock.json id=$(assert_lock lock.json a.dat) assert_server_lock "$reponame" "$id" ) end_test begin_test "create lock with server using client cert" ( set -e reponame="lock_create_client_cert" setup_remote_repo_with_file "$reponame" "cc.dat" git config lfs.url "$CLIENTCERTGITSERVER/$reponame.git/info/lfs" git lfs lock --json "cc.dat" | tee lock.json id=$(assert_lock lock.json cc.dat) assert_server_lock "$reponame" "$id" ) end_test begin_test "creating a lock (with output)" ( set -e reponame="lock_create_simple_output" setup_remote_repo_with_file "$reponame" "a_output.dat" git lfs lock "a_output.dat" | tee lock.log grep "Locked a_output.dat" lock.log id=$(grep -oh "\((.*)\)" lock.log | tr -d \(\)) assert_server_lock "$reponame" "$id" ) end_test begin_test "locking a previously locked file" ( set -e reponame="lock_create_previously_created" setup_remote_repo_with_file "$reponame" "b.dat" git lfs lock --json "b.dat" | tee lock.json id=$(assert_lock lock.json b.dat) assert_server_lock "$reponame" "$id" grep "lock already created" <(git lfs lock "b.dat" 2>&1) ) end_test begin_test "locking a directory" ( set -e reponame="locking_directories" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" mkdir dir echo "a" > dir/a.dat git add dir/a.dat .gitattributes git commit -m "add dir/a.dat" | tee commit.log grep "master (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 dir/a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log git push origin master 2>&1 | tee push.log grep "master -> master" push.log git lfs lock ./dir/ 2>&1 | tee lock.log grep "cannot lock directory" lock.log ) end_test begin_test "locking a nested file" ( set -e reponame="locking-nested-file" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" --lockable git add .gitattributes git commit -m "initial commit" mkdir -p foo/bar/baz contents="contents" contents_oid="$(calc_oid "$contents")" printf "$contents" > foo/bar/baz/a.dat git add foo/bar/baz/a.dat git commit -m "add a.dat" git push origin master assert_server_object "$reponame" "$contents_oid" git lfs lock foo/bar/baz/a.dat 2>&1 | tee lock.log grep "Locked foo/bar/baz/a.dat" lock.log git lfs locks 2>&1 | tee locks.log grep "foo/bar/baz/a.dat" locks.log ) end_test begin_test "creating a lock (within subdirectory)" ( set -e reponame="lock_create_within_subdirectory" setup_remote_repo_with_file "$reponame" "sub/a.dat" cd sub git lfs lock --json "a.dat" | tee lock.json if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected 'git lfs lock \'a.dat\'' to succeed" exit 1 fi id=$(assert_lock lock.json sub/a.dat) assert_server_lock "$reponame" "$id" ) end_test git-lfs-2.3.4/test/test-locks.sh000077500000000000000000000075501317167762300165170ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "list a single lock" ( set -e reponame="locks_list_single" setup_remote_repo_with_file "$reponame" "f.dat" git lfs lock --json "f.dat" | tee lock.log id=$(assert_lock lock.log f.dat) assert_server_lock "$reponame" "$id" git lfs locks --path "f.dat" | tee locks.log [ $(wc -l < locks.log) -eq 1 ] grep "f.dat" locks.log grep "Git LFS Tests" locks.log ) end_test begin_test "list a single lock (--json)" ( set -e reponame="locks_list_single_json" setup_remote_repo_with_file "$reponame" "f_json.dat" git lfs lock --json "f_json.dat" | tee lock.log id=$(assert_lock lock.log f_json.dat) assert_server_lock "$reponame" "$id" git lfs locks --json --path "f_json.dat" | tee locks.log grep "\"path\":\"f_json.dat\"" locks.log grep "\"owner\":{\"name\":\"Git LFS Tests\"}" locks.log ) end_test begin_test "list locks with a limit" ( set -e reponame="locks_list_limit" setup_remote_repo "$reponame" clone_repo "$reponame" "clone_$reponame" git lfs track "*.dat" echo "foo" > "g_1.dat" echo "bar" > "g_2.dat" git add "g_1.dat" "g_2.dat" ".gitattributes" git commit -m "add files" | tee commit.log grep "3 files changed" commit.log grep "create mode 100644 g_1.dat" commit.log grep "create mode 100644 g_2.dat" commit.log grep "create mode 100644 .gitattributes" commit.log git push origin master 2>&1 | tee push.log grep "master -> master" push.log git lfs lock --json "g_1.dat" | tee lock.log assert_server_lock "$reponame" "$(assert_log "lock.log" g_1.dat)" git lfs lock --json "g_2.dat" | tee lock.log assert_server_lock "$reponame" "$(assert_lock "lock.log" g_2.dat)" git lfs locks --limit 1 | tee locks.log [ $(wc -l < locks.log) -eq 1 ] ) end_test begin_test "list locks with pagination" ( set -e reponame="locks_list_paginate" setup_remote_repo "$reponame" clone_repo "$reponame" "clone_$reponame" git lfs track "*.dat" for i in $(seq 1 5); do echo "$i" > "h_$i.dat" done git add "h_1.dat" "h_2.dat" "h_3.dat" "h_4.dat" "h_5.dat" ".gitattributes" git commit -m "add files" | tee commit.log grep "6 files changed" commit.log for i in $(seq 1 5); do grep "create mode 100644 h_$i.dat" commit.log done grep "create mode 100644 .gitattributes" commit.log git push origin master 2>&1 | tee push.log grep "master -> master" push.log for i in $(seq 1 5); do git lfs lock --json "h_$i.dat" | tee lock.log assert_server_lock "$reponame" "$(assert_lock "lock.log" "h_$1.dat")" done # The server will return, at most, three locks at a time git lfs locks --limit 4 | tee locks.log [ $(wc -l < locks.log) -eq 4 ] ) end_test begin_test "cached locks" ( set -e reponame="cached_locks" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" echo "foo" > "cached1.dat" echo "bar" > "cached2.dat" git add "cached1.dat" "cached2.dat" ".gitattributes" git commit -m "add files" | tee commit.log grep "3 files changed" commit.log grep "create mode 100644 cached1.dat" commit.log grep "create mode 100644 cached2.dat" commit.log grep "create mode 100644 .gitattributes" commit.log git push origin master 2>&1 | tee push.log grep "master -> master" push.log git lfs lock --json "cached1.dat" | tee lock.log assert_server_lock "$(assert_lock "lock.log" cached1.dat)" git lfs lock --json "cached2.dat" | tee lock.log assert_server_lock "$(assert_lock "lock.log" cached2.dat)" git lfs locks --local | tee locks.log [ $(wc -l < locks.log) -eq 2 ] # delete the remote to prove we're using the local records git remote remove origin git lfs locks --local --path "cached1.dat" | tee locks.log [ $(wc -l < 
locks.log) -eq 1 ] grep "cached1.dat" locks.log git lfs locks --local --limit 1 | tee locks.log [ $(wc -l < locks.log) -eq 1 ] ) end_test git-lfs-2.3.4/test/test-logs.sh000077500000000000000000000006711317167762300163450ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "logs" ( set -e mkdir logs cd logs git init boomtownExit="" set +e git lfs logs boomtown boomtownExit=$? set -e [ "$boomtownExit" = "2" ] logname=`ls .git/lfs/objects/logs` logfile=".git/lfs/objects/logs/$logname" cat "$logfile" echo "... grep ..." grep "$ git-lfs logs boomtown" "$logfile" [ "$(cat "$logfile")" = "$(git lfs logs last)" ] ) end_test git-lfs-2.3.4/test/test-ls-files.sh000077500000000000000000000046171317167762300171230ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "ls-files" ( set -e mkdir repo cd repo git init git lfs track "*.dat" | grep "Tracking \"\*.dat\"" echo "some data" > some.dat echo "some text" > some.txt echo "missing" > missing.dat git add missing.dat git commit -m "add missing file" [ "6bbd052ab0 * missing.dat" = "$(git lfs ls-files)" ] git rm missing.dat git add some.dat some.txt git commit -m "added some files, removed missing one" git lfs ls-files | tee ls.log grep some.dat ls.log [ `wc -l < ls.log` = 1 ] diff -u <(git lfs ls-files --debug) <(cat <<-EOF filepath: some.dat size: 10 checkout: true download: true oid: sha256 5aa03f96c77536579166fba147929626cc3a97960e994057a9d80271a736d10f version: https://git-lfs.github.com/spec/v1 EOF) ) end_test begin_test "ls-files: outside git repository" ( set +e git lfs ls-files 2>&1 > ls-files.log res=$? set -e if [ "$res" = "0" ]; then echo "Passes because $GIT_LFS_TEST_DIR is unset." exit 0 fi [ "$res" = "128" ] grep "Not in a git repository" ls-files.log ) end_test begin_test "ls-files: with zero files" ( set -e mkdir empty cd empty git init git lfs track "*.dat" git add .gitattributes set +e git lfs ls-files 2> ls-files.log res=$? set -e cat ls-files.log [ "$res" = "2" ] grep "Git can't resolve ref:" ls-files.log git commit -m "initial commit" [ "$(git lfs ls-files)" = "" ] ) end_test begin_test "ls-files: show duplicate files" ( set -e mkdir dupRepoShort cd dupRepoShort git init git lfs track "*.tgz" | grep "Tracking \"\*.tgz\"" echo "test content" > one.tgz echo "test content" > two.tgz git add one.tgz git add two.tgz git commit -m "add duplicate files" expected="$(echo "a1fff0ffef * one.tgz a1fff0ffef * two.tgz")" [ "$expected" = "$(git lfs ls-files)" ] ) end_test begin_test "ls-files: show duplicate files with long OID" ( set -e mkdir dupRepoLong cd dupRepoLong git init git lfs track "*.tgz" | grep "Tracking \"\*.tgz\"" echo "test content" > one.tgz echo "test content" > two.tgz git add one.tgz git add two.tgz git commit -m "add duplicate files with long OID" expected="$(echo "a1fff0ffefb9eace7230c24e50731f0a91c62f9cefdfe77121c2f607125dffae * one.tgz a1fff0ffefb9eace7230c24e50731f0a91c62f9cefdfe77121c2f607125dffae * two.tgz")" [ "$expected" = "$(git lfs ls-files --long)" ] ) end_test git-lfs-2.3.4/test/test-malformed-pointers.sh000077500000000000000000000044201317167762300212040ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "malformed pointers" ( set -e reponame="malformed-pointers" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" base64 /dev/urandom | head -c 1023 > malformed_small.dat base64 /dev/urandom | head -c 1024 > malformed_exact.dat base64 /dev/urandom | head -c 1025 > malformed_large.dat base64 /dev/urandom | head -c 1048576 > malformed_xxl.dat git \ -c "filter.lfs.process=" \ -c "filter.lfs.clean=cat" \ -c "filter.lfs.required=false" \ add *.dat git commit -m "add malformed pointer" git push origin master pushd .. >/dev/null clone_repo "$reponame" "$reponame-assert" grep "malformed_small.dat" clone.log grep "malformed_exact.dat" clone.log grep "malformed_large.dat" clone.log grep "malformed_xxl.dat" clone.log expected_small="$(cat ../$reponame/malformed_small.dat)" expected_exact="$(cat ../$reponame/malformed_exact.dat)" expected_large="$(cat ../$reponame/malformed_large.dat)" expected_xxl="$(cat ../$reponame/malformed_xxl.dat)" actual_small="$(cat malformed_small.dat)" actual_exact="$(cat malformed_exact.dat)" actual_large="$(cat malformed_large.dat)" actual_xxl="$(cat malformed_xxl.dat)" [ "$expected_small" = "$actual_small" ] [ "$expected_exact" = "$actual_exact" ] [ "$expected_large" = "$actual_large" ] [ "$expected_xxl" = "$actual_xxl" ] popd >/dev/null ) end_test begin_test "empty pointers" ( set -e reponame="empty-pointers" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" touch empty.dat git \ -c "filter.lfs.process=" \ -c "filter.lfs.clean=cat" \ -c "filter.lfs.required=false" \ add empty.dat git commit -m "add empty pointer" [ "0" -eq "$(git cat-file -p :empty.dat | wc -c)" ] [ "0" -eq "$(wc -c < empty.dat)" ] git push origin master pushd .. >/dev/null clone_repo "$reponame" "$reponame-assert" [ "0" -eq "$(grep -c "empty.dat" clone.log)" ] [ "0" -eq "$(git cat-file -p :empty.dat | wc -c)" ] [ "0" -eq "$(wc -c < empty.dat)" ] popd >/dev/null ) end_test git-lfs-2.3.4/test/test-migrate-fixtures.sh000077500000000000000000000124351317167762300207010ustar00rootroot00000000000000#!/usr/bin/env bash # assert_ref_unmoved ensures that the previous and current SHA1 of a given ref # is equal by string comparison: # # assert_ref_unmoved "HEAD" "$previous_sha" "$current_sha" # # If the two are unequal (the ref has moved), a message is printed to stderr and # the program exits. assert_ref_unmoved() { local name="$1" local prev_sha="$2" local current_sha="$3" if [ "$prev_sha" != "$current_sha" ]; then echo >&2 "$name should not have moved (from: $prev_sha, to: $current_sha)" exit 1 fi } # setup_multiple_local_branches creates a repository as follows: # # A---B # \ # refs/heads/master # # - Commit 'A' has 120, in a.txt, and a corresponding entry in .gitattributes. setup_local_branch_with_gitattrs() { set -e reponame="migrate-single-remote-branch-with-attrs" remove_and_create_local_repo "$reponame" base64 < /dev/urandom | head -c 120 > a.txt git add a.txt git commit -m "initial commit" git lfs track "*.txt" git lfs track "*.other" git add .gitattributes git commit -m "add .gitattributes" } # setup_multiple_local_branches creates a repository as follows: # # B # / \ # A refs/heads/my-feature # \ # refs/heads/master # # - Commit 'A' has 120, 140 bytes of data in a.txt, and a.md, respectively. # # - Commit 'B' has 30 bytes of data in a.txt, and includes commit 'A' as a # parent. 
setup_multiple_local_branches() { set -e reponame="migrate-info-multiple-local-branches" remove_and_create_local_repo "$reponame" base64 < /dev/urandom | head -c 120 > a.txt base64 < /dev/urandom | head -c 140 > a.md git add a.txt a.md git commit -m "initial commit" git checkout -b my-feature base64 < /dev/urandom | head -c 30 > a.md git add a.md git commit -m "add an additional 30 bytes to a.md" git checkout master } # setup_single_remote_branch creates a repository as follows: # # A---B # \ \ # \ refs/heads/master # \ # refs/remotes/origin/master # # - Commit 'A' has 120, 140 bytes of data in a.txt, and a.md, respectively. It # is the latest commit pushed to the remote 'origin'. # # - Commit 'B' has 30, 50 bytes of data in a.txt, and a.md, respectively. setup_single_remote_branch() { set -e reponame="migrate-info-single-remote-branch" remove_and_create_remote_repo "$reponame" base64 < /dev/urandom | head -c 120 > a.txt base64 < /dev/urandom | head -c 140 > a.md git add a.txt a.md git commit -m "initial commit" git push origin master base64 < /dev/urandom | head -c 30 > a.txt base64 < /dev/urandom | head -c 50 > a.md git add a.md a.txt git commit -m "add an additional 30, 50 bytes to a.{txt,md}" } # setup_multiple_remote_branches creates a repository as follows: # # C # / \ # A---B refs/heads/my-feature # \ \ # \ refs/heads/master # \ # refs/remotes/origin/master # # - Commit 'A' has 10, 11 bytes of data in a.txt, and a.md, respectively. It is # the latest commit pushed to the remote 'origin'. # # - Commit 'B' has 20, 21 bytes of data in a.txt, and a.md, respectively. # # - Commit 'C' has 30, 31 bytes of data in a.txt, and a.md, respectively. It is # the latest commit on refs/heads/my-feature. setup_multiple_remote_branches() { set -e reponame="migrate-info-exclude-remote-refs-given-branch" remove_and_create_remote_repo "$reponame" base64 < /dev/urandom | head -c 10 > a.txt base64 < /dev/urandom | head -c 11 > a.md git add a.txt a.md git commit -m "add 10, 11 bytes, a.{txt,md}" git push origin master base64 < /dev/urandom | head -c 20 > a.txt base64 < /dev/urandom | head -c 21 > a.md git add a.txt a.md git commit -m "add 20, 21 bytes, a.{txt,md}" git checkout -b my-feature base64 < /dev/urandom | head -c 30 > a.txt base64 < /dev/urandom | head -c 31 > a.md git add a.txt a.md git commit -m "add 30, 31 bytes, a.{txt,md}" git checkout master } # setup_single_local_branch_deep_trees creates a repository as follows: # # A # \ # refs/heads/master # # - Commit 'A' has 120 bytes of data in 'foo/bar/baz/a.txt'. setup_single_local_branch_deep_trees() { set -e reponame="migrate-single-local-branch-with-deep-trees" remove_and_create_local_repo "$reponame" mkdir -p foo/bar/baz base64 < /dev/urandom | head -c 120 > foo/bar/baz/a.txt git add foo/bar/baz/a.txt git commit -m "initial commit" } # make_bare converts the existing full checkout of a repository into a bare one, # and then `cd`'s into it. make_bare() { reponame=$(basename "$(pwd)") mv .git "../$reponame.git" cd .. 
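# The .git directory was moved aside above, so this only deletes the leftover
# working tree; the repository itself now lives on in "$reponame.git".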
rm -rf "$reponame" cd "$reponame.git" git config --bool core.bare true } # remove_and_create_local_repo removes, creates, and checks out a local # repository given by a particular name: # # remove_and_create_local_repo "$reponame" remove_and_create_local_repo() { local reponame="$(base64 < /dev/urandom | head -c 8 | sed -e 's/\///')-$1" git init "$reponame" cd "$reponame" } # remove_and_create_remote_repo removes, creates, and checks out a remote # repository both locally and on the gitserver, given by a particular name: # # remove_and_create_remote_repo "$reponame" remove_and_create_remote_repo() { local reponame="$(base64 < /dev/urandom | head -c 8 | sed -e 's/\///')-$1" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" } git-lfs-2.3.4/test/test-migrate-import.sh000077500000000000000000000322541317167762300203430ustar00rootroot00000000000000#!/usr/bin/env bash . "test/test-migrate-fixtures.sh" . "test/testlib.sh" begin_test "migrate import (default branch)" ( set -e setup_multiple_local_branches md_oid="$(calc_oid "$(git cat-file -p :a.md)")" txt_oid="$(calc_oid "$(git cat-file -p :a.txt)")" md_feature_oid="$(calc_oid "$(git cat-file -p my-feature:a.md)")" git lfs migrate import assert_pointer "refs/heads/master" "a.md" "$md_oid" "140" assert_pointer "refs/heads/master" "a.txt" "$txt_oid" "120" assert_local_object "$md_oid" "140" assert_local_object "$txt_oid" "120" refute_local_object "$md_feature_oid" "30" master="$(git rev-parse refs/heads/master)" feature="$(git rev-parse refs/heads/my-feature)" master_attrs="$(git cat-file -p "$master:.gitattributes")" [ ! $(git cat-file -p "$feature:.gitattributes") ] echo "$master_attrs" | grep -q "*.md filter=lfs diff=lfs merge=lfs" echo "$master_attrs" | grep -q "*.txt filter=lfs diff=lfs merge=lfs" ) end_test begin_test "migrate import (given branch)" ( set -e setup_multiple_local_branches md_oid="$(calc_oid "$(git cat-file -p :a.md)")" txt_oid="$(calc_oid "$(git cat-file -p :a.txt)")" md_feature_oid="$(calc_oid "$(git cat-file -p my-feature:a.md)")" git lfs migrate import my-feature assert_pointer "refs/heads/my-feature" "a.md" "$md_feature_oid" "30" assert_pointer "refs/heads/my-feature" "a.txt" "$txt_oid" "120" assert_pointer "refs/heads/master" "a.md" "$md_oid" "140" assert_pointer "refs/heads/master" "a.txt" "$txt_oid" "120" assert_local_object "$md_oid" "140" assert_local_object "$md_feature_oid" "30" assert_local_object "$txt_oid" "120" master="$(git rev-parse refs/heads/master)" feature="$(git rev-parse refs/heads/my-feature)" master_attrs="$(git cat-file -p "$master:.gitattributes")" feature_attrs="$(git cat-file -p "$feature:.gitattributes")" echo "$master_attrs" | grep -q "*.md filter=lfs diff=lfs merge=lfs" echo "$master_attrs" | grep -q "*.txt filter=lfs diff=lfs merge=lfs" echo "$feature_attrs" | grep -q "*.md filter=lfs diff=lfs merge=lfs" echo "$feature_attrs" | grep -q "*.txt filter=lfs diff=lfs merge=lfs" ) end_test begin_test "migrate import (default branch with filter)" ( set -e setup_multiple_local_branches md_oid="$(calc_oid "$(git cat-file -p :a.md)")" txt_oid="$(calc_oid "$(git cat-file -p :a.txt)")" md_feature_oid="$(calc_oid "$(git cat-file -p my-feature:a.md)")" git lfs migrate import --include "*.md" assert_pointer "refs/heads/master" "a.md" "$md_oid" "140" assert_local_object "$md_oid" "140" refute_local_object "$txt_oid" "120" refute_local_object "$md_feature_oid" "30" master="$(git rev-parse refs/heads/master)" feature="$(git rev-parse refs/heads/my-feature)" master_attrs="$(git cat-file -p 
"$master:.gitattributes")" [ ! $(git cat-file -p "$feature:.gitattributes") ] echo "$master_attrs" | grep -q "*.md filter=lfs diff=lfs merge=lfs" echo "$master_attrs" | grep -vq "*.txt filter=lfs diff=lfs merge=lfs" ) end_test begin_test "migrate import (given branch with filter)" ( set -e setup_multiple_local_branches md_oid="$(calc_oid "$(git cat-file -p :a.md)")" txt_oid="$(calc_oid "$(git cat-file -p :a.txt)")" md_feature_oid="$(calc_oid "$(git cat-file -p my-feature:a.md)")" git lfs migrate import --include "*.md" my-feature assert_pointer "refs/heads/my-feature" "a.md" "$md_feature_oid" "30" assert_pointer "refs/heads/my-feature~1" "a.md" "$md_oid" "140" assert_local_object "$md_oid" "140" assert_local_object "$md_feature_oid" "30" refute_local_object "$txt_oid" "120" master="$(git rev-parse refs/heads/master)" feature="$(git rev-parse refs/heads/my-feature)" master_attrs="$(git cat-file -p "$master:.gitattributes")" feature_attrs="$(git cat-file -p "$feature:.gitattributes")" echo "$master_attrs" | grep -q "*.md filter=lfs diff=lfs merge=lfs" echo "$master_attrs" | grep -vq "*.txt filter=lfs diff=lfs merge=lfs" echo "$feature_attrs" | grep -q "*.md filter=lfs diff=lfs merge=lfs" echo "$feature_attrs" | grep -vq "*.txt filter=lfs diff=lfs merge=lfs" ) end_test begin_test "migrate import (default branch, exclude remote refs)" ( set -e setup_single_remote_branch md_remote_oid="$(calc_oid "$(git cat-file -p "refs/remotes/origin/master:a.md")")" txt_remote_oid="$(calc_oid "$(git cat-file -p "refs/remotes/origin/master:a.txt")")" md_oid="$(calc_oid "$(git cat-file -p "refs/heads/master:a.md")")" txt_oid="$(calc_oid "$(git cat-file -p "refs/heads/master:a.txt")")" git lfs migrate import assert_pointer "refs/heads/master" "a.md" "$md_oid" "50" assert_pointer "refs/heads/master" "a.txt" "$txt_oid" "30" assert_local_object "$md_oid" "50" assert_local_object "$txt_oid" "30" refute_local_object "$md_remote_oid" "140" refute_local_object "$txt_remote_oid" "120" master="$(git rev-parse refs/heads/master)" remote="$(git rev-parse refs/remotes/origin/master)" master_attrs="$(git cat-file -p "$master:.gitattributes")" [ ! 
$(git cat-file -p "$remote:.gitattributes") ] echo "$master_attrs" | grep -q "*.md filter=lfs diff=lfs merge=lfs" echo "$master_attrs" | grep -vq "*.txt filter=lfs diff=lfs merge=lfs" ) end_test begin_test "migrate import (given branch, exclude remote refs)" ( set -e setup_multiple_remote_branches md_master_oid="$(calc_oid "$(git cat-file -p "refs/heads/master:a.md")")" md_remote_oid="$(calc_oid "$(git cat-file -p "refs/remotes/origin/master:a.md")")" md_feature_oid="$(calc_oid "$(git cat-file -p "refs/heads/my-feature:a.md")")" txt_master_oid="$(calc_oid "$(git cat-file -p "refs/heads/master:a.txt")")" txt_remote_oid="$(calc_oid "$(git cat-file -p "refs/remotes/origin/master:a.txt")")" txt_feature_oid="$(calc_oid "$(git cat-file -p "refs/heads/my-feature:a.txt")")" git lfs migrate import my-feature assert_pointer "refs/heads/master" "a.md" "$md_master_oid" "21" assert_pointer "refs/heads/my-feature" "a.md" "$md_feature_oid" "31" assert_pointer "refs/heads/master" "a.txt" "$txt_master_oid" "20" assert_pointer "refs/heads/my-feature" "a.txt" "$txt_feature_oid" "30" assert_local_object "$md_feature_oid" "31" assert_local_object "$md_master_oid" "21" assert_local_object "$txt_feature_oid" "30" assert_local_object "$txt_master_oid" "20" refute_local_object "$md_remote_oid" "11" refute_local_object "$txt_remote_oid" "10" master="$(git rev-parse refs/heads/master)" feature="$(git rev-parse refs/heads/my-feature)" remote="$(git rev-parse refs/remotes/origin/master)" master_attrs="$(git cat-file -p "$master:.gitattributes")" [ ! $(git cat-file -p "$remote:.gitattributes") ] feature_attrs="$(git cat-file -p "$feature:.gitattributes")" echo "$master_attrs" | grep -q "*.md filter=lfs diff=lfs merge=lfs" echo "$master_attrs" | grep -q "*.txt filter=lfs diff=lfs merge=lfs" echo "$feature_attrs" | grep -q "*.md filter=lfs diff=lfs merge=lfs" echo "$feature_attrs" | grep -vq "*.txt filter=lfs diff=lfs merge=lfs" ) end_test begin_test "migrate import (include/exclude ref)" ( set -e setup_multiple_remote_branches md_master_oid="$(calc_oid "$(git cat-file -p "refs/heads/master:a.md")")" md_remote_oid="$(calc_oid "$(git cat-file -p "refs/remotes/origin/master:a.md")")" md_feature_oid="$(calc_oid "$(git cat-file -p "refs/heads/my-feature:a.md")")" txt_master_oid="$(calc_oid "$(git cat-file -p "refs/heads/master:a.txt")")" txt_remote_oid="$(calc_oid "$(git cat-file -p "refs/remotes/origin/master:a.txt")")" txt_feature_oid="$(calc_oid "$(git cat-file -p "refs/heads/my-feature:a.txt")")" git lfs migrate import \ --include-ref=refs/heads/my-feature \ --exclude-ref=refs/heads/master assert_pointer "refs/heads/my-feature" "a.md" "$md_feature_oid" "31" assert_pointer "refs/heads/my-feature" "a.txt" "$txt_feature_oid" "30" assert_local_object "$md_feature_oid" "31" refute_local_object "$md_master_oid" "21" assert_local_object "$txt_feature_oid" "30" refute_local_object "$txt_master_oid" "20" refute_local_object "$md_remote_oid" "11" refute_local_object "$txt_remote_oid" "10" master="$(git rev-parse refs/heads/master)" feature="$(git rev-parse refs/heads/my-feature)" remote="$(git rev-parse refs/remotes/origin/master)" [ ! $(git cat-file -p "$master:.gitattributes") ] [ ! 
$(git cat-file -p "$remote:.gitattributes") ] feature_attrs="$(git cat-file -p "$feature:.gitattributes")" echo "$feature_attrs" | grep -q "*.md filter=lfs diff=lfs merge=lfs" echo "$feature_attrs" | grep -q "*.txt filter=lfs diff=lfs merge=lfs" ) end_test begin_test "migrate import (include/exclude ref with filter)" ( set -e setup_multiple_remote_branches md_master_oid="$(calc_oid "$(git cat-file -p "refs/heads/master:a.md")")" md_remote_oid="$(calc_oid "$(git cat-file -p "refs/remotes/origin/master:a.md")")" md_feature_oid="$(calc_oid "$(git cat-file -p "refs/heads/my-feature:a.md")")" txt_master_oid="$(calc_oid "$(git cat-file -p "refs/heads/master:a.txt")")" txt_remote_oid="$(calc_oid "$(git cat-file -p "refs/remotes/origin/master:a.txt")")" txt_feature_oid="$(calc_oid "$(git cat-file -p "refs/heads/my-feature:a.txt")")" git lfs migrate import \ --include="*.txt" \ --include-ref=refs/heads/my-feature \ --exclude-ref=refs/heads/master assert_pointer "refs/heads/my-feature" "a.txt" "$txt_feature_oid" "30" refute_local_object "$md_feature_oid" "31" refute_local_object "$md_master_oid" "21" assert_local_object "$txt_feature_oid" "30" refute_local_object "$txt_master_oid" "20" refute_local_object "$md_remote_oid" "11" refute_local_object "$txt_remote_oid" "10" master="$(git rev-parse refs/heads/master)" feature="$(git rev-parse refs/heads/my-feature)" remote="$(git rev-parse refs/remotes/origin/master)" [ ! $(git cat-file -p "$master:.gitattributes") ] [ ! $(git cat-file -p "$remote:.gitattributes") ] feature_attrs="$(git cat-file -p "$feature:.gitattributes")" echo "$feature_attrs" | grep -vq "*.md filter=lfs diff=lfs merge=lfs" echo "$feature_attrs" | grep -q "*.txt filter=lfs diff=lfs merge=lfs" ) end_test begin_test "migrate import (existing .gitattributes)" ( set -e setup_local_branch_with_gitattrs pwd master="$(git rev-parse refs/heads/master)" txt_master_oid="$(calc_oid "$(git cat-file -p "$master:a.txt")")" git lfs migrate import --include-ref=refs/heads/master --include="*.txt" assert_local_object "$txt_master_oid" "120" master="$(git rev-parse refs/heads/master)" prev="$(git rev-parse refs/heads/master^1)" diff -u <(git cat-file -p $master:.gitattributes) <(cat <<-EOF *.txt filter=lfs diff=lfs merge=lfs -text *.other filter=lfs diff=lfs merge=lfs -text EOF) diff -u <(git cat-file -p $prev:.gitattributes) <(cat <<-EOF *.txt filter=lfs diff=lfs merge=lfs -text EOF) ) end_test begin_test "migrate import (bare repository)" ( set -e setup_multiple_local_branches make_bare git lfs migrate import \ --include-ref=master ) end_test begin_test "migrate import (nested sub-trees, no filter)" ( set -e setup_single_local_branch_deep_trees oid="$(calc_oid "$(git cat-file -p :foo/bar/baz/a.txt)")" size="$(git cat-file -p :foo/bar/baz/a.txt | wc -c | awk '{ print $1 }')" git lfs migrate import --everything assert_local_object "$oid" "$size" ) end_test begin_test "migrate import (prefix include(s))" ( set -e includes="foo${PATH_SEPARATOR}bar${PATH_SEPARATOR}baz ${PATH_SEPARATOR}foo foo${PATH_SEPARATOR}**${PATH_SEPARATOR}baz${PATH_SEPARATOR}a.txt *.txt" for include in $includes; do setup_single_local_branch_deep_trees oid="$(calc_oid "$(git cat-file -p :foo/bar/baz/a.txt)")" git lfs migrate import --include="$include" assert_local_object "$oid" 120 cd .. 
done ) end_test begin_test "migrate import (--everything)" ( set -e setup_multiple_local_branches git checkout master master_txt_oid="$(calc_oid "$(git cat-file -p :a.txt)")" master_md_oid="$(calc_oid "$(git cat-file -p :a.md)")" feature_md_oid="$(calc_oid "$(git cat-file -p my-feature:a.md)")" master_txt_size="$(git cat-file -p :a.txt | wc -c | awk '{ print $1 }')" master_md_size="$(git cat-file -p :a.md | wc -c | awk '{ print $1 }')" feature_md_size="$(git cat-file -p my-feature:a.md | wc -c | awk '{ print $1 }')" git lfs migrate import --everything assert_pointer "master" "a.txt" "$master_txt_oid" "$master_txt_size" assert_pointer "master" "a.md" "$master_md_oid" "$master_md_size" assert_pointer "my-feature" "a.md" "$feature_md_oid" "$feature_md_size" ) end_test begin_test "migrate import (--everything with args)" ( set -e setup_multiple_local_branches [ "$(git lfs migrate import --everything master 2>&1)" = \ "fatal: cannot use --everything with explicit reference arguments" ] ) end_test begin_test "migrate import (--everything with --include-ref)" ( set -e setup_multiple_local_branches [ "$(git lfs migrate import --everything --include-ref=refs/heads/master 2>&1)" = \ "fatal: cannot use --everything with --include-ref or --exclude-ref" ] ) end_test exit 0 begin_test "migrate import (--everything with --exclude-ref)" ( set -e setup_multiple_local_branches [ "$(git lfs migrate import --everything --exclude-ref=refs/heads/master 2>&1)" = \ "fatal: cannot use --everything with --include-ref or --exclude-ref" ] ) end_test git-lfs-2.3.4/test/test-migrate-info.sh000077500000000000000000000207661317167762300177710ustar00rootroot00000000000000#!/usr/bin/env bash . "test/test-migrate-fixtures.sh" . "test/testlib.sh" begin_test "migrate info (default branch)" ( set -e setup_multiple_local_branches original_head="$(git rev-parse HEAD)" diff -u <(git lfs migrate info 2>&1 | tail -n 2) <(cat <<-EOF *.md 140 B 1/1 files(s) 100% *.txt 120 B 1/1 files(s) 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (given branch)" ( set -e setup_multiple_local_branches original_master="$(git rev-parse refs/heads/master)" original_feature="$(git rev-parse refs/heads/my-feature)" diff -u <(git lfs migrate info my-feature 2>&1 | tail -n 2) <(cat <<-EOF *.md 170 B 2/2 files(s) 100% *.txt 120 B 1/1 files(s) 100% EOF) migrated_master="$(git rev-parse refs/heads/master)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/master" "$original_master" "$migrated_master" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (default branch with filter)" ( set -e setup_multiple_local_branches original_head="$(git rev-parse HEAD)" diff -u <(git lfs migrate info --include "*.md" 2>&1 | tail -n 1) <(cat <<-EOF *.md 140 B 1/1 files(s) 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "refs/heads/master" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (given branch with filter)" ( set -e setup_multiple_local_branches original_master="$(git rev-parse refs/heads/master)" original_feature="$(git rev-parse refs/heads/my-feature)" diff -u <(git lfs migrate info --include "*.md" my-feature 2>&1 | tail -n 1) <(cat <<-EOF *.md 170 B 2/2 files(s) 100% EOF) migrated_master="$(git rev-parse refs/heads/master)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/master" 
"$original_master" "$migrated_master" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (default branch, exclude remote refs)" ( set -e setup_single_remote_branch git show-ref original_remote="$(git rev-parse refs/remotes/origin/master)" original_master="$(git rev-parse refs/heads/master)" diff -u <(git lfs migrate info 2>&1 | tail -n 2) <(cat <<-EOF *.md 50 B 1/1 files(s) 100% *.txt 30 B 1/1 files(s) 100% EOF) migrated_remote="$(git rev-parse refs/remotes/origin/master)" migrated_master="$(git rev-parse refs/heads/master)" assert_ref_unmoved "refs/heads/master" "$original_master" "$migrated_master" assert_ref_unmoved "refs/remotes/origin/master" "$original_remote" "$migrated_remote" ) end_test begin_test "migrate info (given branch, exclude remote refs)" ( set -e setup_multiple_remote_branches original_remote="$(git rev-parse refs/remotes/origin/master)" original_master="$(git rev-parse refs/heads/master)" original_feature="$(git rev-parse refs/heads/my-feature)" diff -u <(git lfs migrate info my-feature 2>&1 | tail -n 2) <(cat <<-EOF *.md 52 B 2/2 files(s) 100% *.txt 50 B 2/2 files(s) 100% EOF) migrated_remote="$(git rev-parse refs/remotes/origin/master)" migrated_master="$(git rev-parse refs/heads/master)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/remotes/origin/master" "$original_remote" "$migrated_remote" assert_ref_unmoved "refs/heads/master" "$original_master" "$migrated_master" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (include/exclude ref)" ( set -e setup_multiple_remote_branches original_master="$(git rev-parse refs/heads/master)" original_feature="$(git rev-parse refs/heads/my-feature)" diff -u <(git lfs migrate info \ --include-ref=refs/heads/my-feature \ --exclude-ref=refs/heads/master 2>&1 | tail -n 2) <(cat <<-EOF *.md 31 B 1/1 files(s) 100% *.txt 30 B 1/1 files(s) 100% EOF) migrated_master="$(git rev-parse refs/heads/master)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/master" "$original_master" "$migrated_master" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (include/exclude ref with filter)" ( set -e setup_multiple_remote_branches original_master="$(git rev-parse refs/heads/master)" original_feature="$(git rev-parse refs/heads/my-feature)" diff -u <(git lfs migrate info \ --include="*.txt" \ --include-ref=refs/heads/my-feature \ --exclude-ref=refs/heads/master 2>&1 | tail -n 1) <(cat <<-EOF *.txt 30 B 1/1 files(s) 100% EOF) migrated_master="$(git rev-parse refs/heads/master)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/master" "$original_master" "$migrated_master" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (nested sub-trees, no filter)" ( set -e setup_single_local_branch_deep_trees original_master="$(git rev-parse refs/heads/master)" diff -u <(git lfs migrate info 2>/dev/null) <(cat <<-EOF *.txt 120 B 1/1 files(s) 100% EOF) migrated_master="$(git rev-parse refs/heads/master)" assert_ref_unmoved "refs/heads/master" "$original_master" "$migrated_master" ) end_test begin_test "migrate info (above threshold)" ( set -e setup_multiple_local_branches original_head="$(git rev-parse HEAD)" diff -u <(git lfs migrate info --above=130B 2>&1 | tail -n 1) <(cat <<-EOF 
*.md 140 B 1/1 files(s) 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (above threshold, top)" ( set -e setup_multiple_local_branches original_head="$(git rev-parse HEAD)" diff -u <(git lfs migrate info --above=130B --top=1 2>&1 | tail -n 1) <(cat <<-EOF *.md 140 B 1/1 files(s) 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (given unit)" ( set -e setup_multiple_local_branches original_head="$(git rev-parse HEAD)" diff -u <(git lfs migrate info --unit=kb 2>&1 | tail -n 2) <(cat <<-EOF *.md 0.1 1/1 files(s) 100% *.txt 0.1 1/1 files(s) 100% EOF) migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (doesn't show empty info entries)" ( set -e setup_multiple_local_branches original_head="$(git rev-parse HEAD)" [ "0" -eq "$(git lfs migrate info --above=1mb 2>/dev/null | wc -l)" ] migrated_head="$(git rev-parse HEAD)" assert_ref_unmoved "HEAD" "$original_head" "$migrated_head" ) end_test begin_test "migrate info (empty set)" ( set -e setup_multiple_local_branches migrate="$(git lfs migrate info \ --include-ref=refs/heads/master \ --exclude-ref=refs/heads/master 2>/dev/null )" [ "0" -eq "$(echo -n "$migrate" | wc -l | awk '{ print $1 }')" ] ) end_test begin_test "migrate info (--everything)" ( set -e setup_multiple_local_branches git checkout master original_master="$(git rev-parse refs/heads/master)" original_feature="$(git rev-parse refs/heads/my-feature)" diff -u <(git lfs migrate info --everything 2>&1 | tail -n 2) <(cat <<-EOF *.md 170 B 2/2 files(s) 100% *.txt 120 B 1/1 files(s) 100% EOF) migrated_master="$(git rev-parse refs/heads/master)" migrated_feature="$(git rev-parse refs/heads/my-feature)" assert_ref_unmoved "refs/heads/master" "$original_master" "$migrated_master" assert_ref_unmoved "refs/heads/my-feature" "$original_feature" "$migrated_feature" ) end_test begin_test "migrate info (--everything with args)" ( set -e setup_multiple_local_branches [ "$(git lfs migrate info --everything master 2>&1)" = \ "fatal: cannot use --everything with explicit reference arguments" ] ) end_test begin_test "migrate info (--everything with --include-ref)" ( set -e setup_multiple_local_branches [ "$(git lfs migrate info --everything --include-ref=refs/heads/master 2>&1)" = \ "fatal: cannot use --everything with --include-ref or --exclude-ref" ] ) end_test begin_test "migrate info (--everything with --exclude-ref)" ( set -e setup_multiple_local_branches [ "$(git lfs migrate info --everything --exclude-ref=refs/heads/master 2>&1)" = \ "fatal: cannot use --everything with --include-ref or --exclude-ref" ] ) end_test git-lfs-2.3.4/test/test-object-authenticated.sh000077500000000000000000000010251317167762300214610ustar00rootroot00000000000000#!/usr/bin/env bash .
"test/testlib.sh" # these tests rely on GIT_TERMINAL_PROMPT to test properly ensure_git_version_isnt $VERSION_LOWER "2.3.0" begin_test "download authenticated object" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" without-creds git lfs track "*.dat" printf "object-authenticated" > hi.dat git add hi.dat git add .gitattributes git commit -m "initial commit" GIT_CURL_VERBOSE=1 GIT_TERMINAL_PROMPT=0 git lfs push origin master ) end_test git-lfs-2.3.4/test/test-pointer.sh000077500000000000000000000154601317167762300170630ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "pointer --file --stdin" ( set -e echo "simple" > some-file input="version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7" expected="Git LFS pointer for some-file version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7 Git blob OID: e18acd45d7e3ce0451d1d637f9697aa508e07dee Pointer from STDIN version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7 Git blob OID: e18acd45d7e3ce0451d1d637f9697aa508e07dee" [ "$expected" = "$(echo "$input" | git lfs pointer --file=some-file --stdin 2>&1)" ] ) end_test begin_test "pointer --file --stdin mismatch" ( set -e echo "simple" > some-file input="version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 123" set +e output=$(echo "$input" | git lfs pointer --file=some-file --stdin 2>&1) status=$? set -e [ "1" = "$status" ] expected="Git LFS pointer for some-file version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7 Git blob OID: e18acd45d7e3ce0451d1d637f9697aa508e07dee Pointer from STDIN version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 123 Git blob OID: 905bcc24b5dc074ab870f9944178e398eec3b470 Pointers do not match" [ "$expected" = "$output" ] ) end_test begin_test "pointer --stdin" ( set -e echo "version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7" > valid-pointer output=$(cat valid-pointer | git lfs pointer --stdin 2>&1) expected="Pointer from STDIN version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7" [ "$expected" = "$output" ] ) end_test begin_test "pointer --stdin without stdin" ( # this test doesn't work on Windows, it just operates like 'bad pointer' case # stdin isn't detectable as detached, it just times out with no content if [[ "$(is_stdin_attached)" == "0" ]]; then echo "Skipping pointer without stdin because STDIN attached" exit 0 fi output=$(echo "" | git lfs pointer --stdin 2>&1) status=$? set -e expected="Cannot read from STDIN. The --stdin flag expects a pointer file from STDIN." [ "$expected" = "$output" ] [ "1" = "$status" ] ) begin_test "pointer --stdin with bad pointer" ( output=$(echo "not a pointer" | git lfs pointer --stdin 2>&1) status=$? 
set -e expected="Pointer from STDIN Pointer file error: invalid header" diff -u <(printf "$expected") <(printf "$output") [ "1" = "$status" ] ) begin_test "pointer --file --pointer mismatch" ( set -e echo "simple" > some-file echo "version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 123" > invalid-pointer expected="Git LFS pointer for some-file version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7 Git blob OID: e18acd45d7e3ce0451d1d637f9697aa508e07dee Pointer from invalid-pointer version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 123 Git blob OID: 905bcc24b5dc074ab870f9944178e398eec3b470 Pointers do not match" set +e output=$(git lfs pointer --file=some-file --pointer=invalid-pointer 2>&1) status=$? set -e [ "1" = "$status" ] [ "$expected" = "$output" ] ) end_test begin_test "pointer --file --pointer" ( set -e echo "simple" > some-file echo "version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7" > valid-pointer expected="Git LFS pointer for some-file version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7 Git blob OID: e18acd45d7e3ce0451d1d637f9697aa508e07dee Pointer from valid-pointer version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7 Git blob OID: e18acd45d7e3ce0451d1d637f9697aa508e07dee" [ "$expected" = "$(git lfs pointer --file=some-file --pointer=valid-pointer 2>&1)" ] ) end_test begin_test "pointer --pointer" ( set -e echo "version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7" > valid-pointer expected="Pointer from valid-pointer version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7" [ "$expected" = "$(git lfs pointer --pointer=valid-pointer 2>&1)" ] ) end_test begin_test "pointer missing --pointer" ( output=$(git lfs pointer --pointer=missing-pointer 2>&1) status=$? set -e [ "1" = "$status" ] echo "$output" echo "$output" | grep "open missing-pointer:" ) end_test begin_test "pointer invalid --pointer" ( set -e echo "not a pointer" > some-pointer set +e output=$(git lfs pointer --pointer=some-pointer 2>&1) status=$? set -e [ "1" = "$status" ] expected="Pointer from some-pointer Pointer file error: invalid header diff -u <(printf "$expected") <(printf "$output") [ "$expected" = "$output" ] ) end_test begin_test "pointer --file" ( set -e echo "simple" > some-file expected="Git LFS pointer for some-file version https://git-lfs.github.com/spec/v1 oid sha256:6c17f2007cbe934aee6e309b28b2dba3c119c5dff2ef813ed124699efe319868 size 7" [ "$expected" = "$(git lfs pointer --file=some-file 2>&1)" ] ) end_test begin_test "pointer without args" ( output=$(git lfs pointer 2>&1) status=$? set -e [ "Nothing to do!" 
= "$output" ] [ "1" = "$status" ] ) end_test begin_test "pointer stdout/stderr" ( set -e echo "pointer-stdout-test" > pointer-stdout-test.txt git lfs pointer --file=pointer-stdout-test.txt > stdout.txt 2> stderr.txt echo "stdout:" cat stdout.txt [ $(wc -l stdout.txt | sed -e 's/^[[:space:]]*//' | cut -f1 -d' ') -eq 3 ] grep "oid sha256:e96ec1bd71eea8df78b24c64a7ab9d42dd7f821c4e503f0e2288273b9bff6c16" stdout.txt [ $(grep -c "Git LFS pointer" stdout.txt) -eq 0 ] echo "stderr:" cat stderr.txt grep "Git LFS pointer" stderr.txt [ $(grep -c "oid sha256:" stderr.txt) -eq 0 ] ) end_test begin_test "pointer to console" ( set -e echo "pointer-stdout-test" > pointer-stdout-test.txt git lfs pointer --file=pointer-stdout-test.txt 2>&1 | tee pointer.txt grep "Git LFS pointer" pointer.txt grep "oid sha256:e96ec1bd71eea8df78b24c64a7ab9d42dd7f821c4e503f0e2288273b9bff6c16" pointer.txt ) end_test git-lfs-2.3.4/test/test-post-checkout.sh000077500000000000000000000066271317167762300202000ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "post-checkout" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track --lockable "*.dat" git lfs track "*.big" # not lockable git add .gitattributes git commit -m "add git attributes" echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Data\":\"file 1 creation\"}, {\"Filename\":\"file2.dat\",\"Data\":\"file 2 creation\"}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Data\":\"file 1 updated commit 2\"}, {\"Filename\":\"file3.big\",\"Data\":\"file 3 creation\"}, {\"Filename\":\"file4.big\",\"Data\":\"file 4 creation\"}], \"Tags\":[\"atag\"] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file2.dat\",\"Data\":\"file 2 updated commit 3\"}] }, { \"CommitDate\":\"$(get_date -3d)\", \"NewBranch\":\"branch2\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Data\":\"file 5 creation in branch2\"}, {\"Filename\":\"file6.big\",\"Data\":\"file 6 creation in branch2\"}] }, { \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file2.dat\",\"Data\":\"file 2 updated in branch2\"}, {\"Filename\":\"file3.big\",\"Data\":\"file 3 updated in branch2\"}] } ]" | GIT_LFS_SET_LOCKABLE_READONLY=0 lfstest-testutils addcommits # skipped setting read-only above to make bulk load simpler (no read-only issues) git push -u origin master branch2 # re-clone the repo so we start fresh cd .. rm -rf "$reponame" clone_repo "$reponame" "$reponame" # this will be master [ "$(cat file1.dat)" == "file 1 updated commit 2" ] [ "$(cat file2.dat)" == "file 2 updated commit 3" ] [ "$(cat file3.big)" == "file 3 creation" ] [ "$(cat file4.big)" == "file 4 creation" ] [ ! -e file5.dat ] [ ! -e file6.big ] # without the post-checkout hook, any changed files would now be writeable refute_file_writeable file1.dat refute_file_writeable file2.dat assert_file_writeable file3.big assert_file_writeable file4.big # checkout branch git checkout branch2 [ -e file5.dat ] [ -e file6.big ] refute_file_writeable file1.dat refute_file_writeable file2.dat refute_file_writeable file5.dat assert_file_writeable file3.big assert_file_writeable file4.big assert_file_writeable file6.big # Confirm that contents of existing files were updated even though were read-only [ "$(cat file2.dat)" == "file 2 updated in branch2" ] [ "$(cat file3.big)" == "file 3 updated in branch2" ] # restore files inside a branch (causes full scan since no diff) rm -f *.dat [ ! 
-e file1.dat ] [ ! -e file2.dat ] [ ! -e file5.dat ] git checkout file1.dat file2.dat file5.dat [ "$(cat file1.dat)" == "file 1 updated commit 2" ] [ "$(cat file2.dat)" == "file 2 updated in branch2" ] [ "$(cat file5.dat)" == "file 5 creation in branch2" ] refute_file_writeable file1.dat refute_file_writeable file2.dat refute_file_writeable file5.dat # now lock files, then remove & restore git lfs lock file1.dat git lfs lock file2.dat assert_file_writeable file1.dat assert_file_writeable file2.dat rm -f *.dat git checkout file1.dat file2.dat file5.dat assert_file_writeable file1.dat assert_file_writeable file2.dat refute_file_writeable file5.dat ) end_test git-lfs-2.3.4/test/test-post-commit.sh000077500000000000000000000034101317167762300176460ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "post-commit" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track --lockable "*.dat" git lfs track "*.big" # not lockable git add .gitattributes git commit -m "add git attributes" echo "Come with me" > pcfile1.dat echo "and you'll be" > pcfile2.dat echo "in a world" > pcfile3.big echo "of pure imagination" > pcfile4.big git add *.dat git commit -m "Committed large files" # New lockable files should have been made read-only now since not locked refute_file_writeable pcfile1.dat refute_file_writeable pcfile2.dat assert_file_writeable pcfile3.big assert_file_writeable pcfile4.big git push -u origin master # now lock files, then edit git lfs lock pcfile1.dat git lfs lock pcfile2.dat echo "Take a look" > pcfile1.dat echo "and you'll see" > pcfile2.dat git add pcfile1.dat pcfile2.dat git commit -m "Updated" # files should remain writeable since locked assert_file_writeable pcfile1.dat assert_file_writeable pcfile2.dat ) end_test begin_test "post-commit (locked file outside of LFS)" ( set -e reponame="post-commit-external" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs install # This step is intentionally done in two commits, due to a known bug in # the post-checkout process LFS performs. It compares changed files from HEAD, # which is an invalid previous state for the initial commit of a repository. echo "*.dat lockable" > .gitattributes git add .gitattributes git commit -m "initial commit" echo "hello" > a.dat git add a.dat assert_file_writeable a.dat git commit -m "add a.dat" refute_file_writeable a.dat ) end_test git-lfs-2.3.4/test/test-post-merge.sh000077500000000000000000000056501317167762300174650ustar00rootroot00000000000000#!/usr/bin/env bash .
"test/testlib.sh" begin_test "post-merge" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track --lockable "*.dat" git lfs track "*.big" # not lockable git add .gitattributes git commit -m "add git attributes" echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Data\":\"file 1 creation\"}, {\"Filename\":\"file2.dat\",\"Data\":\"file 2 creation\"}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Data\":\"file 1 updated commit 2\"}, {\"Filename\":\"file3.big\",\"Data\":\"file 3 creation\"}, {\"Filename\":\"file4.big\",\"Data\":\"file 4 creation\"}], \"Tags\":[\"atag\"] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file2.dat\",\"Data\":\"file 2 updated commit 3\"}] }, { \"CommitDate\":\"$(get_date -3d)\", \"NewBranch\":\"branch2\", \"Files\":[ {\"Filename\":\"file5.dat\",\"Data\":\"file 5 creation in branch2\"}, {\"Filename\":\"file6.big\",\"Data\":\"file 6 creation in branch2\"}] }, { \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file2.dat\",\"Data\":\"file 2 updated in branch2\"}, {\"Filename\":\"file3.big\",\"Data\":\"file 3 updated in branch2\"}] } ]" | GIT_LFS_SET_LOCKABLE_READONLY=0 lfstest-testutils addcommits # skipped setting read-only above to make bulk load simpler (no read-only issues) git push -u origin master branch2 # re-clone the repo so we start fresh cd .. rm -rf "$reponame" clone_repo "$reponame" "$reponame" # this will be master [ "$(cat file1.dat)" == "file 1 updated commit 2" ] [ "$(cat file2.dat)" == "file 2 updated commit 3" ] [ "$(cat file3.big)" == "file 3 creation" ] [ "$(cat file4.big)" == "file 4 creation" ] [ ! -e file5.dat ] [ ! -e file6.big ] # without the post-checkout hook, any changed files would now be writeable refute_file_writeable file1.dat refute_file_writeable file2.dat assert_file_writeable file3.big assert_file_writeable file4.big # merge branch, with readonly option disabled to demonstrate what would happen GIT_LFS_SET_LOCKABLE_READONLY=0 git merge origin/branch2 # branch2 had hanges to file2.dat and file5.dat which were lockable # but because we disabled the readonly feature they will be writeable now assert_file_writeable file2.dat assert_file_writeable file5.dat # now let's do it again with the readonly option enabled git reset --hard HEAD^ git merge origin/branch2 # This time they should be read-only refute_file_writeable file2.dat refute_file_writeable file5.dat # Confirm that contents of existing files were updated even though were read-only [ "$(cat file2.dat)" == "file 2 updated in branch2" ] [ "$(cat file5.dat)" == "file 5 creation in branch2" ] ) end_test git-lfs-2.3.4/test/test-pre-push.sh000077500000000000000000000662661317167762300171600ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "pre-push" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" git add .gitattributes git commit -m "add git attributes" git config "lfs.$(repo_endpoint $GITSERVER $reponame).locksverify" true echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log # no output if nothing to do [ "$(du -k push.log | cut -f 1)" == "0" ] git lfs track "*.dat" echo "hi" > hi.dat git add hi.dat git commit -m "add hi.dat" git show refute_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 # push file to the git lfs server echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log grep "(1 of 1 files)" push.log assert_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 ) end_test begin_test "pre-push dry-run" ( set -e reponame="$(basename "$0" ".sh")-dry-run" setup_remote_repo "$reponame" clone_repo "$reponame" repo-dry-run git lfs track "*.dat" git add .gitattributes git commit -m "add git attributes" git config "lfs.$(repo_endpoint $GITSERVER $reponame).locksverify" true echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push --dry-run origin "$GITSERVER/$reponame" 2>&1 | tee push.log [ "" = "$(cat push.log)" ] git lfs track "*.dat" echo "dry" > hi.dat git add hi.dat git commit -m "add hi.dat" git show refute_server_object "$reponame" 2840e0eafda1d0760771fe28b91247cf81c76aa888af28a850b5648a338dc15b echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push --dry-run origin "$GITSERVER/$reponame" 2>&1 | tee push.log grep "push 2840e0eafda1d0760771fe28b91247cf81c76aa888af28a850b5648a338dc15b => hi.dat" push.log cat push.log [ `wc -l < push.log` = 1 ] refute_server_object "$reponame" 2840e0eafda1d0760771fe28b91247cf81c76aa888af28a850b5648a338dc15b ) end_test begin_test "pre-push 307 redirects" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo-307 git lfs track "*.dat" git add .gitattributes git commit -m "add git attributes" # relative redirect git config remote.origin.lfsurl "$GITSERVER/redirect307/rel/$reponame.git/info/lfs" git lfs track "*.dat" echo "hi" > hi.dat git add hi.dat git commit -m "add hi.dat" git show # push file to the git lfs server echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/redirect307/rel/$reponame.git/info/lfs" 2>&1 | tee push.log grep "(0 of 0 files, 1 skipped)" push.log assert_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 # absolute redirect git config remote.origin.lfsurl "$GITSERVER/redirect307/abs/$reponame.git/info/lfs" echo "hi" > hi2.dat git add hi2.dat git commit -m "add hi2.dat" git show # push file to the git lfs server echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/redirect307/abs/$reponame.git/info/lfs" 2>&1 | tee push.log grep "(0 of 0 files, 1 skipped)" push.log ) end_test begin_test "pre-push with existing file" ( set -e reponame="$(basename "$0" ".sh")-existing-file" setup_remote_repo "$reponame" clone_repo "$reponame" existing-file echo 
"existing" > existing.dat git add existing.dat git commit -m "add existing dat" git lfs track "*.dat" echo "new" > new.dat git add new.dat git add .gitattributes git commit -m "add new file through git lfs" # push file to the git lfs server echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log grep "(1 of 1 files)" push.log # now the file exists assert_server_object "$reponame" 7aa7a5359173d05b63cfd682e3c38487f3cb4f7f1d60659fe59fab1505977d4c ) end_test begin_test "pre-push with existing pointer" ( set -e reponame="$(basename "$0" ".sh")-existing-pointer" setup_remote_repo "$reponame" clone_repo "$reponame" existing-pointer echo "$(pointer "7aa7a5359173d05b63cfd682e3c38487f3cb4f7f1d60659fe59fab1505977d4c" 4)" > new.dat git add new.dat git commit -m "add new pointer" mkdir -p .git/lfs/objects/7a/a7 echo "new" > .git/lfs/objects/7a/a7/7aa7a5359173d05b63cfd682e3c38487f3cb4f7f1d60659fe59fab1505977d4c # push file to the git lfs server echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log grep "(1 of 1 files)" push.log ) end_test begin_test "pre-push with missing pointer not on server" ( set -e reponame="$(basename "$0" ".sh")-missing-pointer" setup_remote_repo "$reponame" clone_repo "$reponame" missing-pointer oid="7aa7a5359173d05b63cfd682e3c38487f3cb4f7f1d60659fe59fab1505977d4c" echo "$(pointer "$oid" 4)" > new.dat git add new.dat git commit -m "add new pointer" # assert that push fails set +e echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log set -e grep " (missing) new.dat ($oid)" push.log ) end_test begin_test "pre-push with missing pointer which is on server" ( # should permit push if files missing locally but are on server, shouldn't # require client to have every file (prune) set -e reponame="$(basename "$0" ".sh")-missing-but-on-server" setup_remote_repo "$reponame" clone_repo "$reponame" missing-but-on-server contents="common data" contents_oid=$(calc_oid "$contents") git lfs track "*.dat" printf "$contents" > common1.dat git add common1.dat git add .gitattributes git commit -m "add first file" # push file to the git lfs server echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log grep "(1 of 1 files)" push.log # now the file exists assert_server_object "$reponame" "$contents_oid" # create another commit referencing same oid, then delete local data & push printf "$contents" > common2.dat git add common2.dat git commit -m "add second file, same content" rm -rf .git/lfs/objects echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log # make sure there were no errors reported [ -z "$(grep -i 'Error' push.log)" ] ) end_test begin_test "pre-push with missing and present pointers" ( set -e reponame="pre-push-missing-and-present" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" present="present" present_oid="$(calc_oid "$present")" printf "$present" > present.dat missing="missing" missing_oid="$(calc_oid "$missing")" printf "$missing" > missing.dat git add present.dat missing.dat git 
commit -m "add present.dat and missing.dat" git rm missing.dat git commit -m "remove missing" # :fire: the "missing" object missing_oid_part_1="$(echo "$missing_oid" | cut -b 1-2)" missing_oid_part_2="$(echo "$missing_oid" | cut -b 3-4)" missing_oid_path=".git/lfs/objects/$missing_oid_part_1/$missing_oid_part_2/$missing_oid" rm "$missing_oid_path" echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log if [ "0" -ne "${PIPESTATUS[1]}" ]; then echo >&2 "fatal: expected \`git lfs pre-push origin $GITSERVER/$reponame\` to succeed..." exit 1 fi grep "LFS upload missing objects" push.log grep " (missing) missing.dat ($missing_oid)" push.log assert_server_object "$reponame" "$present_oid" refute_server_object "$reponame" "$missing_oid" ) end_test begin_test "pre-push allowincompletepush=f reject missing pointers" ( set -e reponame="pre-push-reject-missing-and-present" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" present="present" present_oid="$(calc_oid "$present")" printf "$present" > present.dat missing="missing" missing_oid="$(calc_oid "$missing")" printf "$missing" > missing.dat git add present.dat missing.dat git commit -m "add present.dat and missing.dat" git rm missing.dat git commit -m "remove missing" # :fire: the "missing" object missing_oid_part_1="$(echo "$missing_oid" | cut -b 1-2)" missing_oid_part_2="$(echo "$missing_oid" | cut -b 3-4)" missing_oid_path=".git/lfs/objects/$missing_oid_part_1/$missing_oid_part_2/$missing_oid" rm "$missing_oid_path" git config "lfs.allowincompletepush" "false" echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log if [ "2" -ne "${PIPESTATUS[1]}" ]; then echo >&2 "fatal: expected \`git lfs pre-push origin $GITSERVER/$reponame\` to fail..." 
exit 1 fi grep "no such file or directory" push.log || # unix grep "cannot find the file" push.log # windows refute_server_object "$reponame" "$present_oid" refute_server_object "$reponame" "$missing_oid" ) end_test begin_test "pre-push multiple branches" ( set -e reponame="$(basename "$0" ".sh")-multiple-branches" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log NUMFILES=6 # generate content we'll use for ((a=0; a < NUMFILES ; a++)) do content[$a]="filecontent$a" oid[$a]=$(calc_oid "${content[$a]}") done echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[0]}, \"Data\":\"${content[0]}\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content[1]}, \"Data\":\"${content[1]}\"}] }, { \"NewBranch\":\"branch1\", \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file2.dat\",\"Size\":${#content[2]}, \"Data\":\"${content[2]}\"}] }, { \"ParentBranches\":[\"master\"], \"NewBranch\":\"branch2\", \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":${#content[3]}, \"Data\":\"${content[3]}\"}] }, { \"ParentBranches\":[\"master\"], \"NewBranch\":\"branch3\", \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[4]}, \"Data\":\"${content[4]}\"}] }, { \"ParentBranches\":[\"master\"], \"NewBranch\":\"branch4\", \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[5]}, \"Data\":\"${content[5]}\"}] } ]" | lfstest-testutils addcommits # make sure when called via git push all branches are updated git push origin master branch1 branch2 branch3 branch4 for ((a=0; a < NUMFILES ; a++)) do assert_server_object "$reponame" "${oid[$a]}" done ) end_test begin_test "pre-push with bad remote" ( set -e cd repo echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push not-a-remote "$GITSERVER/$reponame" 2>&1 | tee pre-push.log grep "Invalid remote name" pre-push.log ) end_test begin_test "pre-push unfetched deleted remote branch & server GC" ( # point of this is to simulate the case where the local cache of the remote # branch state contains a branch which has actually been deleted on the remote, # the client just doesn't know yet (hasn't done 'git fetch origin --prune') # If the server GC'd the objects that deleted branch contained, but they were # referenced by a branch being pushed (earlier commit), push might assume it # doesn't have to push it, but it does. 
Tests that we check the real remote refs # before making an assumption about the diff we need to push set -e reponame="$(basename "$0" ".sh")-server-deleted-branch-gc" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log NUMFILES=4 # generate content we'll use for ((a=0; a < NUMFILES ; a++)) do content[$a]="filecontent$a" oid[$a]=$(calc_oid "${content[$a]}") done echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[0]}, \"Data\":\"${content[0]}\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content[1]}, \"Data\":\"${content[1]}\"}] }, { \"NewBranch\":\"branch-to-delete\", \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":${#content[2]}, \"Data\":\"${content[2]}\"}] }, { \"NewBranch\":\"branch-to-push-after\", \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[3]}, \"Data\":\"${content[3]}\"}] } ]" | lfstest-testutils addcommits # push only the first 2 branches git push origin master branch-to-delete for ((a=0; a < 3 ; a++)) do assert_server_object "$reponame" "${oid[$a]}" done # confirm we haven't pushed the last one yet refute_server_object "$reponame" "${oid[3]}" # copy the cached remote ref for the branch we're going to delete remotely cp .git/refs/remotes/origin/branch-to-delete branch-to-delete.ref # now delete the branch on the server git push origin --delete branch-to-delete # remove the OID in it, as if GC'd delete_server_object "$reponame" "${oid[2]}" refute_server_object "$reponame" "${oid[2]}" # Now put the cached remote ref back, as if someone else had deleted it but # we hadn't done git fetch --prune yet mv branch-to-delete.ref .git/refs/remotes/origin/branch-to-delete # Confirm that local cache of remote branch is back git branch -r 2>&1 | tee branch-r.log grep "origin/branch-to-delete" branch-r.log # Now push later branch which should now need to re-push previous commits LFS too git push origin branch-to-push-after # all objects should now be there even though cached remote branch claimed it already had file3.dat for ((a=0; a < NUMFILES ; a++)) do assert_server_object "$reponame" "${oid[$a]}" done ) end_test begin_test "pre-push delete branch" ( set -e reponame="$(basename "$0" ".sh")-delete-branch" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log NUMFILES=4 # generate content we'll use for ((a=0; a < NUMFILES ; a++)) do content[$a]="filecontent$a" oid[$a]=$(calc_oid "${content[$a]}") done echo "[ { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[0]}, \"Data\":\"${content[0]}\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content[1]}, \"Data\":\"${content[1]}\"}] }, { \"NewBranch\":\"branch-to-delete\", \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":${#content[2]}, \"Data\":\"${content[2]}\"}] }, { \"ParentBranches\":[\"master\"], \"CommitDate\":\"$(get_date -0d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[3]}, \"Data\":\"${content[3]}\"}] } ]" | lfstest-testutils addcommits # push all branches git push origin master branch-to-delete for ((a=0; a < NUMFILES ; a++)) do assert_server_object "$reponame" "${oid[$a]}" done # deleting a branch with git push should not fail # (requires correct special casing of "(delete) 0000000000.." 
in hook) git push origin --delete branch-to-delete ) end_test begin_test "pre-push with our lock" ( set -e reponame="pre_push_owned_locks" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="locked contents" printf "$contents" > locked.dat git add locked.dat git commit -m "add locked.dat" git push origin master git lfs lock --json "locked.dat" | tee lock.log id=$(assert_lock lock.log locked.dat) assert_server_lock $id printf "authorized changes" >> locked.dat git add locked.dat git commit -m "add unauthorized changes" GIT_CURL_VERBOSE=1 git push origin master 2>&1 | tee push.log grep "Consider unlocking your own locked file(s)" push.log grep "* locked.dat" push.log assert_server_lock "$id" ) end_test begin_test "pre-push with their lock on lfs file" ( set -e reponame="pre_push_unowned_lock" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="locked contents" # any lock path with "theirs" is returned as "their" lock by /locks/verify printf "$contents" > locked_theirs.dat git add locked_theirs.dat git commit -m "add locked_theirs.dat" git push origin master git lfs lock --json "locked_theirs.dat" | tee lock.log id=$(assert_lock lock.log locked_theirs.dat) assert_server_lock $id pushd "$TRASHDIR" >/dev/null clone_repo "$reponame" "$reponame-assert" git config lfs.locksverify true printf "unauthorized changes" >> locked_theirs.dat git add locked_theirs.dat # --no-verify is used to avoid the pre-commit hook which is not under test git commit --no-verify -m "add unauthorized changes" git push origin master 2>&1 | tee push.log res="${PIPESTATUS[0]}" if [ "0" -eq "$res" ]; then echo "push should fail" exit 1 fi grep "Unable to push 1 locked file(s)" push.log grep "* locked_theirs.dat - Git LFS Tests" push.log grep "ERROR: Cannot update locked files." 
push.log refute_server_object "$reponame" "$(calc_oid_file locked_theirs.dat)" popd >/dev/null ) end_test begin_test "pre-push with their lock on non-lfs lockable file" ( set -e reponame="pre_push_unowned_lock_not_lfs" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" echo "*.dat lockable" > .gitattributes git add .gitattributes git commit -m "initial commit" # any lock path with "theirs" is returned as "their" lock by /locks/verify echo "hi" > readme.txt echo "tiny" > tiny_locked_theirs.dat git help > large_locked_theirs.dat git add readme.txt tiny_locked_theirs.dat large_locked_theirs.dat git commit -m "add initial files" git push origin master git lfs lock --json "tiny_locked_theirs.dat" | tee lock.log id=$(assert_lock lock.log tiny_locked_theirs.dat) assert_server_lock $id git lfs lock --json "large_locked_theirs.dat" | tee lock.log id=$(assert_lock lock.log large_locked_theirs.dat) assert_server_lock $id pushd "$TRASHDIR" >/dev/null clone_repo "$reponame" "$reponame-assert" git config lfs.locksverify true git lfs update # manually add pre-push hook, since lfs clean hook is not used echo "other changes" >> readme.txt echo "unauthorized changes" >> large_locked_theirs.dat echo "unauthorized changes" >> tiny_locked_theirs.dat # --no-verify is used to avoid the pre-commit hook which is not under test git commit --no-verify -am "add unauthorized changes" git push origin master 2>&1 | tee push.log res="${PIPESTATUS[0]}" if [ "0" -eq "$res" ]; then echo "push should fail" exit 1 fi grep "Unable to push 2 locked file(s)" push.log grep "* large_locked_theirs.dat - Git LFS Tests" push.log grep "* tiny_locked_theirs.dat - Git LFS Tests" push.log grep "ERROR: Cannot update locked files." push.log refute_server_object "$reponame" "$(calc_oid_file large_locked_theirs.dat)" refute_server_object "$reponame" "$(calc_oid_file tiny_locked_theirs.dat)" popd >/dev/null ) end_test begin_test "pre-push locks verify 5xx with verification enabled" ( set -e reponame="lock-enabled-verify-5xx" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git config "lfs.$endpoint.locksverify" true git push origin master 2>&1 | tee push.log grep "\"origin\" does not support the LFS locking API" push.log grep "git config lfs.$endpoint.locksverify false" push.log refute_server_object "$reponame" "$contents_oid" ) end_test begin_test "pre-push disable locks verify on exact url" ( set -e reponame="lock-disabled-verify-5xx" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git config "lfs.$endpoint.locksverify" false git push origin master 2>&1 | tee push.log [ "0" -eq "$(grep -c "\"origin\" does not support the LFS locking API" push.log)" ] assert_server_object "$reponame" "$contents_oid" ) end_test begin_test "pre-push disable locks verify on partial url" ( set -e reponame="lock-disabled-verify-5xx-partial" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$server/$repo" contents="example" contents_oid="$(calc_oid "$contents")" printf "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit 
--message "initial commit" git config "lfs.$endpoint.locksverify" false git push origin master 2>&1 | tee push.log [ "0" -eq "$(grep -c "\"origin\" does not support the LFS locking API" push.log)" ] assert_server_object "$reponame" "$contents_oid" ) end_test begin_test "pre-push locks verify 5xx with verification unset" ( set -e reponame="lock-unset-verify-5xx" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" [ -z "$(git config "lfs.$endpoint.locksverify")" ] git push origin master 2>&1 | tee push.log grep "\"origin\" does not support the LFS locking API" push.log assert_server_object "$reponame" "$contents_oid" ) end_test begin_test "pre-push locks verify 501 with verification enabled" ( set -e reponame="lock-enabled-verify-501" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git config "lfs.$endpoint.locksverify" true git push origin master 2>&1 | tee push.log assert_server_object "$reponame" "$contents_oid" [ "false" = "$(git config "lfs.$endpoint.locksverify")" ] ) end_test begin_test "pre-push locks verify 501 with verification disabled" ( set -e reponame="lock-disabled-verify-501" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git config "lfs.$endpoint.locksverify" false git push origin master 2>&1 | tee push.log assert_server_object "$reponame" "$contents_oid" [ "false" = "$(git config "lfs.$endpoint.locksverify")" ] ) end_test begin_test "pre-push locks verify 501 with verification unset" ( set -e reponame="lock-unset-verify-501" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" [ -z "$(git config "lfs.$endpoint.locksverify")" ] git push origin master 2>&1 | tee push.log assert_server_object "$reponame" "$contents_oid" [ "false" = "$(git config "lfs.$endpoint.locksverify")" ] ) end_test begin_test "pre-push locks verify 200" ( set -e reponame="lock-verify-200" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" [ -z "$(git config "lfs.$endpoint.locksverify")" ] contents="example" contents_oid="$(calc_oid "$contents")" printf "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git push origin master 2>&1 | tee push.log grep "Locking support detected on remote \"origin\"." 
push.log grep "git config lfs.$endpoint.locksverify true" push.log assert_server_object "$reponame" "$contents_oid" ) end_test begin_test "pre-push locks verify 403 with verification enabled" ( set -e reponame="lock-enabled-verify-403" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git config "lfs.$endpoint.locksverify" true git push origin master 2>&1 | tee push.log grep "ERROR: Authentication error" push.log refute_server_object "$reponame" "$contents_oid" [ "true" = "$(git config "lfs.$endpoint.locksverify")" ] ) end_test begin_test "pre-push locks verify 403 with verification disabled" ( set -e reponame="lock-disabled-verify-403" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" git config "lfs.$endpoint.locksverify" false git push origin master 2>&1 | tee push.log assert_server_object "$reponame" "$contents_oid" [ "false" = "$(git config "lfs.$endpoint.locksverify")" ] ) end_test begin_test "pre-push locks verify 403 with verification unset" ( set -e reponame="lock-unset-verify-403" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" endpoint="$(repo_endpoint $GITSERVER $reponame)" contents="example" contents_oid="$(calc_oid "$contents")" printf "$contents" > a.dat git lfs track "*.dat" git add .gitattributes a.dat git commit --message "initial commit" [ -z "$(git config "lfs.$endpoint.locksverify")" ] git push origin master 2>&1 | tee push.log grep "WARNING: Authentication error" push.log assert_server_object "$reponame" "$contents_oid" [ -z "$(git config "lfs.$endpoint.locksverify")" ] ) end_test git-lfs-2.3.4/test/test-progress-meter.sh000077500000000000000000000010521317167762300203510ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "progress meter displays positive progress" ( set -e reponame="progress-meter" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" for i in `seq 1 128`; do printf "$i" > "$i.dat" done git add *.dat git commit -m "add many objects" git push origin master 2>&1 | tee push.log [ "0" -eq "${PIPESTATUS[0]}" ] grep "Git LFS: (128 of 128 files) 276 B / 276 B" push.log ) end_test git-lfs-2.3.4/test/test-progress.sh000077500000000000000000000026531317167762300172470ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" reponame="$(basename "$0" ".sh")" begin_test "GIT_LFS_PROGRESS" ( set -e setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" echo "a" > a.dat echo "b" > b.dat echo "c" > c.dat echo "d" > d.dat echo "e" > e.dat git add .gitattributes *.dat git commit -m "add files" git push origin master 2>&1 | tee push.log grep "(5 of 5 files)" push.log cd .. 
GIT_LFS_PROGRESS="$TRASHDIR/progress.log" git lfs clone "$GITSERVER/$reponame" clone cat progress.log grep "download 1/5" progress.log grep "download 2/5" progress.log grep "download 3/5" progress.log grep "download 4/5" progress.log grep "download 5/5" progress.log GIT_LFS_SKIP_SMUDGE=1 git clone "$GITSERVER/$reponame" clone2 cd clone2 rm -rf "$TRASHDIR/progress.log" .git/lfs/objects GIT_LFS_PROGRESS="$TRASHDIR/progress.log" git lfs fetch --all cat ../progress.log grep "download 1/5" ../progress.log grep "download 2/5" ../progress.log grep "download 3/5" ../progress.log grep "download 4/5" ../progress.log grep "download 5/5" ../progress.log rm -rf "$TRASHDIR/progress.log" GIT_LFS_PROGRESS="$TRASHDIR/progress.log" git lfs checkout cat ../progress.log grep "checkout 1/5" ../progress.log grep "checkout 2/5" ../progress.log grep "checkout 3/5" ../progress.log grep "checkout 4/5" ../progress.log grep "checkout 5/5" ../progress.log ) end_test git-lfs-2.3.4/test/test-prune-worktree.sh000077500000000000000000000066011317167762300203710ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" ensure_git_version_isnt $VERSION_LOWER "2.5.0" begin_test "prune worktree" ( set -e reponame="prune_worktree" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log content_head="First checkout HEAD" content_worktree1head="Worktree 1 head" content_worktree2head="Worktree 2 head" content_oldcommit1="Always pruned 1" content_oldcommit2="Always pruned 2" content_oldcommit3="Always pruned 3" oid_head=$(calc_oid "$content_head") oid_worktree1head=$(calc_oid "$content_worktree1head") oid_worktree2head=$(calc_oid "$content_worktree2head") oid_oldcommit1=$(calc_oid "$content_oldcommit1") oid_oldcommit2=$(calc_oid "$content_oldcommit2") oid_oldcommit3=$(calc_oid "$content_oldcommit3") echo "[ { \"CommitDate\":\"$(get_date -40d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_oldcommit1}, \"Data\":\"$content_oldcommit1\"}] }, { \"CommitDate\":\"$(get_date -35d)\", \"NewBranch\":\"branch1\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_oldcommit2}, \"Data\":\"$content_oldcommit2\"}] }, { \"CommitDate\":\"$(get_date -20d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_worktree1head}, \"Data\":\"$content_worktree1head\"}] }, { \"CommitDate\":\"$(get_date -30d)\", \"ParentBranches\":[\"master\"], \"NewBranch\":\"branch2\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_oldcommit3}, \"Data\":\"$content_oldcommit3\"}] }, { \"CommitDate\":\"$(get_date -15d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_worktree2head}, \"Data\":\"$content_worktree2head\"}] }, { \"CommitDate\":\"$(get_date -30d)\", \"ParentBranches\":[\"master\"], \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_head}, \"Data\":\"$content_head\"}] } ]" | lfstest-testutils addcommits # push everything so that's not a retention issue git push origin master:master branch1:branch1 branch2:branch2 # don't keep any recent, just checkouts git config lfs.fetchrecentrefsdays 0 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 0 # before worktree, everything except current checkout would be pruned git lfs prune --dry-run 2>&1 | tee prune.log grep "6 local objects, 1 retained" prune.log grep "5 files would be pruned" prune.log # now add worktrees on the other branches git worktree add "../w1_$reponame" "branch1" git worktree add "../w2_$reponame" 
"branch2" # now should retain all 3 heads git lfs prune --dry-run 2>&1 | tee prune.log grep "6 local objects, 3 retained" prune.log grep "3 files would be pruned" prune.log # also check that the same result is obtained when inside worktree rather than main cd "../w1_$reponame" git lfs prune --dry-run 2>&1 | tee prune.log grep "6 local objects, 3 retained" prune.log grep "3 files would be pruned" prune.log # now remove a worktree & prove that frees up 1 head while keeping the other cd "../$reponame" rm -rf "../w1_$reponame" git worktree prune # required to get git to tidy worktree metadata git lfs prune --dry-run 2>&1 | tee prune.log grep "6 local objects, 2 retained" prune.log grep "4 files would be pruned" prune.log ) end_test git-lfs-2.3.4/test/test-prune.sh000077500000000000000000000525711317167762300165400ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "prune unreferenced and old" ( set -e reponame="prune_unref_old" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # generate content we'll use content_unreferenced="To delete: unreferenced" content_oldandpushed="To delete: pushed and too old" content_oldandunchanged="Keep: pushed and created a while ago, but still current" oid_unreferenced=$(calc_oid "$content_unreferenced") oid_oldandpushed=$(calc_oid "$content_oldandpushed") oid_oldandunchanged=$(calc_oid "$content_oldandunchanged") content_retain1="Retained content 1" content_retain2="Retained content 2" oid_retain1=$(calc_oid "$content_retain1") oid_retain2=$(calc_oid "$content_retain2") # Remember for something to be 'too old' it has to appear on the MINUS side # of the diff outside the prune window, i.e. it's not when it was introduced # but when it disappeared from relevance. 
That's why changes to old.dat on master # from 7d ago are included even though the commit itself is outside of the window: # that content of old.dat was relevant until it was removed by a commit inside the window. # Think of it as windows of relevance that overlap until the content is replaced. # We also make sure we commit today on master so that the recent commits, measured # from the latest commit on master, track back from there echo "[ { \"CommitDate\":\"$(get_date -20d)\", \"Files\":[ {\"Filename\":\"old.dat\",\"Size\":${#content_oldandpushed}, \"Data\":\"$content_oldandpushed\"}, {\"Filename\":\"stillcurrent.dat\",\"Size\":${#content_oldandunchanged}, \"Data\":\"$content_oldandunchanged\"}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"old.dat\",\"Size\":${#content_retain1}, \"Data\":\"$content_retain1\"}] }, { \"CommitDate\":\"$(get_date -4d)\", \"NewBranch\":\"branch_to_delete\", \"Files\":[ {\"Filename\":\"unreferenced.dat\",\"Size\":${#content_unreferenced}, \"Data\":\"$content_unreferenced\"}] }, { \"ParentBranches\":[\"master\"], \"Files\":[ {\"Filename\":\"old.dat\",\"Size\":${#content_retain2}, \"Data\":\"$content_retain2\"}] } ]" | lfstest-testutils addcommits git push origin master git branch -D branch_to_delete git config lfs.fetchrecentrefsdays 5 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 3 git config lfs.pruneoffsetdays 2 git lfs prune --dry-run --verbose 2>&1 | tee prune.log grep "5 local objects, 3 retained" prune.log grep "2 files would be pruned" prune.log grep "$oid_oldandpushed" prune.log grep "$oid_unreferenced" prune.log assert_local_object "$oid_oldandpushed" "${#content_oldandpushed}" assert_local_object "$oid_unreferenced" "${#content_unreferenced}" git lfs prune refute_local_object "$oid_oldandpushed" "${#content_oldandpushed}" refute_local_object "$oid_unreferenced" "${#content_unreferenced}" assert_local_object "$oid_retain1" "${#content_retain1}" assert_local_object "$oid_retain2" "${#content_retain2}" # now only keep AT refs, no recents git config lfs.fetchrecentcommitsdays 0 git lfs prune --verbose 2>&1 | tee prune.log grep "3 local objects, 2 retained" prune.log grep "Pruning 1 files" prune.log grep "$oid_retain1" prune.log refute_local_object "$oid_retain1" assert_local_object "$oid_retain2" "${#content_retain2}" ) end_test begin_test "prune keep unpushed" ( set -e # need to set up many commits on each branch with old data so that it would # get deleted if it were not for unpushed status (heads would never be pruned but old changes would) reponame="prune_keep_unpushed" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log content_keepunpushedhead1="Keep: unpushed HEAD 1" content_keepunpushedhead2="Keep: unpushed HEAD 2" content_keepunpushedhead3="Keep: unpushed HEAD 3" content_keepunpushedbranch1="Keep: unpushed second branch 1" content_keepunpushedbranch2="Keep: unpushed second branch 2" content_keepunpushedbranch3="Keep: unpushed second branch 3" oid_keepunpushedhead1=$(calc_oid "$content_keepunpushedhead1") oid_keepunpushedhead2=$(calc_oid "$content_keepunpushedhead2") oid_keepunpushedhead3=$(calc_oid "$content_keepunpushedhead3") oid_keepunpushedbranch1=$(calc_oid "$content_keepunpushedbranch1") oid_keepunpushedbranch2=$(calc_oid "$content_keepunpushedbranch2") oid_keepunpushedbranch3=$(calc_oid "$content_keepunpushedbranch3") oid_keepunpushedtagged1=$(calc_oid 
"$content_keepunpushedtagged1") oid_keepunpushedtagged2=$(calc_oid "$content_keepunpushedtagged1") echo "[ { \"CommitDate\":\"$(get_date -40d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keepunpushedhead1}, \"Data\":\"$content_keepunpushedhead1\"}] }, { \"CommitDate\":\"$(get_date -31d)\", \"ParentBranches\":[\"master\"], \"NewBranch\":\"branch_unpushed\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keepunpushedbranch1}, \"Data\":\"$content_keepunpushedbranch1\"}] }, { \"CommitDate\":\"$(get_date -16d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keepunpushedbranch2}, \"Data\":\"$content_keepunpushedbranch2\"}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keepunpushedbranch3}, \"Data\":\"$content_keepunpushedbranch3\"}] }, { \"CommitDate\":\"$(get_date -21d)\", \"ParentBranches\":[\"master\"], \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keepunpushedhead2}, \"Data\":\"$content_keepunpushedhead2\"}] }, { \"CommitDate\":\"$(get_date -0d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keepunpushedhead3}, \"Data\":\"$content_keepunpushedhead3\"}] } ]" | lfstest-testutils addcommits git config lfs.fetchrecentrefsdays 5 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 0 # only keep AT refs, no recents git config lfs.pruneoffsetdays 2 git lfs prune 2>&1 | tee prune.log grep "Nothing to prune" prune.log # Now push master and show that older versions on master will be removed git push origin master git lfs prune --verbose 2>&1 | tee prune.log grep "6 local objects, 4 retained" prune.log grep "Pruning 2 files" prune.log grep "$oid_keepunpushedhead1" prune.log grep "$oid_keepunpushedhead2" prune.log refute_local_object "$oid_keepunpushedhead1" refute_local_object "$oid_keepunpushedhead2" # MERGE the secondary branch, delete the branch then push master, then make sure # we delete the intermediate commits but also make sure they're on server # resolve conflicts by taking other branch git merge -Xtheirs branch_unpushed git branch -D branch_unpushed git lfs prune --dry-run | grep "Nothing to prune" git push origin master git lfs prune --verbose 2>&1 | tee prune.log grep "4 local objects, 1 retained" prune.log grep "Pruning 3 files" prune.log grep "$oid_keepunpushedbranch1" prune.log grep "$oid_keepunpushedbranch2" prune.log grep "$oid_keepunpushedhead3" prune.log refute_local_object "$oid_keepunpushedbranch1" refute_local_object "$oid_keepunpushedbranch2" # we used -Xtheirs so old head state is now obsolete, is the last state on branch refute_local_object "$oid_keepunpushedhead3" assert_server_object "remote_$reponame" "$oid_keepunpushedbranch1" assert_server_object "remote_$reponame" "$oid_keepunpushedbranch2" assert_server_object "remote_$reponame" "$oid_keepunpushedhead3" ) end_test begin_test "prune keep recent" ( set -e reponame="prune_recent" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log content_keephead="Keep: HEAD" content_keeprecentbranch1tip="Keep: Recent branch 1 tip" content_keeprecentbranch2tip="Keep: Recent branch 2 tip" content_keeprecentcommithead="Keep: Recent commit on HEAD" content_keeprecentcommitbranch1="Keep: Recent commit on recent branch 1" content_keeprecentcommitbranch2="Keep: Recent commit on recent branch 2" content_prunecommitoldbranch1="Prune: old commit on old branch" 
content_prunecommitoldbranch2="Prune: old branch tip" content_prunecommitbranch1="Prune: old commit on recent branch 1" content_prunecommitbranch2="Prune: old commit on recent branch 2" content_prunecommithead="Prune: old commit on HEAD" oid_keephead=$(calc_oid "$content_keephead") oid_keeprecentbranch1tip=$(calc_oid "$content_keeprecentbranch1tip") oid_keeprecentbranch2tip=$(calc_oid "$content_keeprecentbranch2tip") oid_keeprecentcommithead=$(calc_oid "$content_keeprecentcommithead") oid_keeprecentcommitbranch1=$(calc_oid "$content_keeprecentcommitbranch1") oid_keeprecentcommitbranch2=$(calc_oid "$content_keeprecentcommitbranch2") oid_prunecommitoldbranch=$(calc_oid "$content_prunecommitoldbranch1") oid_prunecommitoldbranch2=$(calc_oid "$content_prunecommitoldbranch2") oid_prunecommitbranch1=$(calc_oid "$content_prunecommitbranch1") oid_prunecommitbranch2=$(calc_oid "$content_prunecommitbranch2") oid_prunecommithead=$(calc_oid "$content_prunecommithead") # use a single file so each commit supercedes the last, if different files # then history becomes harder to track # Also note that when considering 'recent' when editing a single file, it means # that the snapshot state overlapped; so the latest commit *before* the day # that you're looking at, not just the commits on/after. echo "[ { \"CommitDate\":\"$(get_date -50d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_prunecommithead}, \"Data\":\"$content_prunecommithead\"}] }, { \"CommitDate\":\"$(get_date -30d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keeprecentcommithead}, \"Data\":\"$content_keeprecentcommithead\"}] }, { \"CommitDate\":\"$(get_date -8d)\", \"NewBranch\":\"branch_old\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_prunecommitoldbranch1}, \"Data\":\"$content_prunecommitoldbranch1\"}] }, { \"CommitDate\":\"$(get_date -7d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_prunecommitoldbranch2}, \"Data\":\"$content_prunecommitoldbranch2\"}] }, { \"CommitDate\":\"$(get_date -9d)\", \"ParentBranches\":[\"master\"], \"NewBranch\":\"branch1\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_prunecommitbranch1}, \"Data\":\"$content_prunecommitbranch1\"}] }, { \"CommitDate\":\"$(get_date -8d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keeprecentcommitbranch1}, \"Data\":\"$content_keeprecentcommitbranch1\"}] }, { \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keeprecentbranch1tip}, \"Data\":\"$content_keeprecentbranch1tip\"}] }, { \"CommitDate\":\"$(get_date -17d)\", \"ParentBranches\":[\"master\"], \"NewBranch\":\"branch2\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_prunecommitbranch2}, \"Data\":\"$content_prunecommitbranch2\"}] }, { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keeprecentcommitbranch2}, \"Data\":\"$content_keeprecentcommitbranch2\"}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keeprecentbranch2tip}, \"Data\":\"$content_keeprecentbranch2tip\"}] }, { \"CommitDate\":\"$(get_date -1d)\", \"ParentBranches\":[\"master\"], \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_keephead}, \"Data\":\"$content_keephead\"}] } ]" | lfstest-testutils addcommits # keep refs for 6 days & any prev commit that overlaps 2 days before tip (recent + offset) git config lfs.fetchrecentrefsdays 5 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 
1 git config lfs.pruneoffsetdays 1 # push everything so that's not a reason to retain git push origin master:master branch_old:branch_old branch1:branch1 branch2:branch2 git lfs prune --verbose 2>&1 | tee prune.log grep "11 local objects, 6 retained" prune.log grep "Pruning 5 files" prune.log grep "$oid_prunecommitoldbranch" prune.log grep "$oid_prunecommitoldbranch2" prune.log grep "$oid_prunecommitbranch1" prune.log grep "$oid_prunecommitbranch2" prune.log grep "$oid_prunecommithead" prune.log refute_local_object "$oid_prunecommitoldbranch" refute_local_object "$oid_prunecommitoldbranch2" refute_local_object "$oid_prunecommitbranch1" refute_local_object "$oid_prunecommitbranch2" refute_local_object "$oid_prunecommithead" assert_local_object "$oid_keephead" "${#content_keephead}" assert_local_object "$oid_keeprecentbranch1tip" "${#content_keeprecentbranch1tip}" assert_local_object "$oid_keeprecentbranch2tip" "${#content_keeprecentbranch2tip}" assert_local_object "$oid_keeprecentcommithead" "${#content_keeprecentcommithead}" assert_local_object "$oid_keeprecentcommitbranch1" "${#content_keeprecentcommitbranch1}" assert_local_object "$oid_keeprecentcommitbranch2" "${#content_keeprecentcommitbranch2}" # now don't include any recent commits in fetch & hence don't retain # still retain tips of branches git config lfs.fetchrecentcommitsdays 0 git lfs prune --verbose 2>&1 | tee prune.log grep "6 local objects, 3 retained" prune.log grep "Pruning 3 files" prune.log assert_local_object "$oid_keephead" "${#content_keephead}" assert_local_object "$oid_keeprecentbranch1tip" "${#content_keeprecentbranch1tip}" assert_local_object "$oid_keeprecentbranch2tip" "${#content_keeprecentbranch2tip}" refute_local_object "$oid_keeprecentcommithead" refute_local_object "$oid_keeprecentcommitbranch1" refute_local_object "$oid_keeprecentcommitbranch2" # now don't include any recent refs at all, only keep HEAD git config lfs.fetchrecentrefsdays 0 git lfs prune --verbose 2>&1 | tee prune.log grep "3 local objects, 1 retained" prune.log grep "Pruning 2 files" prune.log assert_local_object "$oid_keephead" "${#content_keephead}" refute_local_object "$oid_keeprecentbranch1tip" refute_local_object "$oid_keeprecentbranch2tip" ) end_test begin_test "prune remote tests" ( set -e reponame="prune_no_or_nonorigin_remote" git init "$reponame" cd "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log echo "[ { \"CommitDate\":\"$(get_date -50d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":30}] }, { \"CommitDate\":\"$(get_date -40d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":28}] }, { \"CommitDate\":\"$(get_date -35d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":37}] }, { \"CommitDate\":\"$(get_date -25d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":42}] } ]" | lfstest-testutils addcommits # set no recents so max ability to prune normally git config lfs.fetchrecentrefsdays 0 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 0 git config lfs.pruneoffsetdays 1 # can never prune with no remote git lfs prune --verbose 2>&1 | tee prune.log grep "4 local objects, 4 retained" prune.log grep "Nothing to prune" prune.log # also make sure nothing is pruned when remote is not origin # create 2 remotes, neither of which is called origin & push to both setup_remote_repo "remote1_$reponame" setup_remote_repo "remote2_$reponame" cd "$TRASHDIR/$reponame" git remote add not_origin "$GITSERVER/remote1_$reponame" git push not_origin master git lfs prune 
--verbose 2>&1 | tee prune.log grep "4 local objects, 4 retained" prune.log grep "Nothing to prune" prune.log # now set the prune remote to be not_origin, should now prune # do a dry run so we can also verify git config lfs.pruneremotetocheck not_origin git lfs prune --verbose --dry-run 2>&1 | tee prune.log grep "4 local objects, 1 retained" prune.log grep "3 files would be pruned" prune.log ) end_test begin_test "prune verify" ( set -e reponame="prune_verify" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log content_head="HEAD content" content_commit3="Content for commit 3 (prune)" content_commit2_failverify="Content for commit 2 (prune - fail verify)" content_commit1="Content for commit 1 (prune)" oid_head=$(calc_oid "$content_head") oid_commit3=$(calc_oid "$content_commit3") oid_commit2_failverify=$(calc_oid "$content_commit2_failverify") oid_commit1=$(calc_oid "$content_commit1") echo "[ { \"CommitDate\":\"$(get_date -50d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_commit1}, \"Data\":\"$content_commit1\"}] }, { \"CommitDate\":\"$(get_date -40d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_commit2_failverify}, \"Data\":\"$content_commit2_failverify\"}] }, { \"CommitDate\":\"$(get_date -35d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_commit3}, \"Data\":\"$content_commit3\"}] }, { \"CommitDate\":\"$(get_date -25d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_head}, \"Data\":\"$content_head\"}] } ]" | lfstest-testutils addcommits # push all so no unpushed reason to not prune git push origin master # set no recents so max ability to prune normally git config lfs.fetchrecentrefsdays 0 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 0 git config lfs.pruneoffsetdays 1 # confirm that it would prune with verify when no issues git lfs prune --dry-run --verify-remote --verbose 2>&1 | tee prune.log grep "4 local objects, 1 retained, 3 verified with remote" prune.log grep "3 files would be pruned" prune.log grep "$oid_commit3" prune.log grep "$oid_commit2_failverify" prune.log grep "$oid_commit1" prune.log # delete one file on the server to make the verify fail delete_server_object "remote_$reponame" "$oid_commit2_failverify" # this should now fail git lfs prune --verify-remote 2>&1 | tee prune.log grep "4 local objects, 1 retained, 2 verified with remote" prune.log grep "missing on remote:" prune.log grep "$oid_commit2_failverify" prune.log # Nothing should have been deleted assert_local_object "$oid_commit1" "${#content_commit1}" assert_local_object "$oid_commit2_failverify" "${#content_commit2_failverify}" assert_local_object "$oid_commit3" "${#content_commit3}" # Now test with the global option git config lfs.pruneverifyremotealways true # no verify arg but should be pulled from global git lfs prune 2>&1 | tee prune.log grep "4 local objects, 1 retained, 2 verified with remote" prune.log grep "missing on remote:" prune.log grep "$oid_commit2_failverify" prune.log # Nothing should have been deleted assert_local_object "$oid_commit1" "${#content_commit1}" assert_local_object "$oid_commit2_failverify" "${#content_commit2_failverify}" assert_local_object "$oid_commit3" "${#content_commit3}" # now try overriding the global option git lfs prune --no-verify-remote 2>&1 | tee prune.log grep "4 local objects, 1 retained" prune.log grep "Pruning 3 files" prune.log # should now have been 
deleted refute_local_object "$oid_commit1" refute_local_object "$oid_commit2_failverify" refute_local_object "$oid_commit3" ) end_test begin_test "prune verify large numbers of refs" ( set -e reponame="prune_verify_large" setup_remote_repo "remote_$reponame" clone_repo "remote_$reponame" "clone_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log content_head="HEAD content" content_commit1="Recent commit" content_oldcommit="Old content" oid_head=$(calc_oid "$content_head") # Add two recent commits that should not be pruned echo "[ { \"CommitDate\":\"$(get_date -50d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_oldcommit}, \"Data\":\"$(uuidgen)\"}] }, { \"CommitDate\":\"$(get_date -45d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_oldcommit}, \"Data\":\"$(uuidgen)\"}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_commit1}, \"Data\":\"$content_commit1\"}] }, { \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file.dat\",\"Size\":${#content_head}, \"Data\":\"$content_head\"}] } ]" | lfstest-testutils addcommits # Generate a large number of refs to old commits make sure prune has a lot of data to read git checkout $(git log --pretty=oneline master | tail -2 | awk '{print $1}' | head -1) for i in $(seq 0 1000); do git tag v$i done git checkout master # push all so no unpushed reason to not prune # git push origin master # set no recents so max ability to prune normally git config lfs.fetchrecentrefsdays 3 git config lfs.fetchrecentremoterefs true git config lfs.fetchrecentcommitsdays 3 git config lfs.pruneoffsetdays 3 # confirm that prune does not hang git lfs prune --dry-run --verify-remote --verbose 2>&1 | tee prune.log ) end_test git-lfs-2.3.4/test/test-pull.sh000077500000000000000000000121131317167762300163470ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "pull" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" clone clone_repo "$reponame" repo git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") contents2="A" contents2_oid=$(calc_oid "$contents2") contents3="dir" contents3_oid=$(calc_oid "$contents3") mkdir dir echo "*.log" > .gitignore printf "$contents" > a.dat printf "$contents2" > á.dat printf "$contents3" > dir/dir.dat git add . 
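# note that what "git add" stages for each *.dat path is a small plain-text LFS pointer rather than the raw content; the pointer is roughly: # version https://git-lfs.github.com/spec/v1 # oid sha256:<sha256 of the content> # size <content length in bytes> # and the assert_pointer calls below verify those fields in the committed blobs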
git commit -m "add files" 2>&1 | tee commit.log grep "master (root-commit)" commit.log grep "5 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log ls -al [ "a" = "$(cat a.dat)" ] [ "A" = "$(cat "á.dat")" ] [ "dir" = "$(cat "dir/dir.dat")" ] assert_pointer "master" "a.dat" "$contents_oid" 1 assert_pointer "master" "á.dat" "$contents2_oid" 1 assert_pointer "master" "dir/dir.dat" "$contents3_oid" 3 refute_server_object "$reponame" "$contents_oid" refute_server_object "$reponame" "$contents2_oid" refute_server_object "$reponame" "$contents33oid" echo "initial push" git push origin master 2>&1 | tee push.log grep "(3 of 3 files)" push.log grep "master -> master" push.log assert_server_object "$reponame" "$contents_oid" assert_server_object "$reponame" "$contents2_oid" assert_server_object "$reponame" "$contents3_oid" # change to the clone's working directory cd ../clone echo "normal pull" git pull 2>&1 [ "a" = "$(cat a.dat)" ] [ "A" = "$(cat "á.dat")" ] assert_local_object "$contents_oid" 1 assert_local_object "$contents2_oid" 1 assert_clean_status echo "lfs pull" rm -r a.dat á.dat dir # removing files makes the status dirty rm -rf .git/lfs/objects git lfs pull 2>&1 | grep "(3 of 3 files)" ls -al [ "a" = "$(cat a.dat)" ] [ "A" = "$(cat "á.dat")" ] assert_local_object "$contents_oid" 1 assert_local_object "$contents2_oid" 1 echo "lfs pull with remote" rm -r a.dat á.dat dir rm -rf .git/lfs/objects git lfs pull origin 2>&1 | grep "(3 of 3 files)" [ "a" = "$(cat a.dat)" ] [ "A" = "$(cat "á.dat")" ] assert_local_object "$contents_oid" 1 assert_local_object "$contents2_oid" 1 assert_clean_status echo "lfs pull with local storage" rm a.dat á.dat git lfs pull [ "a" = "$(cat a.dat)" ] [ "A" = "$(cat "á.dat")" ] assert_clean_status echo "lfs pull with include/exclude filters in gitconfig" rm -rf .git/lfs/objects git config "lfs.fetchinclude" "a*" git lfs pull assert_local_object "$contents_oid" 1 assert_clean_status rm -rf .git/lfs/objects git config --unset "lfs.fetchinclude" git config "lfs.fetchexclude" "a*" git lfs pull refute_local_object "$contents_oid" assert_clean_status echo "lfs pull with include/exclude filters in command line" git config --unset "lfs.fetchexclude" rm -rf .git/lfs/objects git lfs pull --include="a*" assert_local_object "$contents_oid" 1 assert_clean_status rm -rf .git/lfs/objects git lfs pull --exclude="a*" refute_local_object "$contents_oid" assert_clean_status echo "resetting to test status" git reset --hard assert_clean_status echo "lfs pull clean status" git lfs pull assert_clean_status echo "lfs pull with -I" git lfs pull -I "*.dat" assert_clean_status echo "lfs pull in subdir" cd dir git lfs pull assert_clean_status echo "lfs pull in subdir with -I" git lfs pull -I "*.dat" assert_clean_status ) end_test begin_test "pull with raw remote url" ( set -e mkdir raw cd raw git init git lfs install --local --skip-smudge git remote add origin $GITSERVER/test-pull git pull origin master contents="a" contents_oid=$(calc_oid "$contents") # LFS object not downloaded, pointer in working directory refute_local_object "$contents_oid" grep "$contents_oid" a.dat git lfs pull "$GITSERVER/test-pull" echo "pulled!" 
# LFS object downloaded and in working directory assert_local_object "$contents_oid" 1 [ "0" = "$(grep -c "$contents_oid" a.dat)" ] [ "a" = "$(cat a.dat)" ] ) end_test begin_test "pull: with missing object" ( set -e # this clone is set up in the first test in this file cd clone rm -rf .git/lfs/objects contents_oid=$(calc_oid "a") reponame="$(basename "$0" ".sh")" delete_server_object "$reponame" "$contents_oid" refute_server_object "$reponame" "$contents_oid" # should return non-zero, but should also download all the other valid files too git lfs pull 2>&1 | tee pull.log pull_exit="${PIPESTATUS[0]}" [ "$pull_exit" != "0" ] grep "$contents_oid" pull.log contents2_oid=$(calc_oid "A") assert_local_object "$contents2_oid" 1 refute_local_object "$contents_oid" ) end_test begin_test "pull: outside git repository" ( set +e git lfs pull 2>&1 > pull.log res=$? set -e if [ "$res" = "0" ]; then echo "Passes because $GIT_LFS_TEST_DIR is unset." exit 0 fi [ "$res" = "128" ] grep "Not in a git repository" pull.log ) end_test git-lfs-2.3.4/test/test-push-bad-dns.sh000077500000000000000000000013431317167762300176630ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" ensure_git_version_isnt $VERSION_LOWER "2.3.0" begin_test "push: upload to bad dns" ( set -e reponame="$(basename "$0" ".sh")-bad-dns" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" printf "hi" > good.dat git add .gitattributes good.dat git commit -m "welp" port="$(echo "http://127.0.0.1:63378" | cut -f 3 -d ":")" git config lfs.url "http://git-lfs-bad-dns:$port" set +e GIT_TERMINAL_PROMPT=0 git push origin master 2>&1 | tee push.log res="${PIPESTATUS[0]}" set -e refute_server_object "$reponame" "$(calc_oid "hi")" if [ "$res" = "0" ]; then cat push.log echo "push successful?" exit 1 fi ) end_test git-lfs-2.3.4/test/test-push-failures.sh000077500000000000000000000044111317167762300201640ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" # push_fail_test performs a test expecting a `git lfs push` to fail given the # contents of a particular file contained within that push. The Git server used # during tests has certain special cases that are triggered by finding specific # keywords within a file (as given by the first argument). # # An optional second argument, "msg", can be included to assert that the # contents "msg" appear in the output of a `git lfs push`. push_fail_test() { local contents="$1" local msg="$2" set -e local reponame="$(basename "$0" ".sh")-$contents" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" printf "hi" > good.dat printf "$contents" > bad.dat git add .gitattributes good.dat bad.dat git commit -m "welp" set +e git push origin master 2>&1 | tee push.log res="${PIPESTATUS[0]}" set -e if [ ! -z "$msg" ]; then grep "$msg" push.log fi refute_server_object "$reponame" "$(calc_oid "$contents")" if [ "$res" = "0" ]; then echo "push successful?" 
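# (reaching this branch means the push pipeline exited 0, i.e. the server accepted an object this helper expected it to reject)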
exit 1 fi } begin_test "push: upload file with storage 403" ( set -e push_fail_test "status-storage-403" ) end_test begin_test "push: upload file with storage 404" ( set -e push_fail_test "status-storage-404" ) end_test begin_test "push: upload file with storage 410" ( set -e push_fail_test "status-storage-410" ) end_test begin_test "push: upload file with storage 422" ( set -e push_fail_test "status-storage-422" ) end_test begin_test "push: upload file with storage 500" ( set -e push_fail_test "status-storage-500" ) end_test begin_test "push: upload file with storage 503" ( set -e push_fail_test "status-storage-503" "LFS is temporarily unavailable" ) end_test begin_test "push: upload file with api 403" ( set -e push_fail_test "status-batch-403" ) end_test begin_test "push: upload file with api 404" ( set -e push_fail_test "status-batch-404" ) end_test begin_test "push: upload file with api 410" ( set -e push_fail_test "status-batch-410" ) end_test begin_test "push: upload file with api 422" ( set -e push_fail_test "status-batch-422" ) end_test begin_test "push: upload file with api 500" ( set -e push_fail_test "status-batch-500" ) end_test git-lfs-2.3.4/test/test-push-file-with-branch-name.sh000077500000000000000000000006441317167762300224170ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "push a file with the same name as a branch" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "master" echo "master" > master git add .gitattributes master git commit -m "add master" git lfs push --all origin master 2>&1 | tee push.log grep "(1 of 1 files)" push.log ) end_test git-lfs-2.3.4/test/test-push-missing.sh000077500000000000000000000034631317167762300200310ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "push missing objects" ( set -e reponame="push-missing-objects" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" missing="missing" missing_oid="$(calc_oid "$missing")" missing_len="$(printf "$missing" | wc -c | awk '{ print $1 }')" printf "$missing" > missing.dat git add missing.dat git commit -m "add missing.dat" corrupt="corrupt" corrupt_oid="$(calc_oid "$corrupt")" corrupt_len="$(printf "$corrupt" | wc -c | awk '{ print $1 }')" printf "$corrupt" > corrupt.dat git add corrupt.dat git commit -m "add corrupt.dat" present="present" present_oid="$(calc_oid "$present")" present_len="$(printf "$present" | wc -c | awk '{ print $1 }')" printf "$present" > present.dat git add present.dat git commit -m "add present.dat" assert_local_object "$missing_oid" "$missing_len" assert_local_object "$corrupt_oid" "$corrupt_len" assert_local_object "$present_oid" "$present_len" delete_local_object "$missing_oid" corrupt_local_object "$corrupt_oid" refute_local_object "$missing_oid" refute_local_object "$corrupt_oid" "$corrupt_len" assert_local_object "$present_oid" "$present_len" git config lfs.allowincompletepush false git push origin master 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected 'git push origin master' to exit with non-zero code" exit 1 fi grep "LFS upload failed:" push.log grep " (missing) missing.dat ($missing_oid)" push.log grep " (corrupt) corrupt.dat ($corrupt_oid)" push.log refute_server_object "$reponame" "$missing_oid" refute_server_object "$reponame" "$corrupt_oid" assert_server_object "$reponame" "$present_oid" ) end_test git-lfs-2.3.4/test/test-push.sh000077500000000000000000000513741317167762300163660ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "push" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame").locksverify" true git lfs track "*.dat" echo "push a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git lfs push --dry-run origin master 2>&1 | tee push.log grep "push 4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340 => a.dat" push.log [ $(grep -c "push" push.log) -eq 1 ] git lfs push origin master 2>&1 | tee push.log grep "(1 of 1 files)" push.log git checkout -b push-b echo "push b" > b.dat git add b.dat git commit -m "add b.dat" git lfs push --dry-run origin push-b 2>&1 | tee push.log grep "push 4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340 => a.dat" push.log grep "push 82be50ad35070a4ef3467a0a650c52d5b637035e7ad02c36652e59d01ba282b7 => b.dat" push.log [ $(grep -c "push" < push.log) -eq 2 ] # simulate remote ref mkdir -p .git/refs/remotes/origin git rev-parse HEAD > .git/refs/remotes/origin/HEAD git lfs push --dry-run origin push-b 2>&1 | tee push.log [ $(grep -c "push" push.log) -eq 0 ] rm -rf .git/refs/remotes git lfs push origin push-b 2>&1 | tee push.log grep "(1 of 1 files, 1 skipped)" push.log ) end_test # sets up the tests for the next few push --all tests push_all_setup() { suffix="$1" reponame="$(basename "$0" ".sh")-all" content1="initial" content2="update" content3="branch" content4="tagged" content5="master" extracontent="extra" oid1=$(calc_oid "$content1") oid2=$(calc_oid "$content2") oid3=$(calc_oid "$content3") oid4=$(calc_oid "$content4") oid5=$(calc_oid "$content5") extraoid=$(calc_oid "$extracontent") # if the local repo exists, it has already been bootstrapped [ -d "push-all" ] && exit 0 clone_repo "$reponame" "push-all" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame").locksverify" true git lfs track "*.dat" echo "[ { \"CommitDate\":\"$(get_date -6m)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content1},\"Data\":\"$content1\"} ] }, { \"CommitDate\":\"$(get_date -5m)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content2},\"Data\":\"$content2\"} ] }, { \"CommitDate\":\"$(get_date -4m)\", \"NewBranch\":\"branch\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content3},\"Data\":\"$content3\"} ] }, { \"CommitDate\":\"$(get_date -4m)\", \"ParentBranches\":[\"master\"], \"Tags\":[\"tag\"], \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content4},\"Data\":\"$content4\"} ] }, { \"CommitDate\":\"$(get_date -2m)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content5},\"Data\":\"$content5\"}, {\"Filename\":\"file2.dat\",\"Size\":${#extracontent},\"Data\":\"$extracontent\"} ] } ]" | lfstest-testutils addcommits git rm file2.dat git commit -m "remove file2.dat" # simulate remote ref mkdir -p .git/refs/remotes/origin git rev-parse HEAD > .git/refs/remotes/origin/HEAD setup_alternate_remote "$reponame-$suffix" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame-$suffix").locksverify" true } begin_test "push --all (no ref args)" ( set -e push_all_setup "everything" git lfs push --dry-run --all origin 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid3 => file1.dat" push.log grep "push $oid4 => file1.dat" push.log grep "push $oid5 => file1.dat" push.log grep "push $extraoid => file2.dat" push.log [ $(grep -c "push" < push.log) -eq 6 ] git push --all origin 2>&1 | tee push.log [ $(grep -c "(6 of 6 files)" push.log) -eq 1 ] 
assert_server_object "$reponame-$suffix" "$oid1" assert_server_object "$reponame-$suffix" "$oid2" assert_server_object "$reponame-$suffix" "$oid3" assert_server_object "$reponame-$suffix" "$oid4" assert_server_object "$reponame-$suffix" "$oid5" assert_server_object "$reponame-$suffix" "$extraoid" echo "push while missing old objects locally" setup_alternate_remote "$reponame-$suffix-2" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame-$suffix-2").locksverify" true git lfs push --object-id origin $oid1 assert_server_object "$reponame-$suffix-2" "$oid1" refute_server_object "$reponame-$suffix-2" "$oid2" refute_server_object "$reponame-$suffix-2" "$oid3" refute_server_object "$reponame-$suffix-2" "$oid4" refute_server_object "$reponame-$suffix-2" "$oid5" refute_server_object "$reponame-$suffix-2" "$extraoid" rm ".git/lfs/objects/${oid1:0:2}/${oid1:2:2}/$oid1" echo "dry run missing local object that exists on server" git lfs push --dry-run --all origin 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid3 => file1.dat" push.log grep "push $oid4 => file1.dat" push.log grep "push $oid5 => file1.dat" push.log grep "push $extraoid => file2.dat" push.log [ $(grep -c "push" push.log) -eq 6 ] git push --all origin 2>&1 | tee push.log grep "(5 of 5 files, 1 skipped)" push.log [ $(grep -c "files" push.log) -eq 1 ] [ $(grep -c "skipped" push.log) -eq 1 ] assert_server_object "$reponame-$suffix-2" "$oid2" assert_server_object "$reponame-$suffix-2" "$oid3" assert_server_object "$reponame-$suffix-2" "$oid4" assert_server_object "$reponame-$suffix-2" "$oid5" assert_server_object "$reponame-$suffix-2" "$extraoid" ) end_test begin_test "push --all (1 ref arg)" ( set -e push_all_setup "ref" git lfs push --dry-run --all origin branch 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid3 => file1.dat" push.log [ $(grep -c "push" < push.log) -eq 3 ] git lfs push --all origin branch 2>&1 | tee push.log grep "3 files" push.log assert_server_object "$reponame-$suffix" "$oid1" assert_server_object "$reponame-$suffix" "$oid2" assert_server_object "$reponame-$suffix" "$oid3" refute_server_object "$reponame-$suffix" "$oid4" # in master and the tag refute_server_object "$reponame-$suffix" "$oid5" refute_server_object "$reponame-$suffix" "$extraoid" echo "push while missing old objects locally" setup_alternate_remote "$reponame-$suffix-2" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame-$suffix-2").locksverify" true git lfs push --object-id origin $oid1 assert_server_object "$reponame-$suffix-2" "$oid1" refute_server_object "$reponame-$suffix-2" "$oid2" refute_server_object "$reponame-$suffix-2" "$oid3" refute_server_object "$reponame-$suffix-2" "$oid4" refute_server_object "$reponame-$suffix-2" "$oid5" refute_server_object "$reponame-$suffix-2" "$extraoid" rm ".git/lfs/objects/${oid1:0:2}/${oid1:2:2}/$oid1" # dry run doesn't change git lfs push --dry-run --all origin branch 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid3 => file1.dat" push.log [ $(grep -c "push" push.log) -eq 3 ] git push --all origin branch 2>&1 | tee push.log grep "5 files, 1 skipped" push.log # should be 5? 
assert_server_object "$reponame-$suffix-2" "$oid2" assert_server_object "$reponame-$suffix-2" "$oid3" refute_server_object "$reponame-$suffix-2" "$oid4" refute_server_object "$reponame-$suffix-2" "$oid5" refute_server_object "$reponame-$suffix-2" "$extraoid" ) end_test begin_test "push --all (multiple ref args)" ( set -e push_all_setup "multiple-refs" git lfs push --dry-run --all origin branch tag 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid3 => file1.dat" push.log grep "push $oid4 => file1.dat" push.log [ $(grep -c "push" push.log) -eq 4 ] git lfs push --all origin branch tag 2>&1 | tee push.log grep "4 files" push.log assert_server_object "$reponame-$suffix" "$oid1" assert_server_object "$reponame-$suffix" "$oid2" assert_server_object "$reponame-$suffix" "$oid3" assert_server_object "$reponame-$suffix" "$oid4" refute_server_object "$reponame-$suffix" "$oid5" # only in master refute_server_object "$reponame-$suffix" "$extraoid" echo "push while missing old objects locally" setup_alternate_remote "$reponame-$suffix-2" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame-$suffix-2").locksverify" true git lfs push --object-id origin $oid1 assert_server_object "$reponame-$suffix-2" "$oid1" refute_server_object "$reponame-$suffix-2" "$oid2" refute_server_object "$reponame-$suffix-2" "$oid3" refute_server_object "$reponame-$suffix-2" "$oid4" refute_server_object "$reponame-$suffix-2" "$oid5" refute_server_object "$reponame-$suffix-2" "$extraoid" rm ".git/lfs/objects/${oid1:0:2}/${oid1:2:2}/$oid1" # dry run doesn't change git lfs push --dry-run --all origin branch tag 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid3 => file1.dat" push.log grep "push $oid4 => file1.dat" push.log [ $(grep -c "push" push.log) -eq 3 ] git push --all origin branch tag 2>&1 | tee push.log grep "5 files, 1 skipped" push.log # should be 5? 
assert_server_object "$reponame-$suffix-2" "$oid2" assert_server_object "$reponame-$suffix-2" "$oid3" assert_server_object "$reponame-$suffix-2" "$oid4" refute_server_object "$reponame-$suffix-2" "$oid5" refute_server_object "$reponame-$suffix-2" "$extraoid" ) end_test begin_test "push --all (ref with deleted files)" ( set -e push_all_setup "ref-with-deleted" git lfs push --dry-run --all origin master 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid4 => file1.dat" push.log grep "push $oid5 => file1.dat" push.log grep "push $extraoid => file2.dat" push.log [ $(grep -c "push" push.log) -eq 5 ] git lfs push --all origin master 2>&1 | tee push.log grep "5 files" push.log assert_server_object "$reponame-$suffix" "$oid1" assert_server_object "$reponame-$suffix" "$oid2" refute_server_object "$reponame-$suffix" "$oid3" # only in the branch assert_server_object "$reponame-$suffix" "$oid4" assert_server_object "$reponame-$suffix" "$oid5" assert_server_object "$reponame-$suffix" "$extraoid" echo "push while missing old objects locally" setup_alternate_remote "$reponame-$suffix-2" git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame-$suffix-2").locksverify" true git lfs push --object-id origin $oid1 assert_server_object "$reponame-$suffix-2" "$oid1" refute_server_object "$reponame-$suffix-2" "$oid2" refute_server_object "$reponame-$suffix-2" "$oid3" refute_server_object "$reponame-$suffix-2" "$oid4" refute_server_object "$reponame-$suffix-2" "$oid5" refute_server_object "$reponame-$suffix-2" "$extraoid" rm ".git/lfs/objects/${oid1:0:2}/${oid1:2:2}/$oid1" # dry run doesn't change git lfs push --dry-run --all origin master 2>&1 | tee push.log grep "push $oid1 => file1.dat" push.log grep "push $oid2 => file1.dat" push.log grep "push $oid4 => file1.dat" push.log grep "push $oid5 => file1.dat" push.log grep "push $extraoid => file2.dat" push.log [ $(grep -c "push" push.log) -eq 5 ] git push --all origin master 2>&1 | tee push.log grep "5 files, 1 skipped" push.log # should be 5? 
assert_server_object "$reponame-$suffix-2" "$oid2" refute_server_object "$reponame-$suffix-2" "$oid3" assert_server_object "$reponame-$suffix-2" "$oid4" assert_server_object "$reponame-$suffix-2" "$oid5" assert_server_object "$reponame-$suffix-2" "$extraoid" ) end_test begin_test "push object id(s)" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo2 git config "lfs.$(repo_endpoint "$GITSERVER" "$reponame").locksverify" true git lfs track "*.dat" echo "push a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" git lfs push --object-id origin \ 4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340 \ 2>&1 | tee push.log grep "(0 of 0 files, 1 skipped)" push.log echo "push b" > b.dat git add b.dat git commit -m "add b.dat" git lfs push --object-id origin \ 4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340 \ 82be50ad35070a4ef3467a0a650c52d5b637035e7ad02c36652e59d01ba282b7 \ 2>&1 | tee push.log grep "(0 of 0 files, 2 skipped)" push.log ) end_test begin_test "push modified files" ( set -e reponame="$(basename "$0" ".sh")-modified" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" # generate content we'll use content1="filecontent1" content2="filecontent2" content3="filecontent3" content4="filecontent4" content5="filecontent5" oid1=$(calc_oid "$content1") oid2=$(calc_oid "$content2") oid3=$(calc_oid "$content3") oid4=$(calc_oid "$content4") oid5=$(calc_oid "$content5") echo "[ { \"CommitDate\":\"$(get_date -6m)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content1}, \"Data\":\"$content1\"}] }, { \"CommitDate\":\"$(get_date -3m)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content2}, \"Data\":\"$content2\"}] }, { \"CommitDate\":\"$(get_date -1m)\", \"NewBranch\":\"other_branch\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content5}, \"Data\":\"$content5\"}] }, { \"CommitDate\":\"$(get_date -1m)\", \"ParentBranches\":[\"master\"], \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content3}, \"Data\":\"$content3\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content4}, \"Data\":\"$content4\"}] } ]" | lfstest-testutils addcommits git lfs push origin master git lfs push origin other_branch assert_server_object "$reponame" "$oid1" assert_server_object "$reponame" "$oid2" assert_server_object "$reponame" "$oid3" assert_server_object "$reponame" "$oid4" assert_server_object "$reponame" "$oid5" ) end_test begin_test "push with invalid remote" ( set -e cd repo git lfs push not-a-remote 2>&1 | tee push.log grep "Invalid remote name" push.log ) end_test begin_test "push ambiguous branch name" ( set -e reponame="$(basename "$0" ".sh")-ambiguous-branch" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log NUMFILES=5 # generate content we'll use for ((a=0; a < NUMFILES ; a++)) do content[$a]="filecontent$a" oid[$a]=$(calc_oid "${content[$a]}") done echo "[ { \"CommitDate\":\"$(get_date -10d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[0]}, \"Data\":\"${content[0]}\"}, {\"Filename\":\"file2.dat\",\"Size\":${#content[1]}, \"Data\":\"${content[1]}\"}] }, { \"NewBranch\":\"ambiguous\", \"CommitDate\":\"$(get_date -5d)\", \"Files\":[ {\"Filename\":\"file3.dat\",\"Size\":${#content[2]}, \"Data\":\"${content[2]}\"}] }, { \"CommitDate\":\"$(get_date -2d)\", \"Files\":[ {\"Filename\":\"file4.dat\",\"Size\":${#content[3]}, \"Data\":\"${content[3]}\"}] }, { 
\"ParentBranches\":[\"master\"], \"CommitDate\":\"$(get_date -1d)\", \"Files\":[ {\"Filename\":\"file1.dat\",\"Size\":${#content[4]}, \"Data\":\"${content[4]}\"}] } ]" | lfstest-testutils addcommits # create tag with same name as branch git tag ambiguous # lfs push master, should work git lfs push origin master # push ambiguous, should fail set +e git lfs push origin ambiguous if [ $? -eq 0 ] then exit 1 fi set -e ) end_test begin_test "push (retry with expired actions)" ( set -e reponame="push_retry_expired_action" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" contents="return-expired-action" contents_oid="$(calc_oid "$contents")" contents_size="$(printf "$contents" | wc -c | awk '{ print $1 }')" printf "$contents" > a.dat git add .gitattributes a.dat git commit -m "add a.dat, .gitattributes" 2>&1 | tee commit.log grep "master (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log GIT_TRACE=1 git push origin master 2>&1 | tee push.log expected="enqueue retry #1 for \"$contents_oid\" (size: $contents_size): LFS: tq: action \"upload\" expires at" grep "$expected" push.log grep "(1 of 1 files)" push.log ) end_test begin_test "push to raw remote url" ( set -e setup_remote_repo "push-raw" mkdir push-raw cd push-raw git init git lfs track "*.dat" contents="raw" contents_oid=$(calc_oid "$contents") printf "$contents" > raw.dat git add raw.dat .gitattributes git commit -m "add" 2>&1 | tee commit.log grep "master (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 raw.dat" commit.log grep "create mode 100644 .gitattributes" commit.log refute_server_object push-raw "$contents_oid" git lfs push $GITSERVER/push-raw master assert_server_object push-raw "$contents_oid" ) end_test begin_test "push (with invalid object size)" ( set -e reponame="push-invalid-object-size" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" contents="return-invalid-size" printf "$contents" > a.dat git add a.dat .gitattributes git commit -m "add a.dat, .gitattributes" 2>&1 | tee commit.log grep "master (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log set +e git push origin master 2>&1 2> push.log res="$?" 
set -e grep "invalid size (got: -1)" push.log [ "0" -ne "$res" ] refute_server_object "$reponame" "$(calc_oid "$contents")" ) end_test begin_test "push with deprecated _links" ( set -e reponame="$(basename "$0" ".sh")-deprecated" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="send-deprecated-links" contents_oid="$(calc_oid "$contents")" printf "$contents" > a.dat git add a.dat git commit -m "add a.dat" git push origin master assert_server_object "$reponame" "$contents_oid" ) begin_test "push with missing objects (lfs.allowincompletepush=t)" ( set -e reponame="push-with-missing-objects" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" present="present" present_oid="$(calc_oid "$present")" printf "$present" > present.dat missing="missing" missing_oid="$(calc_oid "$missing")" printf "$missing" > missing.dat git add missing.dat present.dat git commit -m "add objects" git rm missing.dat git commit -m "remove missing" # :fire: the "missing" object missing_oid_part_1="$(echo "$missing_oid" | cut -b 1-2)" missing_oid_part_2="$(echo "$missing_oid" | cut -b 3-4)" missing_oid_path=".git/lfs/objects/$missing_oid_part_1/$missing_oid_part_2/$missing_oid" rm "$missing_oid_path" git push origin master 2>&1 | tee push.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git push origin master\` to succeed ..." exit 1 fi grep "LFS upload missing objects" push.log grep " (missing) missing.dat ($missing_oid)" push.log assert_server_object "$reponame" "$present_oid" refute_server_object "$reponame" "$missing_oid" ) end_test begin_test "push reject missing objects (lfs.allowincompletepush=f)" ( set -e reponame="push-reject-missing-objects" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" present="present" present_oid="$(calc_oid "$present")" printf "$present" > present.dat missing="missing" missing_oid="$(calc_oid "$missing")" printf "$missing" > missing.dat git add missing.dat present.dat git commit -m "add objects" git rm missing.dat git commit -m "remove missing" # :fire: the "missing" object missing_oid_part_1="$(echo "$missing_oid" | cut -b 1-2)" missing_oid_part_2="$(echo "$missing_oid" | cut -b 3-4)" missing_oid_path=".git/lfs/objects/$missing_oid_part_1/$missing_oid_part_2/$missing_oid" rm "$missing_oid_path" git config "lfs.allowincompletepush" "false" git push origin master 2>&1 | tee push.log if [ "1" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: expected \`git push origin master\` to succeed ..." exit 1 fi grep "no such file or directory" push.log || # unix grep "cannot find the file" push.log # windows grep "failed to push some refs" push.log refute_server_object "$reponame" "$present_oid" refute_server_object "$reponame" "$missing_oid" ) end_test git-lfs-2.3.4/test/test-reference-clone.sh000077500000000000000000000040011317167762300204240ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" assert_same_inode() { local repo1=$1 local repo2=$2 local oid=$3 if ! 
uname -s | grep -qE 'CYGWIN|MSYS|MINGW'; then cfg1=$(cd "$repo1"; git lfs env | grep LocalMediaDir) f1="${cfg1:14}/${oid:0:2}/${oid:2:2}/$oid" inode1=$(ls -i $f1 | cut -f1 -d\ ) cfg2=$(cd "$repo2"; git lfs env | grep LocalMediaDir) f2="${cfg2:14}/${oid:0:2}/${oid:2:2}/$oid" inode2=$(ls -i $f2 | cut -f1 -d\ ) [ "$inode1" == "$inode2" ] fi } begin_test "clone with reference" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" ref_repo=clone_reference_repo ref_repo_dir=$TRASHDIR/$ref_repo clone_repo "$reponame" "$ref_repo" git lfs track "*.dat" contents="a" oid=$(calc_oid "$contents") printf "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 git push origin master delete_server_object "$reponame" "$oid" repo=test_repo repo_dir=$TRASHDIR/$repo git clone --reference "$ref_repo_dir/.git" \ "$GITSERVER/$reponame" "$repo_dir" cd "$TRASHDIR/$repo" assert_pointer "master" "a.dat" "$oid" 1 assert_same_inode "$repo_dir" "$ref_repo_dir" "$oid" ) end_test begin_test "fetch from clone reference" ( set -e reponame="$(basename "$0" ".sh")2" setup_remote_repo "$reponame" ref_repo=clone_reference_repo2 ref_repo_dir=$TRASHDIR/$ref_repo clone_repo "$reponame" "$ref_repo" repo=test_repo2 repo_dir=$TRASHDIR/$repo git clone --reference "$ref_repo_dir/.git" \ "$GITSERVER/$reponame" "$repo_dir" 2> clone.log cd "$ref_repo_dir" git lfs track "*.dat" contents="a" oid=$(calc_oid "$contents") printf "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 git push origin master delete_server_object "$reponame" "$oid" cd "$repo_dir" GIT_LFS_SKIP_SMUDGE=1 git pull git lfs pull assert_pointer "master" "a.dat" "$oid" 1 assert_same_inode "$TRASHDIR/$repo" "$TRASHDIR/$ref_repo" "$oid" ) end_test git-lfs-2.3.4/test/test-resume-http-range.sh000077500000000000000000000047741317167762300207600ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "resume-http-range" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" $reponame git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # this string announces to server that we want a test that # interrupts the transfer when started from 0 to cause resume contents="status-batch-resume-206" contents_oid=$(calc_oid "$contents") printf "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log git push origin master assert_server_object "$reponame" "$contents_oid" # delete local copy then fetch it back # server will abort the transfer mid way (so will error) when not resuming # then we can restart it rm -rf .git/lfs/objects git lfs fetch 2>&1 | tee fetchinterrupted.log refute_local_object "$contents_oid" # now fetch again, this should try to resume and server should send remainder # this time (it does not cut short when Range is requested) GIT_TRACE=1 git lfs fetch 2>&1 | tee fetchresume.log grep "xfer: server accepted resume" fetchresume.log assert_local_object "$contents_oid" "${#contents}" ) end_test begin_test "resume-http-range-fallback" ( set -e reponame="resume-http-range-fallback" setup_remote_repo "$reponame" clone_repo "$reponame" $reponame git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log # this string announces to server that we want it to abort the download part # way, but reject the Range: header and fall back on re-downloading instead contents="batch-resume-fail-fallback" contents_oid=$(calc_oid "$contents") printf "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log git push origin master assert_server_object "$reponame" "$contents_oid" # delete local copy then fetch it back # server will abort the transfer mid way (so will error) when not resuming # then we can restart it rm -rf .git/lfs/objects git lfs fetch 2>&1 | tee fetchinterrupted.log refute_local_object "$contents_oid" # now fetch again, this should try to resume but server should reject the Range # header, which should cause client to re-download GIT_TRACE=1 git lfs fetch 2>&1 | tee fetchresumefallback.log grep "xfer: server rejected resume" fetchresumefallback.log # re-download should still have worked assert_local_object "$contents_oid" "${#contents}" ) end_test git-lfs-2.3.4/test/test-resume-tus.sh000077500000000000000000000043211317167762300175060ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "tus-upload-uninterrupted" ( set -e # this repo name is the indicator to the server to use tus reponame="test-tus-upload" setup_remote_repo "$reponame" clone_repo "$reponame" $reponame git config lfs.tustransfers true git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="send-verify-action" contents_oid=$(calc_oid "$contents") printf "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin master 2>&1 | tee pushtus.log grep "xfer: tus.io uploading" pushtus.log assert_server_object "$reponame" "$contents_oid" ) end_test begin_test "tus-upload-interrupted-resume" ( set -e # this repo name is the indicator to the server to use tus, AND to # interrupt the upload part way reponame="test-tus-upload-interrupt" setup_remote_repo "$reponame" clone_repo "$reponame" $reponame git config lfs.tustransfers true git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents_verify="send-verify-action" contents_verify_oid="$(calc_oid "$contents_verify")" # this string announces to server that we want it to abort the download part # way, but reject the Range: header and fall back on re-downloading instead contents="234587134187634598o634857619384765b747qcvtuedvoaicwtvseudtvcoqi7280r7qvow4i7r8c46pr9q6v9pri6ioq2r8" contents_oid=$(calc_oid "$contents") printf "$contents" > a.dat printf "$contents_verify" > verify.dat git add a.dat verify.dat git add .gitattributes git commit -m "add a.dat, verify.dat" 2>&1 | tee commit.log GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin master 2>&1 | tee pushtus_resume.log # first attempt will start from the beginning grep "xfer: tus.io uploading" pushtus_resume.log grep "HTTP: 500" pushtus_resume.log # that will have failed but retry on 500 will resume it grep "xfer: tus.io resuming" pushtus_resume.log grep "HTTP: 204" pushtus_resume.log # should have completed in the end assert_server_object "$reponame" "$contents_oid" assert_server_object "$reponame" "$contents_verify_oid" ) end_test git-lfs-2.3.4/test/test-smudge.sh000077500000000000000000000130261317167762300166630ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "smudge" ( set -e reponame="$(basename "$0" ".sh")" setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" echo "smudge a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" # smudge works even though it hasn't been pushed, by reading from .git/lfs/objects output="$(pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9 | git lfs smudge)" [ "smudge a" = "$output" ] git push origin master # download it from the git lfs server rm -rf .git/lfs/objects output="$(pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9 | git lfs smudge)" [ "smudge a" = "$output" ] ) end_test begin_test "smudge with temp file" ( set -e cd repo rm -rf .git/lfs/objects mkdir -p .git/lfs/tmp/objects touch .git/lfs/tmp/objects/fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254-1 pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9 | GIT_TRACE=5 git lfs smudge | tee smudge.log [ "smudge a" = "$(cat smudge.log)" ] || { rm -rf .git/lfs/tmp git lfs logs last exit 1 } ) end_test begin_test "smudge with invalid pointer" ( set -e cd repo [ "wat" = "$(echo "wat" | git lfs smudge)" ] [ "not a git-lfs file" = "$(echo "not a git-lfs file" | git lfs smudge)" ] [ "version " = "$(echo "version " | git lfs smudge)" ] ) end_test begin_test "smudge include/exclude" ( set -e reponame="$(basename "$0" ".sh")-includeexclude" setup_remote_repo "$reponame" clone_repo "$reponame" includeexclude git lfs track "*.dat" echo "smudge a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" pointer="$(pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9)" # smudge works even though it hasn't been pushed, by reading from .git/lfs/objects [ "smudge a" = "$(echo "$pointer" | git lfs smudge)" ] git push origin master # this WOULD download except we're going to prevent it with include/exclude rm -rf .git/lfs/objects git config "lfs.fetchexclude" "a*" [ "$pointer" = "$(echo "$pointer" | git lfs smudge a.dat)" ] ) end_test begin_test "smudge with skip" ( set -e reponame="$(basename "$0" ".sh")-skip" setup_remote_repo "$reponame" clone_repo "$reponame" "skip" git lfs track "*.dat" echo "smudge a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" pointer="$(pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9)" [ "smudge a" = "$(echo "$pointer" | git lfs smudge)" ] git push origin master # Must clear the cache because smudge will use # cached objects even with --skip/GIT_LFS_SKIP_SMUDGE # (--skip applies to whether or not it downloads). 
rm -rf .git/lfs/objects [ "$pointer" = "$(echo "$pointer" | GIT_LFS_SKIP_SMUDGE=1 git lfs smudge)" ] echo "test clone with env" export GIT_LFS_SKIP_SMUDGE=1 env | grep LFS_SKIP clone_repo "$reponame" "skip-clone-env" [ "$pointer" = "$(cat a.dat)" ] git lfs pull [ "smudge a" = "$(cat a.dat)" ] echo "test clone without env" unset GIT_LFS_SKIP_SMUDGE [ "$(env | grep LFS_SKIP)" == "" ] clone_repo "$reponame" "no-skip" [ "smudge a" = "$(cat a.dat)" ] echo "test clone with init --skip-smudge" git lfs install --skip-smudge clone_repo "$reponame" "skip-clone-init" [ "$pointer" = "$(cat a.dat)" ] git lfs install --force ) end_test begin_test "smudge clone with include/exclude" ( set -e reponame="smudge_include_exclude" setup_remote_repo "$reponame" clone_repo "$reponame" "repo_$reponame" git lfs track "*.dat" 2>&1 | tee track.log grep "Tracking \"\*.dat\"" track.log contents="a" contents_oid=$(calc_oid "$contents") printf "$contents" > a.dat git add a.dat git add .gitattributes git commit -m "add a.dat" 2>&1 | tee commit.log grep "master (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 a.dat" commit.log grep "create mode 100644 .gitattributes" commit.log [ "a" = "$(cat a.dat)" ] assert_local_object "$contents_oid" 1 git push origin master 2>&1 | tee push.log grep "(1 of 1 files)" push.log grep "master -> master" push.log assert_server_object "$reponame" "$contents_oid" clone="$TRASHDIR/clone_$reponame" git -c lfs.fetchexclude="a*" clone "$GITSERVER/$reponame" "$clone" cd "$clone" # Should have succeeded but not downloaded refute_local_object "$contents_oid" ) end_test begin_test "smudge skip download failure" ( set -e reponame="$(basename "$0" ".sh")-skipdownloadfail" setup_remote_repo "$reponame" clone_repo "$reponame" skipdownloadfail git lfs track "*.dat" echo "smudge a" > a.dat git add .gitattributes a.dat git commit -m "add a.dat" pointer="$(pointer fcf5015df7a9089a7aa7fe74139d4b8f7d62e52d5a34f9a87aeffc8e8c668254 9)" # smudge works even though it hasn't been pushed, by reading from .git/lfs/objects [ "smudge a" = "$(echo "$pointer" | git lfs smudge)" ] git push origin master # make it try to download but we're going to make it fail rm -rf .git/lfs/objects git remote set-url origin httpnope://nope.com/nope # this should fail set +e echo "$pointer" | git lfs smudge a.dat; test ${PIPESTATUS[1]} -ne 0 set -e git config lfs.skipdownloaderrors true echo "$pointer" | git lfs smudge a.dat # check content too [ "$pointer" = "$(echo "$pointer" | git lfs smudge a.dat)" ] # now try env var git config --unset lfs.skipdownloaderrors echo "$pointer" | GIT_LFS_SKIP_DOWNLOAD_ERRORS=1 git lfs smudge a.dat ) end_test git-lfs-2.3.4/test/test-ssh.sh000077500000000000000000000013341317167762300161730ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "ssh with proxy command in lfs.url" ( set -e reponame="batch-ssh-proxy" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" sshurl="${GITSERVER/http:\/\//ssh://-oProxyCommand=ssh-proxy-test/}/$reponame" echo $sshurl git config lfs.url "$sshurl" contents="test" oid="$(calc_oid "$contents")" git lfs track "*.dat" printf "$contents" > test.dat git add .gitattributes test.dat git commit -m "initial commit" git push origin master 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "fatal: push succeeded" exit 1 fi grep "got 4 args" push.log grep "lfs-ssh-echo -- -oProxyCommand" push.log ) end_test git-lfs-2.3.4/test/test-status.sh000077500000000000000000000200061317167762300167160ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "status" ( set -e mkdir repo-1 cd repo-1 git init git lfs track "*.dat" file_1="some data" file_1_oid="$(calc_oid "$file_1")" file_1_oid_short="$(echo "$file_1_oid" | head -c 7)" printf "$file_1" > file1.dat git add file1.dat git commit -m "file1.dat" file_1_new="other data" file_1_new_oid="$(calc_oid "$file_1_new")" file_1_new_oid_short="$(echo "$file_1_new_oid" | head -c 7)" printf "$file_1_new" > file1.dat file_2="file2 data" file_2_oid="$(calc_oid "$file_2")" file_2_oid_short="$(echo "$file_2_oid" | head -c 7)" printf "$file_2" > file2.dat git add file2.dat file_3="file3 data" file_3_oid="$(calc_oid "$file_3")" file_3_oid_short="$(echo "$file_3_oid" | head -c 7)" printf "$file_3" > file3.dat git add file3.dat file_3_new="file3 other data" file_3_new_oid="$(calc_oid "$file_3_new")" file_3_new_oid_short="$(echo "$file_3_new_oid" | head -c 7)" printf "$file_3_new" > file3.dat expected="On branch master Git LFS objects to be committed: file2.dat (LFS: $file_2_oid_short) file3.dat (LFS: $file_3_oid_short) Git LFS objects not staged for commit: file1.dat (LFS: $file_1_oid_short -> File: $file_1_new_oid_short) file3.dat (File: $file_3_new_oid_short)" [ "$expected" = "$(git lfs status)" ] ) end_test begin_test "status --porcelain" ( set -e mkdir repo-2 cd repo-2 git init git lfs track "*.dat" echo "some data" > file1.dat git add file1.dat git commit -m "file1.dat" echo "other data" > file1.dat echo "file2 data" > file2.dat git add file2.dat echo "file3 data" > file3.dat git add file3.dat echo "file3 other data" > file3.dat expected=" M file1.dat A file3.dat A file2.dat" [ "$expected" = "$(git lfs status --porcelain)" ] ) end_test begin_test "status --json" ( set -e mkdir repo-3 cd repo-3 git init git lfs track "*.dat" echo "some data" > file1.dat git add file1.dat git commit -m "file1.dat" echo "other data" > file1.dat expected='{"files":{"file1.dat":{"status":"M"}}}' [ "$expected" = "$(git lfs status --json)" ] git add file1.dat git commit -m "file1.dat changed" git mv file1.dat file2.dat expected='{"files":{"file2.dat":{"status":"R","from":"file1.dat"}}}' [ "$expected" = "$(git lfs status --json)" ] git commit -m "file1.dat -> file2.dat" # Ensure status --json does not include non-lfs files echo hi > test1.txt git add test1.txt expected='{"files":{}}' [ "$expected" = "$(git lfs status --json)" ] ) end_test begin_test "status: outside git repository" ( set +e git lfs status 2>&1 > status.log res=$? set -e if [ "$res" = "0" ]; then echo "Passes because $GIT_LFS_TEST_DIR is unset." 
exit 0 fi [ "$res" = "128" ] grep "Not in a git repository" status.log ) end_test begin_test "status - before initial commit" ( set -e git init repo-initial cd repo-initial git lfs track "*.dat" # should not fail when nothing to display (ignore output, will be blank) git lfs status contents="some data" contents_oid="$(calc_oid "$contents")" contents_oid_short="$(echo "$contents_oid" | head -c 7)" printf "$contents" > file1.dat git add file1.dat expected=" Git LFS objects to be committed: file1.dat (LFS: $contents_oid_short) Git LFS objects not staged for commit:" [ "$expected" = "$(git lfs status)" ] ) end_test begin_test "status shows multiple files with identical contents" ( set -e reponame="uniq-status" mkdir "$reponame" cd "$reponame" git init git lfs track "*.dat" contents="contents" printf "$contents" > a.dat printf "$contents" > b.dat git add --all . git lfs status | tee status.log [ "1" -eq "$(grep -c "a.dat" status.log)" ] [ "1" -eq "$(grep -c "b.dat" status.log)" ] ) end_test begin_test "status shows multiple copies of partially staged files" ( set -e reponame="status-partially-staged" git init "$reponame" cd "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents_1="part 1" contents_1_oid="$(calc_oid "$contents_1")" contents_1_oid_short="$(echo "$contents_1_oid" | head -c 7)" printf "$contents_1" > a.dat # "$contents_1" changes are staged git add a.dat # "$contents_2" changes are unstaged contents_2="part 2" contents_2_oid="$(calc_oid "$contents_2")" contents_2_oid_short="$(echo "$contents_2_oid" | head -c 7)" printf "$contents_2" > a.dat expected="On branch master Git LFS objects to be committed: a.dat (LFS: $contents_1_oid_short) Git LFS objects not staged for commit: a.dat (File: $contents_2_oid_short)" actual="$(git lfs status)" diff -u <(echo "$expected") <(echo "$actual") ) end_test begin_test "status: LFS to LFS change" ( set -e reponame="status-lfs-to-lfs-change" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="contents" contents_oid="$(calc_oid "$contents")" contents_oid_short="$(echo "$contents_oid" | head -c 7)" git lfs track "*.dat" git add .gitattributes git commit -m "track *.dat files" printf "$contents" > a.dat git add a.dat git commit -m "add a.dat" contents_new="$contents +extra" contents_new_oid="$(calc_oid "$contents_new")" contents_new_oid_short="$(echo $contents_new_oid | head -c 7)" printf "$contents_new" > a.dat git add a.dat expected="On branch master Git LFS objects to be committed: a.dat (LFS: $contents_oid_short -> LFS: $contents_new_oid_short) Git LFS objects not staged for commit:" actual="$(git lfs status)" [ "$expected" = "$actual" ] ) end_test begin_test "status: Git to LFS change" ( set -e reponame="status-git-to-lfs-change" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="contents" contents_oid="$(calc_oid "$contents")" contents_oid_short="$(echo "$contents_oid" | head -c 7)" printf "$contents" > a.dat git add a.dat git commit -m "add a.dat" git lfs track "*.dat" git add .gitattributes git commit -m "track *.dat files" contents_new="$contents +extra" contents_new_oid="$(calc_oid "$contents_new")" contents_new_oid_short="$(echo $contents_new_oid | head -c 7)" printf "$contents_new" > a.dat git add a.dat expected="On branch master Git LFS objects to be committed: a.dat (Git: $contents_oid_short -> LFS: $contents_new_oid_short) Git LFS objects not staged for commit:" actual="$(git lfs status)" [ "$expected" = "$actual" ] ) end_test begin_test "status: Git 
to LFS conversion" ( set -e reponame="status-git-to-lfs-conversion" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" contents="contents" contents_oid="$(calc_oid "$contents")" contents_oid_short="$(echo "$contents_oid" | head -c 7)" printf "$contents" > a.dat git add a.dat git commit -m "add a.dat" git lfs track "*.dat" git add .gitattributes git commit -m "track *.dat" git push origin master pushd "$TRASHDIR" > /dev/null clone_repo "$reponame" "$reponame-2" git add a.dat git lfs status 2>&1 | tee status.log if [ "0" -ne "${PIPESTATUS[0]}" ]; then echo >&2 "git lfs status should have succeeded, didn't ..." exit 1 fi expected="On branch master Git LFS objects to be pushed to origin/master: Git LFS objects to be committed: a.dat (Git: $contents_oid_short -> LFS: $contents_oid_short) Git LFS objects not staged for commit:" actual="$(cat status.log)" [ "$expected" = "$actual" ] popd > /dev/null ) end_test begin_test "status (missing objects)" ( set -e reponame="status-missing-objects" git init "$reponame" cd "$reponame" git lfs track "*.dat" printf "a" > a.dat git add .gitattributes a.dat git commit -m "initial commit" # Remove the original object "a.dat" (ensure '--no-filters' is not given). oid="$(git hash-object -t blob -- a.dat)" rm -rf ".git/objects/${oid:0:2}/${oid:2}" # Create an unstaged change against a source file that doesn't exist. printf "b" > a.dat git add a.dat git lfs status \ | grep "a.dat (?: -> LFS: $(calc_oid b | head -c 7))" ) end_test git-lfs-2.3.4/test/test-submodule-lfsconfig.sh000077500000000000000000000037061317167762300213520ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" lfsname="submodule-config-test-lfs" reponame="submodule-config-test-repo" submodname="submodule-config-test-submodule" begin_test "submodule env with .lfsconfig" ( set -e # setup dummy repo with lfs store # no git data will be pushed, just lfs objects setup_remote_repo "$lfsname" echo $GITSERVER/$lfsname.git/info/lfs # setup submodule setup_remote_repo "$submodname" clone_repo "$submodname" submod mkdir dir git config -f .lfsconfig lfs.url "$GITSERVER/$lfsname.git/info/lfs" git lfs track "*.dat" submodcontent="submodule lfs file" submodoid=$(calc_oid "$submodcontent") printf "$submodcontent" > dir/test.dat git add .lfsconfig .gitattributes dir git commit -m "create submodule" git push origin master assert_server_object "$lfsname" "$submodoid" # setup repo with submodule setup_remote_repo "$reponame" clone_repo "$reponame" repo git config -f .lfsconfig lfs.url "$GITSERVER/$lfsname.git/info/lfs" git submodule add "$GITSERVER/$submodname" sub git submodule update git lfs track "*.dat" mkdir dir repocontent="repository lfs file" repooid=$(calc_oid "$repocontent") printf "$repocontent" > dir/test.dat git add .gitattributes .lfsconfig .gitmodules dir sub git commit -m "create repo" git push origin master assert_server_object "$lfsname" "$repooid" echo "repo" git lfs env | tee env.log grep "Endpoint=$GITSERVER/$lfsname.git/info/lfs (auth=basic)$" env.log cd sub echo "./sub" git lfs env | tee env.log grep "Endpoint=$GITSERVER/$lfsname.git/info/lfs (auth=basic)$" env.log cd dir echo "./sub/dir" git lfs env | tee env.log grep "Endpoint=$GITSERVER/$lfsname.git/info/lfs (auth=basic)$" env.log ) end_test begin_test "submodule update --init --remote with .lfsconfig" ( set -e clone_repo "$reponame" clone grep "$repocontent" dir/test.dat git submodule update --init --remote grep "$submodcontent" sub/dir/test.dat ) end_test 
git-lfs-2.3.4/test/test-submodule.sh000077500000000000000000000056521317167762300174040ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" reponame="submodule-test-repo" submodname="submodule-test-submodule" begin_test "submodule local git dir" ( set -e setup_remote_repo "$reponame" setup_remote_repo "$submodname" clone_repo "$submodname" submod mkdir dir echo "sub module" > dir/README git add dir/README git commit -a -m "submodule readme" git push origin master clone_repo "$reponame" repo git submodule add "$GITSERVER/$submodname" sub git submodule update git add .gitmodules sub git commit -m "add submodule" git push origin master grep "sub module" sub/dir/README || { echo "submodule not setup correctly?" cat sub/dir/README exit 1 } ) end_test begin_test "submodule env" ( set -e # using the local clone from the above test cd repo git lfs env | tee env.log grep "Endpoint=$GITSERVER/$reponame.git/info/lfs (auth=none)$" env.log grep "LocalWorkingDir=$(native_path_escaped "$TRASHDIR/repo$")" env.log grep "LocalGitDir=$(native_path_escaped "$TRASHDIR/repo/.git$")" env.log grep "LocalGitStorageDir=$(native_path_escaped "$TRASHDIR/repo/.git$")" env.log grep "LocalMediaDir=$(native_path_escaped "$TRASHDIR/repo/.git/lfs/objects$")" env.log grep "TempDir=$(native_path_escaped "$TRASHDIR/repo/.git/lfs/tmp$")" env.log cd .git echo "./.git" git lfs env | tee env.log cat env.log grep "Endpoint=$GITSERVER/$reponame.git/info/lfs (auth=none)$" env.log grep "LocalWorkingDir=$" env.log grep "LocalGitDir=$(native_path_escaped "$TRASHDIR/repo/.git$")" env.log grep "LocalGitStorageDir=$(native_path_escaped "$TRASHDIR/repo/.git$")" env.log grep "LocalMediaDir=$(native_path_escaped "$TRASHDIR/repo/.git/lfs/objects$")" env.log grep "TempDir=$(native_path_escaped "$TRASHDIR/repo/.git/lfs/tmp$")" env.log cd ../sub echo "./sub" git lfs env | tee env.log grep "Endpoint=$GITSERVER/$submodname.git/info/lfs (auth=none)$" env.log grep "LocalWorkingDir=$(native_path_escaped "$TRASHDIR/repo/sub$")" env.log grep "LocalGitDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub$")" env.log grep "LocalGitStorageDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub$")" env.log grep "LocalMediaDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub/lfs/objects$")" env.log grep "TempDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub/lfs/tmp$")" env.log cd dir echo "./sub/dir" git lfs env | tee env.log grep "Endpoint=$GITSERVER/$submodname.git/info/lfs (auth=none)$" env.log grep "LocalWorkingDir=$(native_path_escaped "$TRASHDIR/repo/sub$")" env.log grep "LocalGitDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub$")" env.log grep "LocalGitStorageDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub$")" env.log grep "LocalMediaDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub/lfs/objects$")" env.log grep "TempDir=$(native_path_escaped "$TRASHDIR/repo/.git/modules/sub/lfs/tmp$")" env.log ) end_test git-lfs-2.3.4/test/test-track-attrs.sh000077500000000000000000000013571317167762300176420ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" ensure_git_version_isnt $VERSION_LOWER "2.1.0" begin_test "track (--no-modify-attrs)" ( set -e reponame="track-no-modify-attrs" git init "$reponame" cd "$reponame" echo "contents" > a.dat git add a.dat # Git assumes that identical results from `stat(1)` between the index and # working copy are stat dirty. To prevent this, wait at least one second to # yield different `stat(1)` results. 
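# This is Git's "racy git" protection: when a file's timestamp is not clearly
# older than the index, stat data alone cannot prove it unchanged, so Git
# re-reads the file through the clean filter. Once "*.dat" is LFS-tracked,
# that conversion yields a pointer that no longer matches the indexed blob,
# which would show up as a spurious " M a.dat" without the sleep. A possible
# alternative sketch is to refresh the index explicitly:
#
#   git update-index --refresh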
sleep 1 git commit -m "add a.dat" echo "*.dat filter=lfs diff=lfs merge=lfs -text" > .gitattributes git add .gitattributes git commit -m "asdf" [ -z "$(git status --porcelain)" ] git lfs track --no-modify-attrs "*.dat" [ " M a.dat" = "$(git status --porcelain)" ] ) end_test git-lfs-2.3.4/test/test-track-wildcards.sh000066400000000000000000000044641317167762300204600ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "track files using wildcard pattern with leading slash" ( set -e reponame="track-wildcard-leading-slash" mkdir -p "$reponame/dir" cd $reponame git init # Adding files before being tracked by LFS printf "contents" > a.dat printf "contents" > dir/b.dat git add a.dat dir/b.dat git commit -m "initial commit" # Track only in the root git lfs track "/*.dat" grep "/*.dat" .gitattributes git add .gitattributes a.dat dir/b.dat sleep 1 git commit -m "convert to LFS" git lfs ls-files | tee files.log grep "a.dat" files.log [ ! $(grep "dir/b.dat" files.log) ] # Subdirectories ignored # Add files after being tracked by LFS printf "contents" > c.dat printf "contents" > dir/d.dat git add c.dat dir/d.dat sleep 1 git commit -m "more lfs files" git lfs ls-files | tee new_files.log grep "a.dat" new_files.log [ ! $(grep "dir/b.dat" new_files.log) ] grep "c.dat" new_files.log [ ! $(grep "dir/d.dat" new_files.log) ] ) end_test begin_test "track files using filename pattern with leading slash" ( set -e reponame="track-absolute-leading-slash" mkdir -p "$reponame/dir" cd $reponame git init # Adding files before being tracked by LFS printf "contents" > a.dat printf "contents" > dir/b.dat git add a.dat dir/b.dat sleep 1 git commit -m "initial commit" # These are added by git.GetTrackedFiles git lfs track "/a.dat" | tee track.log grep "Tracking \"/a.dat\"" track.log git lfs track "/dir/b.dat" | tee track.log grep "Tracking \"/dir/b.dat\"" track.log # These are added by Git's `clean` filter git lfs track "/c.dat" | tee track.log grep "Tracking \"/c.dat\"" track.log git lfs track "/dir/d.dat" | tee track.log grep "Tracking \"/dir/d.dat\"" track.log cat .gitattributes git add .gitattributes a.dat dir/b.dat sleep 1 git commit -m "convert to LFS" git lfs ls-files | tee files.log grep "a.dat" files.log grep "dir/b.dat" files.log # Add files after being tracked by LFS printf "contents" > c.dat printf "contents" > dir/d.dat git add c.dat dir/d.dat git commit -m "more lfs files" git lfs ls-files | tee new_files.log grep "a.dat" new_files.log grep "dir/b.dat" new_files.log grep "c.dat" new_files.log grep "dir/d.dat" new_files.log ) end_test git-lfs-2.3.4/test/test-track.sh000077500000000000000000000266551317167762300165170ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "track" ( set -e # no need to setup a remote repo, since this test doesn't need to push or pull mkdir track cd track git init echo "############################################################################### # Set default behavior to automatically normalize line endings. 
############################################################################### * text=auto #*.cs diff=csharp" > .gitattributes # track *.jpg once git lfs track "*.jpg" | grep "Tracking \"\*.jpg\"" assert_attributes_count "jpg" "filter=lfs" 1 # track *.jpg again git lfs track "*.jpg" | grep "\"*.jpg\" already supported" assert_attributes_count "jpg" "filter=lfs" 1 mkdir -p a/b .git/info echo "*.mov filter=lfs -text" > .git/info/attributes echo "*.gif filter=lfs -text" > a/.gitattributes echo "*.png filter=lfs -text" > a/b/.gitattributes git lfs track | tee track.log grep "Listing tracked patterns" track.log grep "*.mov ($(native_path_escaped ".git/info/attributes"))" track.log grep "*.jpg (.gitattributes)" track.log grep "*.gif ($(native_path_escaped "a/.gitattributes"))" track.log grep "*.png ($(native_path_escaped "a/b/.gitattributes"))" track.log grep "Set default behavior" .gitattributes grep "############" .gitattributes grep "* text=auto" .gitattributes grep "diff=csharp" .gitattributes grep "*.jpg" .gitattributes ) end_test begin_test "track --verbose" ( set -e reponame="track_verbose_logs" mkdir "$reponame" cd "$reponame" git init touch foo.dat git add foo.dat git lfs track --verbose "foo.dat" 2>&1 > track.log grep "touching \"foo.dat\"" track.log ) end_test begin_test "track --dry-run" ( set -e reponame="track_dry_run" mkdir "$reponame" cd "$reponame" git init touch foo.dat git add foo.dat git lfs track --dry-run "foo.dat" 2>&1 > track.log grep "Tracking \"foo.dat\"" track.log grep "Git LFS: touching \"foo.dat\"" track.log git status --porcelain 2>&1 > status.log grep "A foo.dat" status.log ) end_test begin_test "track directory" ( set -e mkdir dir cd dir git init git lfs track "foo bar\\*" | tee track.txt [ "foo[[:space:]]bar/* filter=lfs diff=lfs merge=lfs -text" = "$(cat .gitattributes)" ] [ "Tracking \"foo bar/*\"" = "$(cat track.txt)" ] mkdir "foo bar" echo "a" > "foo bar/a" echo "b" > "foo bar/b" git add foo\ bar git commit -am "add foo bar" assert_pointer "master" "foo bar/a" "87428fc522803d31065e7bce3cf03fe475096631e5e07bbd7a0fde60c4cf25c7" 2 assert_pointer "master" "foo bar/b" "0263829989b6fd954f72baaf2fc64bc2e2f01d692d4de72986ea808f6e99813f" 2 ) end_test begin_test "track without trailing linebreak" ( set -e mkdir no-linebreak cd no-linebreak git init printf "*.mov filter=lfs -text" > .gitattributes [ "*.mov filter=lfs -text" = "$(cat .gitattributes)" ] git lfs track "*.gif" expected="*.mov filter=lfs -text$(cat_end) *.gif filter=lfs diff=lfs merge=lfs -text$(cat_end)" [ "$expected" = "$(cat -e .gitattributes)" ] ) end_test begin_test "track with existing crlf" ( set -e mkdir existing-crlf cd existing-crlf git init git config core.autocrlf true git lfs track "*.mov" git lfs track "*.gif" expected="*.mov filter=lfs diff=lfs merge=lfs -text^M$ *.gif filter=lfs diff=lfs merge=lfs -text^M$" [ "$expected" = "$(cat -e .gitattributes)" ] git config core.autocrlf false git lfs track "*.jpg" expected="*.mov filter=lfs diff=lfs merge=lfs -text^M$ *.gif filter=lfs diff=lfs merge=lfs -text^M$ *.jpg filter=lfs diff=lfs merge=lfs -text^M$" [ "$expected" = "$(cat -e .gitattributes)" ] ) end_test begin_test "track with autocrlf=true" ( set -e mkdir autocrlf-true cd autocrlf-true git init git config core.autocrlf true printf "*.mov filter=lfs -text" > .gitattributes [ "*.mov filter=lfs -text" = "$(cat .gitattributes)" ] git lfs track "*.gif" expected="*.mov filter=lfs -text^M$ *.gif filter=lfs diff=lfs merge=lfs -text^M$" [ "$expected" = "$(cat -e .gitattributes)" ] ) end_test begin_test 
"track with autocrlf=input" ( set -e mkdir autocrlf-input cd autocrlf-input git init git config core.autocrlf input printf "*.mov filter=lfs -text" > .gitattributes [ "*.mov filter=lfs -text" = "$(cat .gitattributes)" ] git lfs track "*.gif" expected="*.mov filter=lfs -text^M$ *.gif filter=lfs diff=lfs merge=lfs -text^M$" [ "$expected" = "$(cat -e .gitattributes)" ] ) end_test begin_test "track outside git repo" ( set -e git lfs track "*.foo" || { # this fails if it's run outside of a git repo using GIT_LFS_TEST_DIR # git itself returns an exit status of 128 # $ git show # fatal: Not a git repository (or any of the parent directories): .git # $ echo "$?" # 128 [ "$?" = "128" ] exit 0 } if [ -n "$GIT_LFS_TEST_DIR" ]; then echo "GIT_LFS_TEST_DIR should be set outside of any Git repository" exit 1 fi git init track-outside cd track-outside git lfs track "*.file" git lfs track "../*.foo" || { # git itself returns an exit status of 128 # $ git add ../test.foo # fatal: ../test.foo: '../test.foo' is outside repository # $ echo "$?" # 128 [ "$?" = "128" ] exit 0 } exit 1 ) end_test begin_test "track representation" ( set -e git init track-representation cd track-representation git lfs track "*.jpg" mkdir a git lfs track "a/test.file" cd a out3=$(git lfs track "test.file") if [ "$out3" != "\"test.file\" already supported" ]; then echo "Track didn't recognize duplicate path" cat .gitattributes exit 1 fi git lfs track "file.bin" cd .. out4=$(git lfs track "a/file.bin") if [ "$out4" != "\"a/file.bin\" already supported" ]; then echo "Track didn't recognize duplicate path" cat .gitattributes exit 1 fi ) end_test begin_test "track absolute" ( # MinGW bash intercepts '/images' and passes 'C:/Program Files/Git/images' as arg! if [[ $(uname) == *"MINGW"* ]]; then echo "Skipping track absolute on Windows" exit 0 fi set -e git init track-absolute cd track-absolute git lfs track "/images" cat .gitattributes grep "^/images" .gitattributes ) end_test begin_test "track in gitDir" ( set -e git init track-in-dot-git cd track-in-dot-git echo "some content" > test.file cd .git git lfs track "../test.file" || { # this fails if it's run inside a .git directory # git itself returns an exit status of 128 # $ git add ../test.file # fatal: This operation must be run in a work tree # $ echo "$?" # 128 [ "$?" = "128" ] exit 0 } # fail if track passed exit 1 ) end_test begin_test "track in symlinked dir" ( set -e git init track-symlinkdst ln -s track-symlinkdst track-symlinksrc cd track-symlinksrc git lfs track "*.png" grep "^*.png" .gitattributes || { echo ".gitattributes doesn't contain the expected relative path *.png:" cat .gitattributes exit 1 } ) end_test begin_test "track blocklisted files by name" ( set -e repo="track_blocklisted_by_name" mkdir "$repo" cd "$repo" git init touch .gitattributes git add .gitattributes git lfs track .gitattributes 2>&1 > track.log grep "Pattern .gitattributes matches forbidden file .gitattributes" track.log ) end_test begin_test "track blocklisted files with glob" ( set -e repo="track_blocklisted_glob" mkdir "$repo" cd "$repo" git init touch .gitattributes git add .gitattributes git lfs track ".git*" 2>&1 > track.log grep "Pattern .git\* matches forbidden file" track.log ) end_test begin_test "track lockable" ( set -e repo="track_lockable" mkdir "$repo" cd "$repo" git init # track *.jpg once, lockable git lfs track --lockable "*.jpg" | grep "Tracking \"\*.jpg\"" assert_attributes_count "jpg" "lockable" 1 # track *.jpg again, don't change anything. 
Should retain lockable git lfs track "*.jpg" | grep "\"*.jpg\" already supported" assert_attributes_count "jpg" "lockable" 1 # track *.png once, not lockable yet git lfs track "*.png" | grep "Tracking \"\*.png\"" assert_attributes_count "png" "filter=lfs" 1 assert_attributes_count "png" "lockable" 0 # track png again, enable lockable, should replace git lfs track --lockable "*.png" | grep "Tracking \"\*.png\"" assert_attributes_count "png" "filter=lfs" 1 assert_attributes_count "png" "lockable" 1 # track png again, disable lockable, should replace git lfs track --not-lockable "*.png" | grep "Tracking \"\*.png\"" assert_attributes_count "png" "filter=lfs" 1 assert_attributes_count "png" "lockable" 0 # check output reflects lockable out=$(git lfs track) echo "$out" | grep "Listing tracked patterns" echo "$out" | grep "*.jpg \[lockable\] (.gitattributes)" echo "$out" | grep "*.png (.gitattributes)" ) end_test begin_test "track lockable read-only/read-write" ( set -e repo="track_lockable_ro_rw" mkdir "$repo" cd "$repo" git init echo "blah blah" > test.bin echo "foo bar" > test.dat mkdir subfolder echo "sub blah blah" > subfolder/test.bin echo "sub foo bar" > subfolder/test.dat # should start writeable assert_file_writeable test.bin assert_file_writeable test.dat assert_file_writeable subfolder/test.bin assert_file_writeable subfolder/test.dat # track *.bin, not lockable yet git lfs track "*.bin" | grep "Tracking \"\*.bin\"" # track *.dat, lockable immediately git lfs track --lockable "*.dat" | grep "Tracking \"\*.dat\"" # bin should remain writeable, dat should have been made read-only assert_file_writeable test.bin refute_file_writeable test.dat assert_file_writeable subfolder/test.bin refute_file_writeable subfolder/test.dat git add .gitattributes test.bin test.dat git commit -m "First commit" # bin should still be writeable assert_file_writeable test.bin assert_file_writeable subfolder/test.bin # now make bin lockable git lfs track --lockable "*.bin" | grep "Tracking \"\*.bin\"" # bin should now be read-only refute_file_writeable test.bin refute_file_writeable subfolder/test.bin # remove lockable again git lfs track --not-lockable "*.bin" | grep "Tracking \"\*.bin\"" # bin should now be writeable again assert_file_writeable test.bin assert_file_writeable subfolder/test.bin ) end_test begin_test "track escaped pattern" ( set -e reponame="track-escaped-pattern" git init "$reponame" cd "$reponame" git lfs track " " | grep "Tracking \" \"" assert_attributes_count "[[:space:]]" "filter=lfs" 1 git lfs track "#" | grep "Tracking \"#\"" assert_attributes_count "\\#" "filter=lfs" 1 ) end_test begin_test "track (symlinked repository)" ( set -e reponame="tracked-symlinked-repository" git init "$reponame" cd "$reponame" touch a.dat pushd .. > /dev/null dir="tracked-symlinked-repository-tmp" mkdir -p "$dir" ln -s "../$reponame" "./$dir" cd "$dir/$reponame" [ "Tracking \"a.dat\"" = "$(git lfs track "a.dat")" ] [ "\"a.dat\" already supported" = "$(git lfs track "a.dat")" ] popd > /dev/null ) end_test begin_test "track (\$GIT_LFS_TRACK_NO_INSTALL_HOOKS)" ( set -e reponame="track-no-setup-hooks" git init "$reponame" cd "$reponame" [ ! -f .git/hooks/pre-push ] [ ! -f .git/hooks/post-checkout ] [ ! -f .git/hooks/post-commit ] [ ! -f .git/hooks/post-merge ] GIT_LFS_TRACK_NO_INSTALL_HOOKS=1 git lfs track [ ! -f .git/hooks/pre-push ] [ ! -f .git/hooks/post-checkout ] [ ! -f .git/hooks/post-commit ] [ ! 
-f .git/hooks/post-merge ]
)
end_test
git-lfs-2.3.4/test/test-uninstall.sh000077500000000000000000000115501317167762300174100ustar00rootroot00000000000000#!/usr/bin/env bash

. "test/testlib.sh"

begin_test "uninstall outside repository"
(
  set -e

  mkdir uninstall-test
  cd uninstall-test

  smudge="$(git config filter.lfs.smudge)"
  clean="$(git config filter.lfs.clean)"
  filter="$(git config filter.lfs.process)"

  printf "$smudge" | grep "git-lfs smudge"
  printf "$clean" | grep "git-lfs clean"
  printf "$filter" | grep "git-lfs filter-process"

  # uninstall multiple times to trigger https://github.com/git-lfs/git-lfs/issues/529
  git lfs uninstall

  [ ! -e "lfs" ]

  git lfs install
  git lfs uninstall | tee uninstall.log
  grep "configuration has been removed" uninstall.log

  [ "" = "$(git config --global filter.lfs.smudge)" ]
  [ "" = "$(git config --global filter.lfs.clean)" ]
  [ "" = "$(git config --global filter.lfs.process)" ]

  cat $HOME/.gitconfig
  [ "$(grep 'filter "lfs"' $HOME/.gitconfig -c)" = "0" ]
)
end_test

begin_test "uninstall outside repository without access to .git/lfs"
(
  set -e

  mkdir uninstall-no-lfs
  cd uninstall-no-lfs

  mkdir .git
  touch .git/lfs
  touch lfs

  [ "" != "$(git config --global filter.lfs.smudge)" ]
  [ "" != "$(git config --global filter.lfs.clean)" ]
  [ "" != "$(git config --global filter.lfs.process)" ]

  git lfs uninstall

  [ "" = "$(git config --global filter.lfs.smudge)" ]
  [ "" = "$(git config --global filter.lfs.clean)" ]
  [ "" = "$(git config --global filter.lfs.process)" ]
)
end_test

begin_test "uninstall inside repository with default pre-push hook"
(
  set -e

  reponame="$(basename "$0" ".sh")-hook"
  mkdir "$reponame"
  cd "$reponame"
  git init
  git lfs install

  [ -f .git/hooks/pre-push ]
  grep "git-lfs" .git/hooks/pre-push

  [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ]
  [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ]
  [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ]

  git lfs uninstall

  [ -f .git/hooks/pre-push ] && {
    echo "expected .git/hooks/pre-push to be deleted"
    exit 1
  }
  [ "" = "$(git config filter.lfs.smudge)" ]
  [ "" = "$(git config filter.lfs.clean)" ]
  [ "" = "$(git config filter.lfs.process)" ]
)
end_test

begin_test "uninstall inside repository without lfs pre-push hook"
(
  set -e

  reponame="$(basename "$0" ".sh")-no-hook"
  mkdir "$reponame"
  cd "$reponame"
  git init
  git lfs install
  echo "something something git-lfs" > .git/hooks/pre-push

  [ -f .git/hooks/pre-push ]
  [ "something something git-lfs" = "$(cat .git/hooks/pre-push)" ]

  [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ]
  [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ]
  [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ]

  git lfs uninstall

  [ -f .git/hooks/pre-push ]
  [ "" = "$(git config filter.lfs.smudge)" ]
  [ "" = "$(git config filter.lfs.clean)" ]
  [ "" = "$(git config filter.lfs.process)" ]
)
end_test

begin_test "uninstall hooks inside repository"
(
  set -e

  reponame="$(basename "$0" ".sh")-only-hook"
  mkdir "$reponame"
  cd "$reponame"
  git init
  git lfs install

  [ -f .git/hooks/pre-push ]
  grep "git-lfs" .git/hooks/pre-push

  [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ]
  [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ]
  [ "git-lfs filter-process" = "$(git config filter.lfs.process)" ]

  git lfs uninstall hooks

  [ -f .git/hooks/pre-push ] && {
    echo "expected .git/hooks/pre-push to be deleted"
    exit 1
  }

  [ "git-lfs smudge -- %f" = "$(git config filter.lfs.smudge)" ]
  [ "git-lfs clean -- %f" = "$(git config filter.lfs.clean)" ]
  [ "git-lfs filter-process" = "$(git
config filter.lfs.process)" ] ) end_test begin_test "uninstall --local" ( set -e # old values that should be ignored by `uninstall --local` git config --global filter.lfs.smudge "global smudge" git config --global filter.lfs.clean "global clean" git config --global filter.lfs.process "global filter" reponame="$(basename "$0" ".sh")-local" mkdir "$reponame" cd "$reponame" git init git lfs install --local # local configs are correct [ "git-lfs smudge -- %f" = "$(git config --local filter.lfs.smudge)" ] [ "git-lfs clean -- %f" = "$(git config --local filter.lfs.clean)" ] [ "git-lfs filter-process" = "$(git config --local filter.lfs.process)" ] # global configs [ "global smudge" = "$(git config --global filter.lfs.smudge)" ] [ "global clean" = "$(git config --global filter.lfs.clean)" ] [ "global filter" = "$(git config --global filter.lfs.process)" ] git lfs uninstall --local # global configs [ "global smudge" = "$(git config --global filter.lfs.smudge)" ] [ "global clean" = "$(git config --global filter.lfs.clean)" ] [ "global filter" = "$(git config --global filter.lfs.process)" ] # local configs are empty [ "" = "$(git config --local filter.lfs.smudge)" ] [ "" = "$(git config --local filter.lfs.clean)" ] [ "" = "$(git config --local filter.lfs.process)" ] ) end_test git-lfs-2.3.4/test/test-unlock.sh000077500000000000000000000122761317167762300167000ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "unlocking a lock by path" ( set -e reponame="unlock_by_path" setup_remote_repo_with_file "unlock_by_path" "c.dat" git lfs lock --json "c.dat" | tee lock.log id=$(assert_lock lock.log c.dat) assert_server_lock "$reponame" "$id" git lfs unlock "c.dat" 2>&1 | tee unlock.log refute_server_lock "$reponame" "$id" ) end_test begin_test "unlocking a file makes it readonly" ( set -e reponame="unlock_set_readonly" setup_remote_repo_with_file "$reponame" "c.dat" git lfs lock --json "c.dat" assert_file_writeable c.dat git lfs unlock "c.dat" refute_file_writeable c.dat ) end_test begin_test "unlocking a file ignores readonly" ( set -e reponame="unlock_set_readonly_ignore" setup_remote_repo_with_file "$reponame" "c.dat" git lfs lock --json "c.dat" assert_file_writeable c.dat git -c lfs.setlockablereadonly=false lfs unlock "c.dat" assert_file_writeable c.dat ) end_test begin_test "force unlocking lock with missing file" ( set -e reponame="force-unlock-missing-file" setup_remote_repo_with_file "$reponame" "a.dat" git lfs lock --json "a.dat" | tee lock.log id=$(assert_lock lock.log a.dat) assert_server_lock "$reponame" "$id" git rm a.dat git commit -m "a.dat" rm *.log *.json # ensure clean git status git status git lfs unlock "a.dat" 2>&1 | tee unlock.log grep "Unable to determine path" unlock.log assert_server_lock "$reponame" "$id" rm unlock.log git lfs unlock --force "a.dat" 2>&1 | tee unlock.log refute_server_lock "$reponame" "$id" ) end_test begin_test "unlocking a lock (--json)" ( set -e reponame="unlock_by_path_json" setup_remote_repo_with_file "$reponame" "c_json.dat" git lfs lock --json "c_json.dat" | tee lock.log id=$(assert_lock lock.log c_json.dat) assert_server_lock "$reponame" "$id" git lfs unlock --json "c_json.dat" 2>&1 | tee unlock.log grep "\"unlocked\":true" unlock.log refute_server_lock "$reponame" "$id" ) end_test begin_test "unlocking a lock by id" ( set -e reponame="unlock_by_id" setup_remote_repo_with_file "$reponame" "d.dat" git lfs lock --json "d.dat" | tee lock.log assert_file_writeable d.dat id=$(assert_lock lock.log d.dat) assert_server_lock "$reponame" 
"$id" git lfs unlock --id="$id" refute_file_writeable d.dat ) end_test begin_test "unlocking a lock without sufficient info" ( set -e reponame="unlock_ambiguous" setup_remote_repo_with_file "$reponame" "e.dat" git lfs lock --json "e.dat" | tee lock.log id=$(assert_lock lock.log e.dat) assert_server_lock "$reponame" "$id" git lfs unlock 2>&1 | tee unlock.log grep "Usage: git lfs unlock" unlock.log assert_server_lock "$reponame" "$id" ) end_test begin_test "unlocking a lock while uncommitted" ( set -e reponame="unlock_modified" setup_remote_repo_with_file "$reponame" "mod.dat" git lfs lock --json "mod.dat" | tee lock.log id=$(assert_lock lock.log mod.dat) assert_server_lock "$reponame" "$id" echo "\nSomething" >> mod.dat git lfs unlock "mod.dat" 2>&1 | tee unlock.log [ ${PIPESTATUS[0]} -ne "0" ] grep "Cannot unlock file with uncommitted changes" unlock.log assert_server_lock "$reponame" "$id" # should allow after discard git checkout mod.dat git lfs unlock "mod.dat" 2>&1 | tee unlock.log refute_server_lock "$reponame" "$id" ) end_test begin_test "unlocking a lock with ambiguious arguments" ( set -e reponame="unlock_ambiguious_args" setup_remote_repo_with_file "$reponame" "a.dat" git lfs lock --json "a.dat" | tee lock.log id=$(assert_lock lock.log a.dat) assert_server_lock "$reponame" "$id" git lfs unlock --id "$id" a.dat 2>&1 | tee unlock.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "expected ambiguous \`git lfs unlock\` command to exit, didn't" exit 1 fi grep "Usage:" unlock.log assert_server_lock "$reponame" "$id" ) end_test begin_test "unlocking a lock while uncommitted with --force" ( set -e reponame="unlock_modified_force" setup_remote_repo_with_file "$reponame" "modforce.dat" git lfs lock --json "modforce.dat" | tee lock.log id=$(assert_lock lock.log modforce.dat) assert_server_lock "$reponame" "$id" echo "\nSomething" >> modforce.dat # should allow with --force git lfs unlock --force "modforce.dat" 2>&1 | tee unlock.log grep "Warning: unlocking with uncommitted changes" unlock.log refute_server_lock "$reponame" "$id" ) end_test begin_test "unlocking a lock while untracked" ( set -e reponame="unlock_untracked" setup_remote_repo_with_file "$reponame" "notrelevant.dat" git lfs track "*.dat" # Create file but don't add it to git # Shouldn't be able to unlock it echo "something" > untracked.dat git lfs lock --json "untracked.dat" | tee lock.log id=$(assert_lock lock.log untracked.dat) assert_server_lock "$reponame" "$id" git lfs unlock "untracked.dat" 2>&1 | tee unlock.log [ ${PIPESTATUS[0]} -ne "0" ] grep "Cannot unlock file with uncommitted changes" unlock.log assert_server_lock "$reponame" "$id" # should allow after add/commit git add untracked.dat git commit -m "Added untracked" git lfs unlock "untracked.dat" 2>&1 | tee unlock.log refute_server_lock "$reponame" "$id" ) end_test git-lfs-2.3.4/test/test-untrack.sh000077500000000000000000000030301317167762300170400ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "untrack" ( set -e # no need to setup a remote repo, since this test doesn't need to push or pull reponame="untrack" git init $reponame cd $reponame # track *.jpg once git lfs track "*.jpg" | grep "Tracking \"\*.jpg\"" echo "* annex.backend=SHA512E" >> .gitattributes git lfs untrack "*.jpg" expected="* annex.backend=SHA512E" [ "$expected" = "$(cat .gitattributes)" ] ) end_test begin_test "untrack outside git repo" ( set -e reponame="outside" mkdir $reponame cd $reponame git lfs untrack "*.foo" || { # this fails if it's run outside of a git repo using GIT_LFS_TEST_DIR # git itself returns an exit status of 128 # $ git show # fatal: Not a git repository (or any of the parent directories): .git # $ echo "$?" # 128 [ "$?" = "128" ] exit 0 } if [ -n "$GIT_LFS_TEST_DIR" ]; then echo "GIT_LFS_TEST_DIR should be set outside of any Git repository" exit 1 fi ) end_test begin_test "untrack removes escape sequences" ( set -e reponame="untrack-remove-escape-sequence" git init "$reponame" cd "$reponame" git lfs track " " | grep "Tracking \" \"" assert_attributes_count "[[:space:]]" "filter=lfs" 1 git lfs untrack " " | grep "Untracking \" \"" assert_attributes_count "[[:space:]]" "filter=lfs" 0 git lfs track "#" | grep "Tracking \"#\"" assert_attributes_count "\\#" "filter=lfs" 1 git lfs untrack "#" | grep "Untracking \"#\"" assert_attributes_count "\\#" "filter=lfs" 0 ) end_test git-lfs-2.3.4/test/test-unusual-filenames.sh000077500000000000000000000010331317167762300210270ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" reponame="$(basename "$0" ".sh")" # Leading dashes may be misinterpreted as flags if commands don't use "--" # before paths. name1='-dash.dat' contents1='leading dash' begin_test "push unusually named files" ( set -e setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" echo "$content1" > "$name1" git add -- .gitattributes *.dat git commit -m "add files" git push origin master | tee push.log grep "Git LFS: (1 of 1 files)" push.log ) end_test git-lfs-2.3.4/test/test-update.sh000077500000000000000000000203221317167762300166560ustar00rootroot00000000000000#!/usr/bin/env bash . "test/testlib.sh" begin_test "update" ( set -e pre_push_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/pre-push.\\n\"; exit 2; } git lfs pre-push \"\$@\"" post_checkout_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/post-checkout.\\n\"; exit 2; } git lfs post-checkout \"\$@\"" post_commit_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/post-commit.\\n\"; exit 2; } git lfs post-commit \"\$@\"" post_merge_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/post-merge.\\n\"; exit 2; } git lfs post-merge \"\$@\"" mkdir without-pre-push cd without-pre-push git init [ "Updated git hooks." 
= "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] [ "$post_checkout_hook" = "$(cat .git/hooks/post-checkout)" ] [ "$post_commit_hook" = "$(cat .git/hooks/post-commit)" ] [ "$post_merge_hook" = "$(cat .git/hooks/post-merge)" ] # run it again [ "Updated git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] [ "$post_checkout_hook" = "$(cat .git/hooks/post-checkout)" ] [ "$post_commit_hook" = "$(cat .git/hooks/post-commit)" ] [ "$post_merge_hook" = "$(cat .git/hooks/post-merge)" ] # replace old hook 1 echo "#!/bin/sh git lfs push --stdin \$*" > .git/hooks/pre-push [ "Updated git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] # replace old hook 2 echo "#!/bin/sh git lfs push --stdin \"\$@\"" > .git/hooks/pre-push [ "Updated git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] # replace old hook 3 echo "#!/bin/sh git lfs pre-push \"\$@\"" > .git/hooks/pre-push [ "Updated git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] # replace blank hook rm .git/hooks/pre-push touch .git/hooks/pre-push touch .git/hooks/post-checkout touch .git/hooks/post-merge [ "Updated git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] [ "$post_checkout_hook" = "$(cat .git/hooks/post-checkout)" ] [ "$post_commit_hook" = "$(cat .git/hooks/post-commit)" ] [ "$post_merge_hook" = "$(cat .git/hooks/post-merge)" ] # replace old hook 4 echo "#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository has been set up with Git LFS but Git LFS is not installed.\\n\"; exit 0; } git lfs pre-push \"$@\"" [ "Updated git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] # replace old hook 5 echo "#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository has been set up with Git LFS but Git LFS is not installed.\\n\"; exit 2; } git lfs pre-push \"$@\"" [ "Updated git hooks." = "$(git lfs update)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] # don't replace unexpected hook echo "test" > .git/hooks/pre-push echo "test" > .git/hooks/post-checkout echo "test" > .git/hooks/post-commit echo "test" > .git/hooks/post-merge expected="Hook already exists: pre-push test To resolve this, either: 1: run \`git lfs update --manual\` for instructions on how to merge hooks. 2: run \`git lfs update --force\` to overwrite your hook." [ "$expected" = "$(git lfs update 2>&1)" ] [ "test" = "$(cat .git/hooks/pre-push)" ] [ "test" = "$(cat .git/hooks/post-checkout)" ] [ "test" = "$(cat .git/hooks/post-commit)" ] [ "test" = "$(cat .git/hooks/post-merge)" ] # Make sure returns non-zero set +e git lfs update if [ $? -eq 0 ] then exit 1 fi set -e # test manual steps expected="Add the following to .git/hooks/pre-push: #!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/pre-push.\n\"; exit 2; } git lfs pre-push \"\$@\" Add the following to .git/hooks/post-checkout: #!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. 
If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/post-checkout.\n\"; exit 2; } git lfs post-checkout \"\$@\" Add the following to .git/hooks/post-commit: #!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/post-commit.\n\"; exit 2; } git lfs post-commit \"\$@\" Add the following to .git/hooks/post-merge: #!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/post-merge.\n\"; exit 2; } git lfs post-merge \"\$@\"" [ "$expected" = "$(git lfs update --manual 2>&1)" ] [ "test" = "$(cat .git/hooks/pre-push)" ] [ "test" = "$(cat .git/hooks/post-checkout)" ] [ "test" = "$(cat .git/hooks/post-commit)" ] [ "test" = "$(cat .git/hooks/post-merge)" ] # force replace unexpected hook [ "Updated git hooks." = "$(git lfs update --force)" ] [ "$pre_push_hook" = "$(cat .git/hooks/pre-push)" ] [ "$post_checkout_hook" = "$(cat .git/hooks/post-checkout)" ] [ "$post_commit_hook" = "$(cat .git/hooks/post-commit)" ] [ "$post_merge_hook" = "$(cat .git/hooks/post-merge)" ] has_test_dir || exit 0 echo "test with bare repository" cd .. git clone --mirror without-pre-push bare cd bare git lfs env git lfs update ls -al hooks [ "$pre_push_hook" = "$(cat hooks/pre-push)" ] ) end_test begin_test "update with leading spaces" ( set -e reponame="update-leading-spaces" git init "$reponame" cd "$reponame" [ "Updated git hooks." = "$(git lfs update)" ] # $pre_push_hook contains leading TAB '\t' characters pre_push_hook="#!/bin/sh command -v git-lfs >/dev/null 2>&1 || { echo >&2 \"\\nThis repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting .git/hooks/pre-push.\\n\"; exit 2; } git lfs pre-push \"\$@\"" echo -n "$pre_push_hook" > .git/hooks/pre-push [ "Updated git hooks." = "$(git lfs update)" ] ) end_test begin_test "update lfs.{url}.access" ( set -e mkdir update-access cd update-access git init git config lfs.http://example.com.access private git config lfs.https://example.com.access private git config lfs.https://example2.com.access basic git config lfs.https://example3.com.access other [ "private" = "$(git config lfs.http://example.com.access)" ] [ "private" = "$(git config lfs.https://example.com.access)" ] [ "basic" = "$(git config lfs.https://example2.com.access)" ] [ "other" = "$(git config lfs.https://example3.com.access)" ] expected="Updated git hooks. Updated http://example.com access from private to basic. Updated https://example.com access from private to basic. Removed invalid https://example3.com access of other." ) end_test begin_test "update: outside git repository" ( if [ -d "hooks" ]; then ls -al echo "hooks dir exists" exit 1 fi set +e git lfs update 2>&1 > check.log res=$? set -e if [ "$res" = "0" ]; then if [ -z "$GIT_LFS_TEST_DIR" ]; then echo "Passes because $GIT_LFS_TEST_DIR is unset." exit 0 fi fi [ "$res" = "128" ] if [ -d "hooks" ]; then ls -al echo "hooks dir exists" exit 1 fi cat check.log grep "Not in a git repository" check.log ) end_test git-lfs-2.3.4/test/test-verify.sh000077500000000000000000000056601317167762300167100ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" begin_test "verify with retries" ( set -e reponame="verify-fail-2-times" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="send-verify-action" contents_oid="$(calc_oid "$contents")" contents_short_oid="$(echo "$contents_oid" | head -c 7)" printf "$contents" > a.dat git add a.dat git commit -m "add a.dat" GIT_TRACE=1 GIT_CURL_VERBOSE=1 git push origin master 2>&1 | tee push.log grep "Authorization: Basic * * * * *" push.log [ "0" -eq "${PIPESTATUS[0]}" ] [ "2" -eq "$(grep -c "verify $contents_short_oid attempt" push.log)" ] ) end_test begin_test "verify with retries (success without retry)" ( set -e reponame="verify-fail-0-times" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="send-verify-action" contents_oid="$(calc_oid "$contents")" contents_short_oid="$(echo "$contents_oid" | head -c 7)" printf "$contents" > a.dat git add a.dat git commit -m "add a.dat" GIT_TRACE=1 GIT_CURL_VERBOSE=1 git push origin master 2>&1 | tee push.log grep "Authorization: Basic * * * * *" push.log [ "0" -eq "${PIPESTATUS[0]}" ] [ "1" -eq "$(grep -c "verify $contents_short_oid attempt" push.log)" ] ) end_test begin_test "verify with retries (insufficient retries)" ( set -e reponame="verify-fail-10-times" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="send-verify-action" contents_oid="$(calc_oid "$contents")" contents_short_oid="$(echo "$contents_oid" | head -c 7)" printf "$contents" > a.dat git add a.dat git commit -m "add a.dat" set +e GIT_TRACE=1 git push origin master 2>&1 | tee push.log if [ "0" -eq "${PIPESTATUS[0]}" ]; then echo >&2 "verify: expected \"git push\" to fail, didn't ..." exit 1 fi set -e [ "3" -eq "$(grep -c "verify $contents_short_oid attempt" push.log)" ] ) end_test begin_test "verify with retries (bad .gitconfig)" ( set -e reponame="bad-config-verify-fail-2-times" setup_remote_repo "$reponame" clone_repo "$reponame" "$reponame" # Invalid `lfs.transfer.maxverifies` will default to 3. git config "lfs.transfer.maxverifies" "-1" git lfs track "*.dat" git add .gitattributes git commit -m "initial commit" contents="send-verify-action" contents_oid="$(calc_oid "$contents")" contents_short_oid="$(echo "$contents_oid" | head -c 7)" printf "$contents" > a.dat git add a.dat git commit -m "add a.dat" GIT_TRACE=1 GIT_CURL_VERBOSE=1 git push origin master 2>&1 | tee push.log grep "Authorization: Basic * * * * *" push.log [ "0" -eq "${PIPESTATUS[0]}" ] [ "2" -eq "$(grep -c "verify $contents_short_oid attempt" push.log)" ] ) end_test git-lfs-2.3.4/test/test-worktree.sh000077500000000000000000000054361317167762300172470ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" ensure_git_version_isnt $VERSION_LOWER "2.5.0" envInitConfig='git config filter.lfs.process = "git-lfs filter-process" git config filter.lfs.smudge = "git-lfs smudge -- %f" git config filter.lfs.clean = "git-lfs clean -- %f"' begin_test "git worktree" ( set -e reponame="worktree-main" mkdir $reponame cd $reponame git init # can't create a worktree until there's 1 commit at least echo "a" > tmp.txt git add tmp.txt git commit -m "Initial commit" expected=$(printf "%s\n%s\n LocalWorkingDir=$(native_path_escaped "$TRASHDIR/$reponame") LocalGitDir=$(native_path_escaped "$TRASHDIR/$reponame/.git") LocalGitStorageDir=$(native_path_escaped "$TRASHDIR/$reponame/.git") LocalMediaDir=$(native_path_escaped "$TRASHDIR/$reponame/.git/lfs/objects") LocalReferenceDir= TempDir=$(native_path_escaped "$TRASHDIR/$reponame/.git/lfs/tmp") ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=$(native_path_escaped "$TRASHDIR/$reponame/.git/lfs") AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic $(escape_path "$(env | grep "^GIT")") %s " "$(git lfs version)" "$(git version)" "$envInitConfig") actual=$(git lfs env) contains_same_elements "$expected" "$actual" worktreename="worktree-2" git worktree add "$TRASHDIR/$worktreename" cd "$TRASHDIR/$worktreename" # git dir in worktree is like submodules (except path is worktrees) but this # is only for index, temp etc # storage of git objects and lfs objects is in the original .git expected=$(printf "%s\n%s\n LocalWorkingDir=$(native_path_escaped "$TRASHDIR/$worktreename") LocalGitDir=$(native_path_escaped "$TRASHDIR/$reponame/.git/worktrees/$worktreename") LocalGitStorageDir=$(native_path_escaped "$TRASHDIR/$reponame/.git") LocalMediaDir=$(native_path_escaped "$TRASHDIR/$reponame/.git/lfs/objects") LocalReferenceDir= TempDir=$(native_path_escaped "$TRASHDIR/$reponame/.git/worktrees/$worktreename/lfs/tmp") ConcurrentTransfers=3 TusTransfers=false BasicTransfersOnly=false SkipDownloadErrors=false FetchRecentAlways=false FetchRecentRefsDays=7 FetchRecentCommitsDays=0 FetchRecentRefsIncludeRemotes=true PruneOffsetDays=3 PruneVerifyRemoteAlways=false PruneRemoteName=origin LfsStorageDir=$(native_path_escaped "$TRASHDIR/$reponame/.git/lfs") AccessDownload=none AccessUpload=none DownloadTransfers=basic UploadTransfers=basic $(escape_path "$(env | grep "^GIT")") %s " "$(git lfs version)" "$(git version)" "$envInitConfig") actual=$(git lfs env) contains_same_elements "$expected" "$actual" ) end_test git-lfs-2.3.4/test/test-zero-len-file.sh000077500000000000000000000027161317167762300200530ustar00rootroot00000000000000#!/usr/bin/env bash . 
"test/testlib.sh" reponame="$(basename "$0" ".sh")" begin_test "push zero len file" ( set -e setup_remote_repo "$reponame" clone_repo "$reponame" repo git lfs track "*.dat" touch empty.dat contents="full" contents_oid=$(calc_oid "$contents") printf "$contents" > full.dat git add .gitattributes *.dat git commit -m "add files" | tee commit.log # cut from commit output # $ git cat-file -p master # tree 2d67d025fb1f9df9fa349412b4b130e982314e92 tree="$(git cat-file -p master | cut -f 2 -d " " | head -n 1)" # cut from tree output # $ git cat-file -p "$tree" # 100644 blob 1e9f8f7cafb6af3a6f6ddf211fa39c45fccea7ab .gitattributes # 100644 blob e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 empty.dat # 100644 blob c5de5ac7dec1c40bafe60d24da9b498937640332 full.dat emptyblob="$(git cat-file -p "$tree" | cut -f 3 -d " " | grep "empty.dat" | cut -f 1 -d$'\t')" # look for lfs pointer in git blob [ "0" = "$(git cat-file -p "$emptyblob" | grep "lfs" -c)" ] assert_pointer "master" "full.dat" "$contents_oid" 4 git push origin master | tee push.log grep "Git LFS: (1 of 1 files)" push.log ) end_test begin_test "pull zero len file" ( set -e clone_repo "$reponame" clone rm clone.log git status | grep -E "working (directory|tree) clean" ls -al if [ -s "empty.dat" ]; then echo "empty.dat has content:" cat empty.dat exit 1 fi [ "full" = "$(cat full.dat)" ] ) end_test git-lfs-2.3.4/test/testenv.sh000066400000000000000000000073561317167762300161200ustar00rootroot00000000000000#!/usr/bin/env bash # Including in script/integration and every test/test-*.sh file. set -e UNAME=$(uname -s) IS_WINDOWS=0 IS_MAC=0 SHASUM="shasum -a 256" PATH_SEPARATOR="/" if [[ $UNAME == MINGW* || $UNAME == MSYS* || $UNAME == CYGWIN* ]] then IS_WINDOWS=1 # Windows might be MSYS2 which does not have the shasum Perl wrapper # script by default, so use sha256sum directly. MacOS on the other hand # does not have sha256sum, so still use shasum as the default. SHASUM="sha256sum" PATH_SEPARATOR="\\" elif [[ $UNAME == *Darwin* ]] then IS_MAC=1 fi resolve_symlink() { local arg=$1 if [ $IS_WINDOWS -eq 1 ]; then printf '%s' "$arg" elif [ $IS_MAC -eq 1 ]; then # no readlink -f on Mac local oldwd=$(pwd) local target=$arg cd `dirname $target` target=`basename $target` while [ -L "$target" ] do target=`readlink $target` cd `dirname $target` target=`basename $target` done local resolveddir=`pwd -P` cd "$oldwd" printf '%s' "$resolveddir/$target" else readlink -f "$arg" fi } # The root directory for the git-lfs repository by default. if [ -z "$ROOTDIR" ]; then ROOTDIR=$(cd $(dirname "$0")/.. && pwd -P) fi # Where Git LFS outputs the compiled binaries BINPATH="$ROOTDIR/bin" # Put bin path on PATH PATH="$BINPATH:$PATH" # Always provide a test dir outside our git repo if not specified TEMPDIR_PREFIX="git-lfs_TEMP.XXXXXX" if [ -z "$GIT_LFS_TEST_DIR" ]; then GIT_LFS_TEST_DIR=$(mktemp -d -t "$TEMPDIR_PREFIX") GIT_LFS_TEST_DIR=$(resolve_symlink $GIT_LFS_TEST_DIR) # cleanup either after single test or at end of integration (except on fail) RM_GIT_LFS_TEST_DIR=yes fi # create a temporary work space TMPDIR=$GIT_LFS_TEST_DIR # This is unique to every test file, and cleared after every test run. TRASHDIR="$TMPDIR/$(basename "$0")-$$" # The directory that the test Git server works from. This cleared at the # beginning of every test run. REMOTEDIR="$ROOTDIR/test/remote" # The directory that stores credentials. Credentials are stored in files with # the username:password with filenames identifying the host (port numbers are # ignored). 
# # # stores the credentials for http://127.0.0.1:* # $CREDSDIR/127.0.0.1 # # # stores the credentials for http://git-server.com # $CREDSDIR/git-server.com # CREDSDIR="$REMOTEDIR/creds" # This is the prefix for Git config files. See the "Test Suite" section in # test/README.md LFS_CONFIG="$REMOTEDIR/config" # This file contains the URL of the test Git server. See the "Test Suite" # section in test/README.md LFS_URL_FILE="$REMOTEDIR/url" # This file contains the SSL URL of the test Git server. See the "Test Suite" # section in test/README.md LFS_SSL_URL_FILE="$REMOTEDIR/sslurl" # This file contains the client cert SSL URL of the test Git server. See the "Test Suite" # section in test/README.md LFS_CLIENT_CERT_URL_FILE="$REMOTEDIR/clientcerturl" # This file contains the self-signed SSL cert of the TLS endpoint of the test Git server. LFS_CERT_FILE="$REMOTEDIR/cert" # This file contains the client certificate of the client cert endpoint of the test Git server. LFS_CLIENT_CERT_FILE="$REMOTEDIR/client.crt" # This file contains the client key of the client cert endpoint of the test Git server. LFS_CLIENT_KEY_FILE="$REMOTEDIR/client.key" # the fake home dir used for the initial setup TESTHOME="$REMOTEDIR/home" GIT_CONFIG_NOSYSTEM=1 GIT_TERMINAL_PROMPT=0 GIT_SSH=lfs-ssh-echo APPVEYOR_REPO_COMMIT_MESSAGE="test: env test should look for GIT_SSH too" export CREDSDIR export GIT_CONFIG_NOSYSTEM export GIT_SSH export APPVEYOR_REPO_COMMIT_MESSAGE mkdir -p "$TMPDIR" mkdir -p "$TRASHDIR" if [ $IS_WINDOWS -eq 1 ]; then # prevent Windows OpenSSH from opening GUI prompts SSH_ASKPASS="" fi . "test/testhelpers.sh" git-lfs-2.3.4/test/testhelpers.sh000066400000000000000000000454471317167762300167750ustar00rootroot00000000000000#!/usr/bin/env bash # assert_pointer confirms that the pointer in the repository for $path in the # given $ref matches the given $oid and $size. # # $ assert_pointer "master" "path/to/file" "some-oid" 123 assert_pointer() { local ref="$1" local path="$2" local oid="$3" local size="$4" gitblob=$(git ls-tree -lrz "$ref" | while read -r -d $'\0' x; do echo $x done | grep "$path" | cut -f 3 -d " ") actual=$(git cat-file -p $gitblob) expected=$(pointer $oid $size) if [ "$expected" != "$actual" ]; then exit 1 fi } # assert_local_object confirms that an object file is stored for the given oid & # has the correct size # $ assert_local_object "some-oid" size assert_local_object() { local oid="$1" local size="$2" local cfg=`git lfs env | grep LocalMediaDir` local f="${cfg:14}/${oid:0:2}/${oid:2:2}/$oid" actualsize=$(wc -c <"$f" | tr -d '[[:space:]]') if [ "$size" != "$actualsize" ]; then exit 1 fi } # refute_local_object confirms that an object file is NOT stored for an oid. 
# If "$size" is given as the second argument, assert that the file exists _and_ # that it does _not_ the expected size # # $ refute_local_object "some-oid" # $ refute_local_object "some-oid" "123" refute_local_object() { local oid="$1" local size="$2" local cfg=`git lfs env | grep LocalMediaDir` local regex="LocalMediaDir=(\S+)" local f="${cfg:14}/${oid:0:2}/${oid:2:2}/$oid" if [ -e $f ]; then if [ -z "$size" ]; then exit 1 fi actual_size="$(wc -c < "$f" | awk '{ print $1 }')" if [ "$size" -eq "$actual_size" ]; then echo >&2 "fatal: expected object $oid not to have size: $size" exit 1 fi fi } # delete_local_object deletes the local storage for an oid # $ delete_local_object "some-oid" delete_local_object() { local oid="$1" local cfg=`git lfs env | grep LocalMediaDir` local f="${cfg:14}/${oid:0:2}/${oid:2:2}/$oid" rm "$f" } # corrupt_local_object corrupts the local storage for an oid # $ corrupt_local_object "some-oid" corrupt_local_object() { local oid="$1" local cfg=`git lfs env | grep LocalMediaDir` local f="${cfg:14}/${oid:0:2}/${oid:2:2}/$oid" cp /dev/null "$f" } # check that the object does not exist in the git lfs server. HTTP log is # written to http.log. JSON output is written to http.json. # # $ refute_server_object "reponame" "oid" refute_server_object() { local reponame="$1" local oid="$2" curl -v "$GITSERVER/$reponame.git/info/lfs/objects/batch" \ -u "user:pass" \ -o http.json \ -d "{\"operation\":\"download\",\"objects\":[{\"oid\":\"$oid\"}]}" \ -H "Accept: application/vnd.git-lfs+json" \ -H "X-Check-Object: 1" \ -H "X-Ignore-Retries: true" 2>&1 | tee http.log [ "0" = "$(grep -c "download" http.json)" ] || { cat http.json exit 1 } } # Delete an object on the lfs server. HTTP log is # written to http.log. JSON output is written to http.json. # # $ delete_server_object "reponame" "oid" delete_server_object() { local reponame="$1" local oid="$2" curl -v "$GITSERVER/$reponame.git/info/lfs/objects/$oid" \ -X DELETE \ -u "user:pass" \ -o http.json \ -H "Accept: application/vnd.git-lfs+json" 2>&1 | tee http.log grep "200 OK" http.log } # check that the object does exist in the git lfs server. HTTP log is written # to http.log. JSON output is written to http.json. 
assert_server_object() { local reponame="$1" local oid="$2" curl -v "$GITSERVER/$reponame.git/info/lfs/objects/batch" \ -u "user:pass" \ -o http.json \ -d "{\"operation\":\"download\",\"objects\":[{\"oid\":\"$oid\"}]}" \ -H "Accept: application/vnd.git-lfs+json" \ -H "X-Check-Object: 1" \ -H "X-Ignore-Retries: true" 2>&1 | tee http.log grep "200 OK" http.log grep "download" http.json || { cat http.json exit 1 } } # This asserts the lock path and returns the lock ID by parsing the response of # # git lfs lock --json assert_lock() { local log="$1" local path="$2" if [ $(grep -c "\"path\":\"$path\"" "$log") -eq 0 ]; then echo "path '$path' not found in:" cat "$log" exit 1 fi local jsonid=$(grep -oh "\"id\":\"\w\+\"" "$log") echo "${jsonid:3}" | tr -d \"\: } # assert that a lock with the given ID exists on the test server assert_server_lock() { local reponame="$1" local id="$2" curl -v "$GITSERVER/$reponame.git/info/lfs/locks" \ -u "user:pass" \ -o http.json \ -H "Accept:application/vnd.git-lfs+json" 2>&1 | tee http.log grep "200 OK" http.log grep "$id" http.json || { cat http.json exit 1 } } # refute that a lock with the given ID exists on the test server refute_server_lock() { local reponame="$1" local id="$2" curl -v "$GITSERVER/$reponame.git/info/lfs/locks" \ -u "user:pass" \ -o http.json \ -H "Accept:application/vnd.git-lfs+json" 2>&1 | tee http.log grep "200 OK" http.log [ $(grep -c "$id" http.json) -eq 0 ] } # Assert that .gitattributes contains a given attribute N times assert_attributes_count() { local fileext="$1" local attrib="$2" local count="$3" pattern="\(*.\)\?$fileext\(.*\)$attrib" actual=$(grep -e "$pattern" .gitattributes | wc -l) if [ "$(printf "%d" "$actual")" != "$count" ]; then echo "wrong number of $attrib entries for $fileext" echo "expected: $count actual: $actual" cat .gitattributes exit 1 fi } assert_file_writeable() { ls -l "$1" | grep -e "^-rw" } refute_file_writeable() { ls -l "$1" | grep -e "^-r-" } git_root() { git rev-parse --show-toplevel 2>/dev/null } dot_git_dir() { echo "$(git_root)/.git" } assert_hooks() { local git_root="$1" if [ -z "$git_root" ]; then echo >&2 "fatal: (assert_hooks) not in git repository" exit 1 fi [ -x "$git_root/hooks/post-checkout" ] [ -x "$git_root/hooks/post-commit" ] [ -x "$git_root/hooks/post-merge" ] [ -x "$git_root/hooks/pre-push" ] } assert_clean_status() { status="$(git status)" echo "$status" | grep "working tree clean" || { echo $status git lfs status } } # pointer returns a string Git LFS pointer file. # # $ pointer abc-some-oid 123 # > version ... pointer() { local oid=$1 local size=$2 local version=${3:-https://git-lfs.github.com/spec/v1} printf "version %s oid sha256:%s size %s " "$version" "$oid" "$size" } # wait_for_file simply sleeps until a file exists. # # $ wait_for_file "path/to/upcoming/file" wait_for_file() { local filename="$1" n=0 while [ $n -lt 10 ]; do if [ -s $filename ]; then return 0 fi sleep 0.5 n=`expr $n + 1` done return 1 } # setup_remote_repo initializes a bare Git repository that is accessible through # the test Git server. The `pwd` is set to the repository's directory, in case # further commands need to be run. This server is running for every test in a # script/integration run, so every test file should setup its own remote # repository to avoid conflicts. 
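#
# A typical test pairs this with clone_repo() below, as
# setup_remote_repo_with_file() does:
#
#   setup_remote_repo "$reponame"
#   clone_repo "$reponame" "clone_$reponame"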
# # $ setup_remote_repo "some-name" # setup_remote_repo() { local reponame="$1" echo "set up remote git repository: $reponame" repodir="$REMOTEDIR/$reponame.git" mkdir -p "$repodir" cd "$repodir" git init --bare git config http.receivepack true git config receive.denyCurrentBranch ignore } # creates a bare remote repository for a local clone. Useful to test pushing to # a fresh remote server. # # $ setup_alternate_remote "$reponame-whatever" # $ setup_alternate_remote "$reponame-whatever" "other-remote-name" # setup_alternate_remote() { local newRemoteName=$1 local remote=${2:-origin} wd=`pwd` setup_remote_repo "$newRemoteName" cd $wd git remote rm "$remote" git remote add "$remote" "$GITSERVER/$newRemoteName" } # clone_repo clones a repository from the test Git server to the subdirectory # $dir under $TRASHDIR. setup_remote_repo() needs to be run first. Output is # written to clone.log. clone_repo() { cd "$TRASHDIR" local reponame="$1" local dir="$2" echo "clone local git repository $reponame to $dir" out=$(git clone "$GITSERVER/$reponame" "$dir" 2>&1) cd "$dir" git config credential.helper lfstest echo "$out" > clone.log echo "$out" } # clone_repo_url clones a Git repository to the subdirectory $dir under $TRASHDIR. # setup_remote_repo() needs to be run first. Output is written to clone.log. clone_repo_url() { cd "$TRASHDIR" local repo="$1" local dir="$2" echo "clone git repository $repo to $dir" out=$(git clone "$repo" "$dir" 2>&1) cd "$dir" git config credential.helper lfstest echo "$out" > clone.log echo "$out" } # clone_repo_ssl clones a repository from the test Git server to the subdirectory # $dir under $TRASHDIR, using the SSL endpoint. # setup_remote_repo() needs to be run first. Output is written to clone_ssl.log. clone_repo_ssl() { cd "$TRASHDIR" local reponame="$1" local dir="$2" echo "clone local git repository $reponame to $dir" out=$(git clone "$SSLGITSERVER/$reponame" "$dir" 2>&1) cd "$dir" git config credential.helper lfstest echo "$out" > clone_ssl.log echo "$out" } # clone_repo_clientcert clones a repository from the test Git server to the subdirectory # $dir under $TRASHDIR, using the client cert endpoint. # setup_remote_repo() needs to be run first. Output is written to clone_client_cert.log. 
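#
# $ clone_repo_clientcert "some-name" "some-dir"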
clone_repo_clientcert() { cd "$TRASHDIR" local reponame="$1" local dir="$2" echo "clone $CLIENTCERTGITSERVER/$reponame to $dir" set +e out=$(git clone "$CLIENTCERTGITSERVER/$reponame" "$dir" 2>&1) res="${PIPESTATUS[0]}" set -e if [ "0" -eq "$res" ]; then cd "$dir" echo "$out" > clone_client_cert.log git config credential.helper lfstest exit 0 fi echo "$out" > clone_client_cert.log if [ $(grep -c "NSInvalidArgumentException" clone_client_cert.log) -gt 0 ]; then echo "client-cert-mac-openssl" > clone_client_cert.log exit 0 fi exit 1 } # setup_remote_repo_with_file creates a remote repo, clones it locally, commits # a file tracked by LFS, and pushes it to the remote: # # setup_remote_repo_with_file "reponame" "filename" setup_remote_repo_with_file() { local reponame="$1" local filename="$2" local dirname="$(dirname "$filename")" setup_remote_repo "$reponame" clone_repo "$reponame" "clone_$reponame" mkdir -p "$dirname" git lfs track "$filename" echo "$filename" > "$filename" git add .gitattributes $filename git commit -m "add $filename" | tee commit.log grep "master (root-commit)" commit.log grep "2 files changed" commit.log grep "create mode 100644 $filename" commit.log grep "create mode 100644 .gitattributes" commit.log git push origin master 2>&1 | tee push.log grep "master -> master" push.log } # substring_position returns the position of a substring in a 1-indexed search # space. # # [ "$(substring_position "foo bar baz" "baz")" -eq "9" ] substring_position() { local str="$1" local substr="$2" # 1) Print the string... # 2) Remove the substring and everything after it # 3) Count the number of characters (bytes) left, i.e., the offset of the # string we were looking for. echo "$str" \ | sed "s/$substr.*$//" \ | wc -c } # repo_endpoint returns the LFS endpoint for a given server and repository. # # [ "$GITSERVER/example/repo.git/info/lfs" = "$(repo_endpoint $GITSERVER example-repo)" ] repo_endpoint() { local server="$1" local repo="$2" echo "$server/$repo.git/info/lfs" } # setup initializes the clean, isolated environment for integration tests. setup() { cd "$ROOTDIR" rm -rf "$REMOTEDIR" mkdir "$REMOTEDIR" if [ -z "$SKIPCOMPILE" ] && [ -z "$LFS_BIN" ]; then echo "compile git-lfs for $0" script/bootstrap || { return $? 
} fi echo "Git LFS: ${LFS_BIN:-$(which git-lfs)}" git lfs version git version if [ -z "$SKIPCOMPILE" ]; then [ $IS_WINDOWS -eq 1 ] && EXT=".exe" for go in test/cmd/*.go; do GO15VENDOREXPERIMENT=1 go build -o "$BINPATH/$(basename $go .go)$EXT" "$go" done if [ -z "$SKIPAPITESTCOMPILE" ]; then # Ensure API test util is built during tests to ensure it stays in sync GO15VENDOREXPERIMENT=1 go build -o "$BINPATH/git-lfs-test-server-api$EXT" "test/git-lfs-test-server-api/main.go" "test/git-lfs-test-server-api/testdownload.go" "test/git-lfs-test-server-api/testupload.go" fi fi LFSTEST_URL="$LFS_URL_FILE" LFSTEST_SSL_URL="$LFS_SSL_URL_FILE" LFSTEST_CLIENT_CERT_URL="$LFS_CLIENT_CERT_URL_FILE" LFSTEST_DIR="$REMOTEDIR" LFSTEST_CERT="$LFS_CERT_FILE" LFSTEST_CLIENT_CERT="$LFS_CLIENT_CERT_FILE" LFSTEST_CLIENT_KEY="$LFS_CLIENT_KEY_FILE" lfstest-gitserver > "$REMOTEDIR/gitserver.log" 2>&1 & wait_for_file "$LFS_URL_FILE" wait_for_file "$LFS_SSL_URL_FILE" wait_for_file "$LFS_CLIENT_CERT_URL_FILE" wait_for_file "$LFS_CERT_FILE" wait_for_file "$LFS_CLIENT_CERT_FILE" wait_for_file "$LFS_CLIENT_KEY_FILE" LFS_CLIENT_CERT_URL=`cat $LFS_CLIENT_CERT_URL_FILE` # Set up the initial git config and osx keychain if applicable HOME="$TESTHOME" mkdir "$HOME" git lfs install --skip-repo git config --global credential.usehttppath true git config --global credential.helper lfstest git config --global user.name "Git LFS Tests" git config --global user.email "git-lfs@example.com" git config --global http.sslcainfo "$LFS_CERT_FILE" git config --global http.$LFS_CLIENT_CERT_URL/.sslKey "$LFS_CLIENT_KEY_FILE" git config --global http.$LFS_CLIENT_CERT_URL/.sslCert "$LFS_CLIENT_CERT_FILE" git config --global http.$LFS_CLIENT_CERT_URL/.sslVerify "false" ( grep "git-lfs clean" "$REMOTEDIR/home/.gitconfig" > /dev/null && grep "git-lfs filter-process" "$REMOTEDIR/home/.gitconfig" > /dev/null ) || { echo "global git config should be set in $REMOTEDIR/home" ls -al "$REMOTEDIR/home" exit 1 } # setup the git credential password storage mkdir -p "$CREDSDIR" printf "user:pass" > "$CREDSDIR/127.0.0.1" echo echo "HOME: $HOME" echo "TMP: $TMPDIR" echo "CREDS: $CREDSDIR" echo "lfstest-gitserver:" echo " LFSTEST_URL=$LFS_URL_FILE" echo " LFSTEST_SSL_URL=$LFS_SSL_URL_FILE" echo " LFSTEST_CLIENT_CERT_URL=$LFS_CLIENT_CERT_URL_FILE ($LFS_CLIENT_CERT_URL)" echo " LFSTEST_CERT=$LFS_CERT_FILE" echo " LFSTEST_CLIENT_CERT=$LFS_CLIENT_CERT_FILE" echo " LFSTEST_CLIENT_KEY=$LFS_CLIENT_KEY_FILE" echo " LFSTEST_DIR=$REMOTEDIR" echo "GIT:" git config --global --get-regexp "lfs|credential|user" echo } # shutdown cleans the $TRASHDIR and shuts the test Git server down. shutdown() { # every test/test-*.sh file should cleanup its trashdir [ -z "$KEEPTRASH" ] && rm -rf "$TRASHDIR" if [ "$SHUTDOWN_LFS" != "no" ]; then # only cleanup test/remote after script/integration done OR a single # test/test-*.sh file is run manually. if [ -s "$LFS_URL_FILE" ]; then curl -s "$(cat "$LFS_URL_FILE")/shutdown" fi [ -z "$KEEPTRASH" ] && rm -rf "$REMOTEDIR" # delete entire lfs test root if we created it (double check pattern) if [ -z "$KEEPTRASH" ] && [ "$RM_GIT_LFS_TEST_DIR" = "yes" ] && [[ $GIT_LFS_TEST_DIR == *"$TEMPDIR_PREFIX"* ]]; then rm -rf "$GIT_LFS_TEST_DIR" fi fi } ensure_git_version_isnt() { local expectedComparison=$1 local version=$2 local gitVersion=$(git version | cut -d" " -f3) set +e compare_version $gitVersion $version result=$? 
set -e if [[ $result == $expectedComparison ]]; then echo "skip: $0 (git version $(comparison_to_operator $expectedComparison) $version)" exit fi } VERSION_EQUAL=0 VERSION_HIGHER=1 VERSION_LOWER=2 # Compare $1 and $2 and return VERSION_EQUAL / VERSION_LOWER / VERSION_HIGHER compare_version() { if [[ $1 == $2 ]] then return $VERSION_EQUAL fi local IFS=. local i ver1=($1) ver2=($2) # fill empty fields in ver1 with zeros for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) do ver1[i]=0 done for ((i=0; i<${#ver1[@]}; i++)) do if [[ -z ${ver2[i]} ]] then # fill empty fields in ver2 with zeros ver2[i]=0 fi if ((10#${ver1[i]} > 10#${ver2[i]})) then return $VERSION_HIGHER fi if ((10#${ver1[i]} < 10#${ver2[i]})) then return $VERSION_LOWER fi done return $VERSION_EQUAL } comparison_to_operator() { local comparison=$1 if [[ $1 == $VERSION_EQUAL ]]; then echo "==" elif [[ $1 == $VERSION_HIGHER ]]; then echo ">" elif [[ $1 == $VERSION_LOWER ]]; then echo "<" else echo "???" fi } # Calculate the object ID from the string passed as the argument calc_oid() { printf "$1" | $SHASUM | cut -f 1 -d " " } # Calculate the object ID from the file passed as the argument calc_oid_file() { $SHASUM "$1" | cut -f 1 -d " " } # Get a date string with an offset # Args: One or more date offsets of the form (regex) "[+-]\d+[dmyHM]" # e.g. +1d = 1 day forward from today # -5y = 5 years before today # Example call: # D=$(get_date +1y +1m -5H) # returns date as string in RFC3339 format ccyy-mm-ddThh:MM:ssZ # note returns in UTC time not local time hence Z and not +/- get_date() { # Wrapped because BSD (inc OSX) & GNU 'date' functions are different # on Windows under Git Bash it's GNU if date --version >/dev/null 2>&1 ; then # GNU ARGS="" for var in "$@" do # GNU offsets are more verbose unit=${var: -1} val=${var:0:${#var}-1} case "$unit" in d) unit="days" ;; m) unit="months" ;; y) unit="years" ;; H) unit="hours" ;; M) unit="minutes" ;; esac ARGS="$ARGS $val $unit" done date -d "$ARGS" -u +%Y-%m-%dT%TZ else # BSD ARGS="" for var in "$@" do ARGS="$ARGS -v$var" done date $ARGS -u +%Y-%m-%dT%TZ fi } # Convert potentially MinGW bash paths to native Windows paths # Needed to match generic built paths in test scripts to native paths generated from Go native_path() { local arg=$1 if [ $IS_WINDOWS -eq 1 ]; then # Use params form to avoid interpreting any '\' characters printf '%s' "$(cygpath -w $arg)" else printf '%s' "$arg" fi } # escape any instance of '\' with '\\' on Windows escape_path() { local unescaped="$1" if [ $IS_WINDOWS -eq 1 ]; then printf '%s' "${unescaped//\\/\\\\}" else printf '%s' "$unescaped" fi } # As native_path but escape all backslash characters to "\\" native_path_escaped() { local unescaped=$(native_path "$1") escape_path "$unescaped" } cat_end() { if [ $IS_WINDOWS -eq 1 ]; then printf '^M$' else printf '$' fi } # Compare 2 lists which are newline-delimited in a string, ignoring ordering and blank lines contains_same_elements() { # Remove blank lines then sort printf '%s' "$1" | grep -v '^$' | sort > a.txt printf '%s' "$2" | grep -v '^$' | sort > b.txt set +e diff -u a.txt b.txt 1>&2 res=$? set -e rm a.txt b.txt exit $res } is_stdin_attached() { test -t0 echo $? } has_test_dir() { if [ -z "$GIT_LFS_TEST_DIR" ]; then echo "No GIT_LFS_TEST_DIR. Skipping..." exit 0 fi } git-lfs-2.3.4/test/testlib.sh000066400000000000000000000054001317167762300160620ustar00rootroot00000000000000#!/usr/bin/env bash # Usage: . testlib.sh # Simple shell command language test library. 
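#
# Every test body runs in its own subshell, so directory changes, variable
# assignments, and shell options set inside one test cannot leak into the
# next.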
# # Tests must follow the basic form: # # begin_test "the thing" # ( # set -e # echo "hello" # false # ) # end_test # # When a test fails its stdout and stderr are shown. # # Note that tests must `set -e' within the subshell block or failed assertions # will not cause the test to fail and the result may be misreported. # # Copyright (c) 2011-13 by Ryan Tomayko # License: MIT fullfile="$(pwd)/$0" . "test/testenv.sh" set -e # keep track of num tests and failures tests=0 failures=0 # this runs at process exit atexit () { shutdown if [ $failures -gt 0 ]; then exit 1 fi exit 0 } # create the trash dir trap "atexit" EXIT SHUTDOWN_LFS=yes GITSERVER=undefined # if the file exists, assume another process started it, and will clean it up # when it's done if [ -s $LFS_URL_FILE ]; then SHUTDOWN_LFS=no else setup || { failures=$(( failures + 1 )) exit $? } fi GITSERVER=$(cat "$LFS_URL_FILE") SSLGITSERVER=$(cat "$LFS_SSL_URL_FILE") CLIENTCERTGITSERVER=$(cat "$LFS_CLIENT_CERT_URL_FILE") cd "$TRASHDIR" # Mark the beginning of a test. A subshell should immediately follow this # statement. begin_test () { test_status=$? [ -n "$test_description" ] && end_test $test_status unset test_status tests=$(( tests + 1 )) test_description="$1" exec 3>&1 4>&2 out="$TRASHDIR/out" err="$TRASHDIR/err" trace="$TRASHDIR/trace" exec 1>"$out" 2>"$err" # enabling GIT_TRACE can cause Windows git to stall, esp with fd 5 # other fd numbers like 8/9 don't stall but still don't work, so disable if [ $IS_WINDOWS -eq 0 ]; then exec 5>"$trace" export GIT_TRACE=5 fi # reset global git config HOME="$TRASHDIR/home" rm -rf "$TRASHDIR/home" mkdir "$HOME" cp "$TESTHOME/.gitconfig" "$HOME/.gitconfig" # allow the subshell to exit non-zero without exiting this process set -x +e } # Mark the end of a test. end_test () { test_status="${1:-$?}" set +x -e exec 1>&3 2>&4 # close fd 5 (GIT_TRACE) exec 5>&- if [ "$test_status" -eq 0 ]; then printf "test: %-60s OK\n" "$test_description ..." else failures=$(( failures + 1 )) printf "test: %-60s FAILED\n" "$test_description ..." ( echo "-- stdout --" sed 's/^/ /' <"$TRASHDIR/out" echo "-- stderr --" grep -v -e '^\+ end_test' -e '^+ set +x' <"$TRASHDIR/err" | sed 's/^/ /' if [ $IS_WINDOWS -eq 0 ]; then echo "-- git trace --" sed 's/^/ /' <"$TRASHDIR/trace" fi ) 1>&2 echo fi unset test_description } git-lfs-2.3.4/test/testutils.go000066400000000000000000000314341317167762300164550ustar00rootroot00000000000000package test // Utility functions for more complex go tests // Need to be in a separate test package so they can be imported anywhere // Also can't add _test.go suffix to exclude from main build (import doesn't work) // To avoid import cycles, append "_test" to the package statement of any test using // this package and use "import . 
original/package/name" to get the same visibility // as if the test was in the same package (as usual) import ( "fmt" "io" "io/ioutil" "math/rand" "os" "os/exec" "path/filepath" "strings" "sync" "time" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/localstorage" ) type RepoType int const ( // Normal repo with working copy RepoTypeNormal = RepoType(iota) // Bare repo (no working copy) RepoTypeBare = RepoType(iota) // Repo with working copy but git dir is separate RepoTypeSeparateDir = RepoType(iota) ) var ( // Deterministic sequence of seeds for file data fileInputSeed = rand.NewSource(0) storageOnce sync.Once ) type RepoCreateSettings struct { RepoType RepoType } // Callback interface (testing.T compatible) type RepoCallback interface { // Fatalf reports error and fails Fatalf(format string, args ...interface{}) // Errorf reports error and continues Errorf(format string, args ...interface{}) } type Repo struct { // Path to the repo, working copy if non-bare Path string // Path to the git dir GitDir string // Paths to remotes Remotes map[string]*Repo // Settings used to create this repo Settings *RepoCreateSettings // Previous dir for pushd popDir string // Test callback callback RepoCallback } // Change to repo dir but save current dir func (r *Repo) Pushd() { if r.popDir != "" { r.callback.Fatalf("Cannot Pushd twice") } oldwd, err := os.Getwd() if err != nil { r.callback.Fatalf("Can't get cwd %v", err) } err = os.Chdir(r.Path) if err != nil { r.callback.Fatalf("Can't chdir %v", err) } r.popDir = oldwd localstorage.ResolveDirs() } func (r *Repo) Popd() { if r.popDir != "" { err := os.Chdir(r.popDir) if err != nil { r.callback.Fatalf("Can't chdir %v", err) } r.popDir = "" } } func (r *Repo) Cleanup() { // pop out if necessary r.Popd() // Make sure cwd isn't inside a path we're going to delete oldwd, err := os.Getwd() if err == nil { if strings.HasPrefix(oldwd, r.Path) || strings.HasPrefix(oldwd, r.GitDir) { os.Chdir(os.TempDir()) } } if r.GitDir != "" { os.RemoveAll(r.GitDir) r.GitDir = "" } if r.Path != "" { os.RemoveAll(r.Path) r.Path = "" } for _, remote := range r.Remotes { remote.Cleanup() } r.Remotes = nil } // NewRepo creates a new git repo in a new temp dir func NewRepo(callback RepoCallback) *Repo { return NewCustomRepo(callback, &RepoCreateSettings{RepoType: RepoTypeNormal}) } // NewCustomRepo creates a new git repo in a new temp dir with more control over settings func NewCustomRepo(callback RepoCallback, settings *RepoCreateSettings) *Repo { ret := &Repo{ Settings: settings, Remotes: make(map[string]*Repo), callback: callback} path, err := ioutil.TempDir("", "lfsRepo") if err != nil { callback.Fatalf("Can't create temp dir for git repo: %v", err) } ret.Path = path args := []string{"init"} switch settings.RepoType { case RepoTypeBare: args = append(args, "--bare") ret.GitDir = ret.Path case RepoTypeSeparateDir: gitdir, err := ioutil.TempDir("", "lfstestgitdir") if err != nil { ret.Cleanup() callback.Fatalf("Can't create temp dir for git repo: %v", err) } args = append(args, "--separate-dir", gitdir) ret.GitDir = gitdir default: ret.GitDir = filepath.Join(ret.Path, ".git") } args = append(args, path) cmd := exec.Command("git", args...) 
err = cmd.Run() if err != nil { ret.Cleanup() callback.Fatalf("Unable to create git repo at %v: %v", path, err) } // Configure default user/email so not reliant on env ret.Pushd() RunGitCommand(callback, true, "config", "user.name", "Git LFS Tests") RunGitCommand(callback, true, "config", "user.email", "git-lfs@example.com") ret.Popd() return ret } // WrapRepo creates a new Repo instance for an existing git repo func WrapRepo(c RepoCallback, path string) *Repo { return &Repo{Path: path, callback: c, Settings: &RepoCreateSettings{RepoType: RepoTypeNormal}} } // Simplistic fire & forget running of git command - returns combined output func RunGitCommand(callback RepoCallback, failureCheck bool, args ...string) string { outp, err := exec.Command("git", args...).CombinedOutput() if failureCheck && err != nil { callback.Fatalf("Error running git command 'git %v': %v %v", strings.Join(args, " "), err, string(outp)) } return string(outp) } // Input data for a single file in a commit type FileInput struct { // Name of file (required) Filename string // Size of file (required) Size int64 // Input data (optional, if provided will be source of data) DataReader io.Reader // Input data (optional, if provided will be source of data) Data string } func (infile *FileInput) AddToIndex(output *CommitOutput, repo *Repo) { inputData := infile.getFileInputReader() pointer, err := infile.writeLFSPointer(inputData) if err != nil { repo.callback.Errorf("%+v", err) return } output.Files = append(output.Files, pointer) RunGitCommand(repo.callback, true, "add", infile.Filename) } func (infile *FileInput) writeLFSPointer(inputData io.Reader) (*lfs.Pointer, error) { cleaned, err := lfs.PointerClean(inputData, infile.Filename, infile.Size, nil) if err != nil { return nil, errors.Wrap(err, "creating pointer file") } // this only created the temp file, move to final location tmpfile := cleaned.Filename storageOnce.Do(localstorage.ResolveDirs) mediafile, err := lfs.LocalMediaPath(cleaned.Oid) if err != nil { return nil, errors.Wrap(err, "local media path") } if _, err := os.Stat(mediafile); err != nil { if err := os.Rename(tmpfile, mediafile); err != nil { return nil, err } } // Write pointer to local filename for adding (not using clean filter) os.MkdirAll(filepath.Dir(infile.Filename), 0755) f, err := os.Create(infile.Filename) if err != nil { return nil, errors.Wrap(err, "creating pointer file") } _, err = cleaned.Pointer.Encode(f) f.Close() if err != nil { return nil, errors.Wrap(err, "encoding pointer file") } return cleaned.Pointer, nil } func (infile *FileInput) getFileInputReader() io.Reader { if infile.DataReader != nil { return infile.DataReader } if len(infile.Data) > 0 { return strings.NewReader(infile.Data) } // Different data for each file but deterministic return NewPlaceholderDataReader(fileInputSeed.Int63(), infile.Size) } // Input for defining commits for test repo type CommitInput struct { // Date that we should commit on (optional, leave blank for 'now') CommitDate time.Time // List of files to include in this commit Files []*FileInput // List of parent branches (all branches must have been created in a previous NewBranch or be master) // Can be omitted to just use the parent of the previous commit ParentBranches []string // Name of a new branch we should create at this commit (optional - master not required) NewBranch string // Names of any tags we should create at this commit (optional) Tags []string // Name of committer CommitterName string // Email of committer CommitterEmail string } // Output 
struct with details of commits created for test type CommitOutput struct { Sha string Parents []string Files []*lfs.Pointer } func commitAtDate(atDate time.Time, committerName, committerEmail, msg string) error { var args []string if committerName != "" && committerEmail != "" { args = append(args, "-c", fmt.Sprintf("user.name=%v", committerName)) args = append(args, "-c", fmt.Sprintf("user.email=%v", committerEmail)) } args = append(args, "commit", "--allow-empty", "-m", msg) cmd := exec.Command("git", args...) env := os.Environ() // set GIT_COMMITTER_DATE environment var e.g. "Fri Jun 21 20:26:41 2013 +0900" if atDate.IsZero() { env = append(env, "GIT_COMMITTER_DATE=") env = append(env, "GIT_AUTHOR_DATE=") } else { env = append(env, fmt.Sprintf("GIT_COMMITTER_DATE=%v", git.FormatGitDate(atDate))) env = append(env, fmt.Sprintf("GIT_AUTHOR_DATE=%v", git.FormatGitDate(atDate))) } cmd.Env = env out, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("%v %v", err, string(out)) } return nil } func (repo *Repo) AddCommits(inputs []*CommitInput) []*CommitOutput { if repo.Settings.RepoType == RepoTypeBare { repo.callback.Fatalf("Cannot use AddCommits on a bare repo; clone it & push changes instead") } // Change to repo working dir oldwd, err := os.Getwd() if err != nil { repo.callback.Fatalf("Can't get cwd %v", err) } err = os.Chdir(repo.Path) if err != nil { repo.callback.Fatalf("Can't chdir to repo %v", err) } // Used to check whether we need to checkout another commit before lastBranch := "master" outputs := make([]*CommitOutput, 0, len(inputs)) for i, input := range inputs { output := &CommitOutput{} // first, are we on the correct branch if len(input.ParentBranches) > 0 { if input.ParentBranches[0] != lastBranch { RunGitCommand(repo.callback, true, "checkout", input.ParentBranches[0]) lastBranch = input.ParentBranches[0] } } // Is this a merge? if len(input.ParentBranches) > 1 { // Always take the *other* side in a merge so we adopt changes // also don't automatically commit, we'll do that below args := []string{"merge", "--no-ff", "--no-commit", "--strategy-option=theirs"} args = append(args, input.ParentBranches[1:]...) RunGitCommand(repo.callback, false, args...) } else if input.NewBranch != "" { RunGitCommand(repo.callback, true, "checkout", "-b", input.NewBranch) lastBranch = input.NewBranch } // Any files to write? 
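// Each FileInput below becomes an LFS pointer: the content is cleaned,
// the object is written into the local media store, and the pointer
// file is staged with `git add` (see FileInput.AddToIndex above).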
for _, infile := range input.Files { infile.AddToIndex(output, repo) } // Now commit err = commitAtDate(input.CommitDate, input.CommitterName, input.CommitterEmail, fmt.Sprintf("Test commit %d", i)) if err != nil { repo.callback.Fatalf("Error committing: %v", err) } commit, err := git.GetCommitSummary("HEAD") if err != nil { repo.callback.Fatalf("Error determining commit SHA: %v", err) } // tags for _, tag := range input.Tags { // Use annotated tags, assume full release tags (also tag objects have edge cases) RunGitCommand(repo.callback, true, "tag", "-a", "-m", "Added tag", tag) } output.Sha = commit.Sha output.Parents = commit.Parents outputs = append(outputs, output) } // Restore cwd err = os.Chdir(oldwd) if err != nil { repo.callback.Fatalf("Can't restore old cwd %v", err) } return outputs } // Add a new remote (generate a path for it to live in, will be cleaned up) func (r *Repo) AddRemote(name string) *Repo { if _, exists := r.Remotes[name]; exists { r.callback.Fatalf("Remote %v already exists", name) } remote := NewCustomRepo(r.callback, &RepoCreateSettings{RepoTypeBare}) r.Remotes[name] = remote RunGitCommand(r.callback, true, "remote", "add", name, remote.Path) return remote } // Just a psuedo-random stream of bytes (not cryptographic) // Calls RNG a bit less often than using rand.Source directly type PlaceholderDataReader struct { source rand.Source bytesLeft int64 } func NewPlaceholderDataReader(seed, size int64) *PlaceholderDataReader { return &PlaceholderDataReader{rand.NewSource(seed), size} } func (r *PlaceholderDataReader) Read(p []byte) (int, error) { c := len(p) i := 0 for i < c && r.bytesLeft > 0 { // Use all 8 bytes of the 64-bit random number val64 := r.source.Int63() for j := 0; j < 8 && i < c && r.bytesLeft > 0; j++ { // Duplicate this byte 16 times (faster) for k := 0; k < 16 && r.bytesLeft > 0; k++ { p[i] = byte(val64) i++ r.bytesLeft-- } // Next byte from the 8-byte number val64 = val64 >> 8 } } var err error if r.bytesLeft == 0 { err = io.EOF } return i, err } // RefsByName implements sort.Interface for []*git.Ref based on name type RefsByName []*git.Ref func (a RefsByName) Len() int { return len(a) } func (a RefsByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a RefsByName) Less(i, j int) bool { return a[i].Name < a[j].Name } // WrappedPointersByOid implements sort.Interface for []*lfs.WrappedPointer based on oid type WrappedPointersByOid []*lfs.WrappedPointer func (a WrappedPointersByOid) Len() int { return len(a) } func (a WrappedPointersByOid) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a WrappedPointersByOid) Less(i, j int) bool { return a[i].Pointer.Oid < a[j].Pointer.Oid } // PointersByOid implements sort.Interface for []*lfs.Pointer based on oid type PointersByOid []*lfs.Pointer func (a PointersByOid) Len() int { return len(a) } func (a PointersByOid) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a PointersByOid) Less(i, j int) bool { return a[i].Oid < a[j].Oid } git-lfs-2.3.4/tools/000077500000000000000000000000001317167762300142425ustar00rootroot00000000000000git-lfs-2.3.4/tools/channels.go000066400000000000000000000016151317167762300163670ustar00rootroot00000000000000package tools import "fmt" // Interface for all types of wrapper around a channel of results and an error channel // Implementors will expose a type-specific channel for results // Call the Wait() function after processing the results channel to catch any errors // that occurred during the async processing type ChannelWrapper interface { // Call this after processing 
results channel to check for async errors Wait() error } // Base implementation of channel wrapper to just deal with errors type BaseChannelWrapper struct { errorChan <-chan error } func (w *BaseChannelWrapper) Wait() error { var err error for e := range w.errorChan { if err != nil { // Combine in case multiple errors err = fmt.Errorf("%v\n%v", err, e) } else { err = e } } return err } func NewBaseChannelWrapper(errChan <-chan error) *BaseChannelWrapper { return &BaseChannelWrapper{errorChan: errChan} } git-lfs-2.3.4/tools/cygwin.go000066400000000000000000000001121317167762300160630ustar00rootroot00000000000000// +build !windows package tools func isCygwin() bool { return false } git-lfs-2.3.4/tools/cygwin_windows.go000066400000000000000000000015401317167762300176430ustar00rootroot00000000000000// +build windows package tools import ( "bytes" "fmt" "github.com/git-lfs/git-lfs/subprocess" ) type cygwinSupport byte const ( cygwinStateUnknown cygwinSupport = iota cygwinStateEnabled cygwinStateDisabled ) func (c cygwinSupport) Enabled() bool { switch c { case cygwinStateEnabled: return true case cygwinStateDisabled: return false default: panic(fmt.Sprintf("unknown enabled state for %v", c)) } } var ( cygwinState cygwinSupport ) func isCygwin() bool { if cygwinState != cygwinStateUnknown { return cygwinState.Enabled() } cmd := subprocess.ExecCommand("uname") out, err := cmd.Output() if err != nil { return false } if bytes.Contains(out, []byte("CYGWIN")) || bytes.Contains(out, []byte("MSYS")) { cygwinState = cygwinStateEnabled } else { cygwinState = cygwinStateDisabled } return cygwinState.Enabled() } git-lfs-2.3.4/tools/filetools.go000066400000000000000000000251141317167762300165740ustar00rootroot00000000000000// Package tools contains other helper functions too small to justify their own package // NOTE: Subject to change, do not rely on this package from outside git-lfs source package tools import ( "bufio" "encoding/hex" "fmt" "io" "os" "path" "path/filepath" "runtime" "strconv" "strings" "sync" "sync/atomic" "github.com/git-lfs/git-lfs/filepathfilter" ) // FileOrDirExists determines if a file/dir exists, returns IsDir() results too. func FileOrDirExists(path string) (exists bool, isDir bool) { fi, err := os.Stat(path) if err != nil { return false, false } else { return true, fi.IsDir() } } // FileExists determines if a file (NOT dir) exists. func FileExists(path string) bool { ret, isDir := FileOrDirExists(path) return ret && !isDir } // DirExists determines if a dir (NOT file) exists. func DirExists(path string) bool { ret, isDir := FileOrDirExists(path) return ret && isDir } // FileExistsOfSize determines if a file exists and is of a specific size. 
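//
// A minimal usage sketch (illustrative; the filename and size are
// made-up values, not part of this package):
//
//	if tools.FileExistsOfSize("download.tmp", 1024) {
//		// reuse the previously written file
//	}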
func FileExistsOfSize(path string, sz int64) bool { fi, err := os.Stat(path) if err != nil { return false } return !fi.IsDir() && fi.Size() == sz } // ResolveSymlinks ensures that if the path supplied is a symlink, it is // resolved to the actual concrete path func ResolveSymlinks(path string) string { if len(path) == 0 { return path } if resolved, err := filepath.EvalSymlinks(path); err == nil { return resolved } return path } // RenameFileCopyPermissions moves srcfile to destfile, replacing destfile if // necessary and also copying the permissions of destfile if it already exists func RenameFileCopyPermissions(srcfile, destfile string) error { info, err := os.Stat(destfile) if os.IsNotExist(err) { // no original file } else if err != nil { return err } else { if err := os.Chmod(srcfile, info.Mode()); err != nil { return fmt.Errorf("can't set filemode on file %q: %v", srcfile, err) } } if err := os.Rename(srcfile, destfile); err != nil { return fmt.Errorf("cannot replace %q with %q: %v", destfile, srcfile, err) } return nil } // CleanPaths splits the given `paths` argument by the delimiter argument, and // then "cleans" that path according to the path.Clean function (see // https://golang.org/pkg/path#Clean). // Note always cleans to '/' path separators regardless of platform (git friendly) func CleanPaths(paths, delim string) (cleaned []string) { // If paths is an empty string, splitting it will yield [""], which will // become the path ".". To avoid this, bail out if trimmed paths // argument is empty. if paths = strings.TrimSpace(paths); len(paths) == 0 { return } for _, part := range strings.Split(paths, delim) { part = strings.TrimSpace(part) cleaned = append(cleaned, path.Clean(part)) } return cleaned } // VerifyFileHash reads a file and verifies whether the SHA is correct // Returns an error if there is a problem func VerifyFileHash(oid, path string) error { f, err := os.Open(path) if err != nil { return err } defer f.Close() h := NewLfsContentHash() _, err = io.Copy(h, f) if err != nil { return err } calcOid := hex.EncodeToString(h.Sum(nil)) if calcOid != oid { return fmt.Errorf("File %q has an invalid hash %s, expected %s", path, calcOid, oid) } return nil } // FastWalkCallback is the signature for the callback given to FastWalkGitRepo() type FastWalkCallback func(parentDir string, info os.FileInfo, err error) // FastWalkGitRepo is a more optimal implementation of filepath.Walk for a Git // repo. The callback guaranteed to be called sequentially. The function returns // once all files and errors have triggered callbacks. // It differs in the following ways: // * Uses goroutines to parallelise large dirs and descent into subdirs // * Does not provide sorted output; parents will always be before children but // there are no other guarantees. 
Use parentDir argument in the callback to // determine absolute path rather than tracking it yourself // * Automatically ignores any .git directories // * Respects .gitignore contents and skips ignored files/dirs // // rootDir - Absolute path to the top of the repository working directory func FastWalkGitRepo(rootDir string, cb FastWalkCallback) { walker := fastWalkWithExcludeFiles(rootDir, ".gitignore") for file := range walker.ch { cb(file.ParentDir, file.Info, file.Err) } } // Returned from FastWalk with parent directory context // This is needed because FastWalk can provide paths out of order so the // parent dir cannot be implied type fastWalkInfo struct { ParentDir string Info os.FileInfo Err error } type fastWalker struct { rootDir string excludeFilename string ch chan fastWalkInfo limit int32 cur *int32 wg *sync.WaitGroup } // fastWalkWithExcludeFiles walks the contents of a dir, respecting // include/exclude patterns and also loading new exlude patterns from files // named excludeFilename in directories walked // // rootDir - Absolute path to the top of the repository working directory func fastWalkWithExcludeFiles(rootDir, excludeFilename string) *fastWalker { excludePaths := []filepathfilter.Pattern{ filepathfilter.NewPattern(".git"), filepathfilter.NewPattern(filepath.Join("**", ".git")), } limit, _ := strconv.Atoi(os.Getenv("LFS_FASTWALK_LIMIT")) if limit < 1 { limit = runtime.GOMAXPROCS(-1) * 20 } c := int32(0) w := &fastWalker{ rootDir: rootDir, excludeFilename: excludeFilename, limit: int32(limit), cur: &c, ch: make(chan fastWalkInfo, 256), wg: &sync.WaitGroup{}, } go func() { dirFi, err := os.Stat(w.rootDir) if err != nil { w.ch <- fastWalkInfo{Err: err} return } w.Walk(true, "", dirFi, excludePaths) w.Wait() }() return w } // Walk is the main recursive implementation of fast walk. // Sends the file/dir and any contents to the channel so long as it passes the // include/exclude filter. If a dir, parses any excludeFilename found and updates // the excludePaths with its content before (parallel) recursing into contents // Also splits large directories into multiple goroutines. // Increments waitg.Add(1) for each new goroutine launched internally // // workDir - Relative path inside the repository func (w *fastWalker) Walk(isRoot bool, workDir string, itemFi os.FileInfo, excludePaths []filepathfilter.Pattern) { var fullPath string // Absolute path to the current file or dir var parentWorkDir string // Absolute path to the workDir inside the repository if isRoot { fullPath = w.rootDir } else { parentWorkDir = filepath.Join(w.rootDir, workDir) fullPath = filepath.Join(parentWorkDir, itemFi.Name()) } workPath := filepath.Join(workDir, itemFi.Name()) if !filepathfilter.NewFromPatterns(nil, excludePaths).Allows(workPath) { return } w.ch <- fastWalkInfo{ParentDir: parentWorkDir, Info: itemFi} if !itemFi.IsDir() { // Nothing more to do if this is not a dir return } var childWorkDir string if !isRoot { childWorkDir = filepath.Join(workDir, itemFi.Name()) } if len(w.excludeFilename) > 0 { possibleExcludeFile := filepath.Join(fullPath, w.excludeFilename) var err error excludePaths, err = loadExcludeFilename(possibleExcludeFile, childWorkDir, excludePaths) if err != nil { w.ch <- fastWalkInfo{Err: err} } } // The absolute optimal way to scan would be File.Readdirnames but we // still need the Stat() to know whether something is a dir, so use // File.Readdir instead. Means we can provide os.FileInfo to callers like // filepath.Walk as a bonus. 
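// Below: open the directory and consume its entries in fixed-size
// batches; each batch may be handed to a new goroutine (bounded by
// w.limit) via w.walk().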
df, err := os.Open(fullPath) if err != nil { w.ch <- fastWalkInfo{Err: err} return } // The number of items in a dir we process in each goroutine jobSize := 100 for children, err := df.Readdir(jobSize); err == nil; children, err = df.Readdir(jobSize) { // Parallelise all dirs, and chop large dirs into batches w.walk(children, func(subitems []os.FileInfo) { for _, childFi := range subitems { w.Walk(false, childWorkDir, childFi, excludePaths) } }) } df.Close() if err != nil && err != io.EOF { w.ch <- fastWalkInfo{Err: err} } } func (w *fastWalker) walk(children []os.FileInfo, fn func([]os.FileInfo)) { cur := atomic.AddInt32(w.cur, 1) if cur > w.limit { fn(children) atomic.AddInt32(w.cur, -1) return } w.wg.Add(1) go func() { fn(children) w.wg.Done() atomic.AddInt32(w.cur, -1) }() } func (w *fastWalker) Wait() { w.wg.Wait() close(w.ch) } // loadExcludeFilename reads the given file in gitignore format and returns a // revised array of exclude paths if there are any changes. // If any changes are made a copy of the array is taken so the original is not // modified func loadExcludeFilename(filename, workDir string, excludePaths []filepathfilter.Pattern) ([]filepathfilter.Pattern, error) { f, err := os.OpenFile(filename, os.O_RDONLY, 0644) if err != nil { if os.IsNotExist(err) { return excludePaths, nil } return excludePaths, err } defer f.Close() retPaths := excludePaths modified := false scanner := bufio.NewScanner(f) for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) // Skip blanks, comments and negations (not supported right now) if len(line) == 0 || strings.HasPrefix(line, "#") || strings.HasPrefix(line, "!") { continue } if !modified { // copy on write retPaths = make([]filepathfilter.Pattern, len(excludePaths)) copy(retPaths, excludePaths) modified = true } path := line // Add pattern in context if exclude has separator, or no wildcard // Allow for both styles of separator at this point if strings.ContainsAny(path, "/\\") || !strings.Contains(path, "*") { path = filepath.Join(workDir, line) } retPaths = append(retPaths, filepathfilter.NewPattern(path)) } return retPaths, nil } // SetFileWriteFlag changes write permissions on a file // Used to make a file read-only or not. When writeEnabled = false, the write // bit is removed for all roles. When writeEnabled = true, the behaviour is // different per platform: // On Mac & Linux, the write bit is set only on the owner as per default umask. // All other bits are unaffected. // On Windows, all the write bits are set since Windows doesn't support Unix permissions. 
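//
// An illustrative call (the path here is an assumption, not from this
// package):
//
//	// mark a checked-out file read-only
//	if err := tools.SetFileWriteFlag("assets/big.bin", false); err != nil {
//		log.Fatal(err)
//	}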
func SetFileWriteFlag(path string, writeEnabled bool) error { stat, err := os.Stat(path) if err != nil { return err } mode := uint32(stat.Mode()) if (writeEnabled && (mode&0200) > 0) || (!writeEnabled && (mode&0222) == 0) { // no change needed return nil } if writeEnabled { mode = mode | 0200 // set owner write only // Go's own Chmod makes Windows set all though } else { mode = mode &^ 0222 // disable all write } return os.Chmod(path, os.FileMode(mode)) } git-lfs-2.3.4/tools/filetools_test.go000066400000000000000000000174351317167762300176420ustar00rootroot00000000000000package tools import ( "fmt" "io/ioutil" "os" "path/filepath" "runtime" "sort" "testing" "github.com/git-lfs/git-lfs/subprocess" "github.com/stretchr/testify/assert" ) func TestCleanPathsCleansPaths(t *testing.T) { cleaned := CleanPaths("/foo/bar/,/foo/bar/baz", ",") assert.Equal(t, []string{"/foo/bar", "/foo/bar/baz"}, cleaned) } func TestCleanPathsReturnsNoResultsWhenGivenNoPaths(t *testing.T) { cleaned := CleanPaths("", ",") assert.Empty(t, cleaned) } func TestFastWalkBasic(t *testing.T) { rootDir, err := ioutil.TempDir(os.TempDir(), "GitLfsTestFastWalkBasic") if err != nil { assert.FailNow(t, "Unable to get temp dir: %v", err) } defer os.RemoveAll(rootDir) os.Chdir(rootDir) expectedEntries := createFastWalkInputData(10, 160) walker := fastWalkWithExcludeFiles(expectedEntries[0], "") gotEntries, gotErrors := collectFastWalkResults(walker.ch) assert.Empty(t, gotErrors) sort.Strings(expectedEntries) sort.Strings(gotEntries) assert.Equal(t, expectedEntries, gotEntries) } func BenchmarkFastWalkGitRepoChannels(b *testing.B) { rootDir, err := ioutil.TempDir(os.TempDir(), "GitLfsBenchFastWalkGitRepoChannels") if err != nil { assert.FailNow(b, "Unable to get temp dir: %v", err) } defer os.RemoveAll(rootDir) os.Chdir(rootDir) entries := createFastWalkInputData(1000, 5000) for i := 0; i < b.N; i++ { var files, errors int FastWalkGitRepo(entries[0], func(parent string, info os.FileInfo, err error) { if err != nil { errors++ } else { files++ } }) b.Logf("files: %d, errors: %d", files, errors) } } func BenchmarkFastWalkGitRepoCallback(b *testing.B) { rootDir, err := ioutil.TempDir(os.TempDir(), "GitLfsBenchFastWalkGitRepoCallback") if err != nil { assert.FailNow(b, "Unable to get temp dir: %v", err) } defer os.RemoveAll(rootDir) os.Chdir(rootDir) entries := createFastWalkInputData(1000, 5000) for i := 0; i < b.N; i++ { var files, errors int FastWalkGitRepo(entries[0], func(parentDir string, info os.FileInfo, err error) { if err != nil { errors++ } else { files++ } }) b.Logf("files: %d, errors: %d", files, errors) } } func TestFastWalkGitRepo(t *testing.T) { rootDir, err := ioutil.TempDir(os.TempDir(), "GitLfsTestFastWalkGitRepo") if err != nil { assert.FailNow(t, "Unable to get temp dir: %v", err) } defer os.RemoveAll(rootDir) os.Chdir(rootDir) expectedEntries := createFastWalkInputData(3, 3) mainDir := expectedEntries[0] // Set up a git repo and add some ignored files / dirs subprocess.SimpleExec("git", "init", mainDir) ignored := []string{ "filethatweignore.ign", "foldercontainingignored", "foldercontainingignored/notthisone.ign", "foldercontainingignored/ignoredfolder", "foldercontainingignored/ignoredfolder/file1.txt", "foldercontainingignored/ignoredfolder/file2.txt", "ignoredfolder", "ignoredfolder/file1.txt", "ignoredfolder/file2.txt", "ignoredfrominside", "ignoredfrominside/thisisok.txt", "ignoredfrominside/thisisnot.txt", "ignoredfrominside/thisone", "ignoredfrominside/thisone/file1.txt", } for _, f := range ignored { 
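// Entries with a file extension are written as small files; the rest
// are created as directories, mirroring the tree the walker must skip.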
fullPath := filepath.Join(mainDir, f) if len(filepath.Ext(f)) > 0 { ioutil.WriteFile(fullPath, []byte("TEST"), 0644) } else { os.MkdirAll(fullPath, 0755) } } // write root .gitignore rootGitIgnore := ` # ignore *.ign everywhere *.ign # ignore folder ignoredfolder ` ioutil.WriteFile(filepath.Join(mainDir, ".gitignore"), []byte(rootGitIgnore), 0644) // Subfolder ignore; folder will show up but but subfolder 'thisone' won't subFolderIgnore := ` thisone thisisnot.txt ` ioutil.WriteFile(filepath.Join(mainDir, "ignoredfrominside", ".gitignore"), []byte(subFolderIgnore), 0644) // This dir will be walked but content won't be expectedEntries = append(expectedEntries, filepath.Join(mainDir, "foldercontainingignored")) // This dir will be walked and some of its content but has its own gitignore expectedEntries = append(expectedEntries, filepath.Join(mainDir, "ignoredfrominside")) expectedEntries = append(expectedEntries, filepath.Join(mainDir, "ignoredfrominside", "thisisok.txt")) // Also gitignores expectedEntries = append(expectedEntries, filepath.Join(mainDir, ".gitignore")) expectedEntries = append(expectedEntries, filepath.Join(mainDir, "ignoredfrominside", ".gitignore")) // nothing else should be there gotEntries := make([]string, 0, 1000) gotErrors := make([]error, 0, 5) FastWalkGitRepo(mainDir, func(parent string, info os.FileInfo, err error) { if err != nil { gotErrors = append(gotErrors, err) } else { gotEntries = append(gotEntries, filepath.Join(parent, info.Name())) } }) assert.Empty(t, gotErrors) sort.Strings(expectedEntries) sort.Strings(gotEntries) assert.Equal(t, expectedEntries, gotEntries) } // Make test data - ensure you've Chdir'ed into a temp dir first // Returns list of files/dirs that are created // First entry is the parent dir of all others func createFastWalkInputData(smallFolder, largeFolder int) []string { dirs := []string{ "testroot", "testroot/folder1", "testroot/folder2", "testroot/folder2/subfolder1", "testroot/folder2/subfolder2", "testroot/folder2/subfolder3", "testroot/folder2/subfolder4", "testroot/folder2/subfolder4/subsub", } expectedEntries := make([]string, 0, 250) for i, dir := range dirs { os.MkdirAll(dir, 0755) numFiles := smallFolder expectedEntries = append(expectedEntries, filepath.Clean(dir)) if i >= 3 && i <= 5 { // Bulk test to ensure works with > 1 batch numFiles = largeFolder } for f := 0; f < numFiles; f++ { filename := filepath.Join(dir, fmt.Sprintf("file%d.txt", f)) ioutil.WriteFile(filename, []byte("TEST"), 0644) expectedEntries = append(expectedEntries, filepath.Clean(filename)) } } return expectedEntries } func collectFastWalkResults(fchan <-chan fastWalkInfo) ([]string, []error) { gotEntries := make([]string, 0, 1000) gotErrors := make([]error, 0, 5) for o := range fchan { if o.Err != nil { gotErrors = append(gotErrors, o.Err) } else { gotEntries = append(gotEntries, filepath.Join(o.ParentDir, o.Info.Name())) } } return gotEntries, gotErrors } func getFileMode(filename string) os.FileMode { s, err := os.Stat(filename) if err != nil { return 0000 } return s.Mode() } func TestSetWriteFlag(t *testing.T) { f, err := ioutil.TempFile("", "lfstestwriteflag") assert.Nil(t, err) filename := f.Name() defer os.Remove(filename) f.Close() // Set up with read/write bit for all but no execute assert.Nil(t, os.Chmod(filename, 0666)) assert.Nil(t, SetFileWriteFlag(filename, false)) // should turn off all write assert.EqualValues(t, 0444, getFileMode(filename)) assert.Nil(t, SetFileWriteFlag(filename, true)) // should only add back user write (on Mac/Linux) if 
runtime.GOOS == "windows" { assert.EqualValues(t, 0666, getFileMode(filename)) } else { assert.EqualValues(t, 0644, getFileMode(filename)) } // Can't run selective UGO tests on Windows as doesn't support separate roles // Also Golang only supports read/write but not execute on Windows if runtime.GOOS != "windows" { // Set up with read/write/execute bit for all but no execute assert.Nil(t, os.Chmod(filename, 0777)) assert.Nil(t, SetFileWriteFlag(filename, false)) // should turn off all write but not execute assert.EqualValues(t, 0555, getFileMode(filename)) assert.Nil(t, SetFileWriteFlag(filename, true)) // should only add back user write (on Mac/Linux) if runtime.GOOS == "windows" { assert.EqualValues(t, 0777, getFileMode(filename)) } else { assert.EqualValues(t, 0755, getFileMode(filename)) } assert.Nil(t, os.Chmod(filename, 0440)) assert.Nil(t, SetFileWriteFlag(filename, false)) assert.EqualValues(t, 0440, getFileMode(filename)) assert.Nil(t, SetFileWriteFlag(filename, true)) // should only add back user write assert.EqualValues(t, 0640, getFileMode(filename)) } } git-lfs-2.3.4/tools/humanize/000077500000000000000000000000001317167762300160625ustar00rootroot00000000000000git-lfs-2.3.4/tools/humanize/humanize.go000066400000000000000000000050101317167762300202250ustar00rootroot00000000000000package humanize import ( "fmt" "math" "strconv" "strings" "unicode" "github.com/git-lfs/git-lfs/errors" ) const ( Byte = 1 << (iota * 10) Kibibyte Mebibyte Gibibyte Tebibyte Pebibyte Kilobyte = 1000 * Byte Megabyte = 1000 * Kilobyte Gigabyte = 1000 * Megabyte Terabyte = 1000 * Gigabyte Petabyte = 1000 * Terabyte ) var bytesTable = map[string]uint64{ "": Byte, "b": Byte, "kib": Kibibyte, "mib": Mebibyte, "gib": Gibibyte, "tib": Tebibyte, "pib": Pebibyte, "kb": Kilobyte, "mb": Megabyte, "gb": Gigabyte, "tb": Terabyte, "pb": Petabyte, } // ParseBytes parses a given human-readable bytes or ibytes string into a number // of bytes, or an error if the string was unable to be parsed. func ParseBytes(str string) (uint64, error) { var sep int for _, r := range str { if !(unicode.IsDigit(r) || r == '.' || r == ',') { break } sep = sep + 1 } var f float64 if s := strings.Replace(str[:sep], ",", "", -1); len(s) > 0 { var err error f, err = strconv.ParseFloat(s, 64) if err != nil { return 0, err } } m, err := ParseByteUnit(str[sep:]) if err != nil { return 0, err } f = f * float64(m) if f >= math.MaxUint64 { return 0, errors.New("number of bytes too large") } return uint64(f), nil } // ParseByteUnit returns the number of bytes in a given unit of storage, or an // error, if that unit is unrecognized. func ParseByteUnit(str string) (uint64, error) { str = strings.TrimSpace(str) str = strings.ToLower(str) if u, ok := bytesTable[str]; ok { return u, nil } return 0, errors.Errorf("unknown unit: %q", str) } var sizes = []string{"B", "KB", "MB", "GB", "TB", "PB"} // FormatBytes outputs the given number of bytes "s" as a human-readable string, // rounding to the nearest half within .01. func FormatBytes(s uint64) string { var e float64 if s == 0 { e = 0 } else { e = math.Floor(log(float64(s), 1000)) } unit := uint64(math.Pow(1000, e)) suffix := sizes[int(e)] return fmt.Sprintf("%s %s", FormatBytesUnit(s, unit), suffix) } // FormatBytesUnit outputs the given number of bytes "s" as a quantity of the // given units "u" to the nearest half within .01. 
func FormatBytesUnit(s, u uint64) string { var rounded float64 if s < 10 { rounded = float64(s) } else { rounded = math.Floor(float64(s)/float64(u)*10+.5) / 10 } format := "%.0f" if rounded < 10 && u > 1 { format = "%.1f" } return fmt.Sprintf(format, rounded) } // log takes the log base "b" of "n" (\log_b{n}) func log(n, b float64) float64 { return math.Log(n) / math.Log(b) } git-lfs-2.3.4/tools/humanize/humanize_test.go000066400000000000000000000262131317167762300212740ustar00rootroot00000000000000package humanize_test import ( "math" "testing" "github.com/git-lfs/git-lfs/tools/humanize" "github.com/stretchr/testify/assert" ) type ParseBytesTestCase struct { Given string Expected uint64 Err error } func (c *ParseBytesTestCase) Assert(t *testing.T) { got, err := humanize.ParseBytes(c.Given) if c.Err == nil { assert.NoError(t, err, "unexpected error: %s", err) assert.EqualValues(t, c.Expected, got) } else { assert.Equal(t, c.Err, err) } } type FormatBytesTestCase struct { Given uint64 Expected string } func (c *FormatBytesTestCase) Assert(t *testing.T) { assert.Equal(t, c.Expected, humanize.FormatBytes(c.Given)) } type ParseByteUnitTestCase struct { Given string Expected uint64 Err string } func (c *ParseByteUnitTestCase) Assert(t *testing.T) { got, err := humanize.ParseByteUnit(c.Given) if len(c.Err) == 0 { assert.NoError(t, err, "unexpected error: %s", err) assert.EqualValues(t, c.Expected, got) } else { assert.EqualError(t, err, c.Err) } } type FormatBytesUnitTestCase struct { Given uint64 Unit uint64 Expected string } func (c *FormatBytesUnitTestCase) Assert(t *testing.T) { assert.Equal(t, c.Expected, humanize.FormatBytesUnit(c.Given, c.Unit)) } func TestParseBytes(t *testing.T) { for desc, c := range map[string]*ParseBytesTestCase{ "parse byte (zero, empty)": {"", uint64(0), nil}, "parse byte (empty)": {"10", uint64(10 * math.Pow(2, 0)), nil}, "parse byte": {"10B", uint64(10 * math.Pow(2, 0)), nil}, "parse kibibyte": {"20KIB", uint64(20 * math.Pow(2, 10)), nil}, "parse mebibyte": {"30MIB", uint64(30 * math.Pow(2, 20)), nil}, "parse gibibyte": {"40GIB", uint64(40 * math.Pow(2, 30)), nil}, "parse tebibyte": {"50TIB", uint64(50 * math.Pow(2, 40)), nil}, "parse pebibyte": {"60PIB", uint64(60 * math.Pow(2, 50)), nil}, "parse byte (lowercase)": {"10b", uint64(10 * math.Pow(2, 0)), nil}, "parse kibibyte (lowercase)": {"20kib", uint64(20 * math.Pow(2, 10)), nil}, "parse mebibyte (lowercase)": {"30mib", uint64(30 * math.Pow(2, 20)), nil}, "parse gibibyte (lowercase)": {"40gib", uint64(40 * math.Pow(2, 30)), nil}, "parse tebibyte (lowercase)": {"50tib", uint64(50 * math.Pow(2, 40)), nil}, "parse pebibyte (lowercase)": {"60pib", uint64(60 * math.Pow(2, 50)), nil}, "parse byte (with space)": {"10 B", uint64(10 * math.Pow(2, 0)), nil}, "parse kibibyte (with space)": {"20 KIB", uint64(20 * math.Pow(2, 10)), nil}, "parse mebibyte (with space)": {"30 MIB", uint64(30 * math.Pow(2, 20)), nil}, "parse gibibyte (with space)": {"40 GIB", uint64(40 * math.Pow(2, 30)), nil}, "parse tebibyte (with space)": {"50 TIB", uint64(50 * math.Pow(2, 40)), nil}, "parse pebibyte (with space)": {"60 PIB", uint64(60 * math.Pow(2, 50)), nil}, "parse byte (with space, lowercase)": {"10 b", uint64(10 * math.Pow(2, 0)), nil}, "parse kibibyte (with space, lowercase)": {"20 kib", uint64(20 * math.Pow(2, 10)), nil}, "parse mebibyte (with space, lowercase)": {"30 mib", uint64(30 * math.Pow(2, 20)), nil}, "parse gibibyte (with space, lowercase)": {"40 gib", uint64(40 * math.Pow(2, 30)), nil}, "parse tebibyte (with space, 
lowercase)": {"50 tib", uint64(50 * math.Pow(2, 40)), nil}, "parse pebibyte (with space, lowercase)": {"60 pib", uint64(60 * math.Pow(2, 50)), nil}, "parse kilobyte": {"20KB", uint64(20 * math.Pow(10, 3)), nil}, "parse megabyte": {"30MB", uint64(30 * math.Pow(10, 6)), nil}, "parse gigabyte": {"40GB", uint64(40 * math.Pow(10, 9)), nil}, "parse terabyte": {"50TB", uint64(50 * math.Pow(10, 12)), nil}, "parse petabyte": {"60PB", uint64(60 * math.Pow(10, 15)), nil}, "parse kilobyte (lowercase)": {"20kb", uint64(20 * math.Pow(10, 3)), nil}, "parse megabyte (lowercase)": {"30mb", uint64(30 * math.Pow(10, 6)), nil}, "parse gigabyte (lowercase)": {"40gb", uint64(40 * math.Pow(10, 9)), nil}, "parse terabyte (lowercase)": {"50tb", uint64(50 * math.Pow(10, 12)), nil}, "parse petabyte (lowercase)": {"60pb", uint64(60 * math.Pow(10, 15)), nil}, "parse kilobyte (with space)": {"20 KB", uint64(20 * math.Pow(10, 3)), nil}, "parse megabyte (with space)": {"30 MB", uint64(30 * math.Pow(10, 6)), nil}, "parse gigabyte (with space)": {"40 GB", uint64(40 * math.Pow(10, 9)), nil}, "parse terabyte (with space)": {"50 TB", uint64(50 * math.Pow(10, 12)), nil}, "parse petabyte (with space)": {"60 PB", uint64(60 * math.Pow(10, 15)), nil}, "parse kilobyte (with space, lowercase)": {"20 kb", uint64(20 * math.Pow(10, 3)), nil}, "parse megabyte (with space, lowercase)": {"30 mb", uint64(30 * math.Pow(10, 6)), nil}, "parse gigabyte (with space, lowercase)": {"40 gb", uint64(40 * math.Pow(10, 9)), nil}, "parse terabyte (with space, lowercase)": {"50 tb", uint64(50 * math.Pow(10, 12)), nil}, "parse petabyte (with space, lowercase)": {"60 pb", uint64(60 * math.Pow(10, 15)), nil}, } { t.Run(desc, c.Assert) } } func TestFormatBytes(t *testing.T) { for desc, c := range map[string]*FormatBytesTestCase{ "format bytes": {uint64(1 * math.Pow(10, 0)), "1 B"}, "format kilobytes": {uint64(1 * math.Pow(10, 3)), "1.0 KB"}, "format megabytes": {uint64(1 * math.Pow(10, 6)), "1.0 MB"}, "format gigabytes": {uint64(1 * math.Pow(10, 9)), "1.0 GB"}, "format petabytes": {uint64(1 * math.Pow(10, 12)), "1.0 TB"}, "format terabytes": {uint64(1 * math.Pow(10, 15)), "1.0 PB"}, "format kilobytes under": {uint64(1.49 * math.Pow(10, 3)), "1.5 KB"}, "format megabytes under": {uint64(1.49 * math.Pow(10, 6)), "1.5 MB"}, "format gigabytes under": {uint64(1.49 * math.Pow(10, 9)), "1.5 GB"}, "format petabytes under": {uint64(1.49 * math.Pow(10, 12)), "1.5 TB"}, "format terabytes under": {uint64(1.49 * math.Pow(10, 15)), "1.5 PB"}, "format kilobytes over": {uint64(1.51 * math.Pow(10, 3)), "1.5 KB"}, "format megabytes over": {uint64(1.51 * math.Pow(10, 6)), "1.5 MB"}, "format gigabytes over": {uint64(1.51 * math.Pow(10, 9)), "1.5 GB"}, "format petabytes over": {uint64(1.51 * math.Pow(10, 12)), "1.5 TB"}, "format terabytes over": {uint64(1.51 * math.Pow(10, 15)), "1.5 PB"}, "format kilobytes exact": {uint64(1.3 * math.Pow(10, 3)), "1.3 KB"}, "format megabytes exact": {uint64(1.3 * math.Pow(10, 6)), "1.3 MB"}, "format gigabytes exact": {uint64(1.3 * math.Pow(10, 9)), "1.3 GB"}, "format petabytes exact": {uint64(1.3 * math.Pow(10, 12)), "1.3 TB"}, "format terabytes exact": {uint64(1.3 * math.Pow(10, 15)), "1.3 PB"}, } { t.Run(desc, c.Assert) } } func TestParseByteUnit(t *testing.T) { for desc, c := range map[string]*ParseByteUnitTestCase{ "parse byte": {"B", uint64(math.Pow(2, 0)), ""}, "parse kibibyte": {"KIB", uint64(math.Pow(2, 10)), ""}, "parse mebibyte": {"MIB", uint64(math.Pow(2, 20)), ""}, "parse gibibyte": {"GIB", uint64(math.Pow(2, 30)), ""}, "parse 
tebibyte": {"TIB", uint64(math.Pow(2, 40)), ""}, "parse pebibyte": {"PIB", uint64(math.Pow(2, 50)), ""}, "parse byte (lowercase)": {"b", uint64(math.Pow(2, 0)), ""}, "parse kibibyte (lowercase)": {"kib", uint64(math.Pow(2, 10)), ""}, "parse mebibyte (lowercase)": {"mib", uint64(math.Pow(2, 20)), ""}, "parse gibibyte (lowercase)": {"gib", uint64(math.Pow(2, 30)), ""}, "parse tebibyte (lowercase)": {"tib", uint64(math.Pow(2, 40)), ""}, "parse pebibyte (lowercase)": {"pib", uint64(math.Pow(2, 50)), ""}, "parse byte (with space)": {" B", uint64(math.Pow(2, 0)), ""}, "parse kibibyte (with space)": {" KIB", uint64(math.Pow(2, 10)), ""}, "parse mebibyte (with space)": {" MIB", uint64(math.Pow(2, 20)), ""}, "parse gibibyte (with space)": {" GIB", uint64(math.Pow(2, 30)), ""}, "parse tebibyte (with space)": {" TIB", uint64(math.Pow(2, 40)), ""}, "parse pebibyte (with space)": {" PIB", uint64(math.Pow(2, 50)), ""}, "parse byte (with space, lowercase)": {" b", uint64(math.Pow(2, 0)), ""}, "parse kibibyte (with space, lowercase)": {" kib", uint64(math.Pow(2, 10)), ""}, "parse mebibyte (with space, lowercase)": {" mib", uint64(math.Pow(2, 20)), ""}, "parse gibibyte (with space, lowercase)": {" gib", uint64(math.Pow(2, 30)), ""}, "parse tebibyte (with space, lowercase)": {" tib", uint64(math.Pow(2, 40)), ""}, "parse pebibyte (with space, lowercase)": {" pib", uint64(math.Pow(2, 50)), ""}, "parse kilobyte": {"KB", uint64(math.Pow(10, 3)), ""}, "parse megabyte": {"MB", uint64(math.Pow(10, 6)), ""}, "parse gigabyte": {"GB", uint64(math.Pow(10, 9)), ""}, "parse terabyte": {"TB", uint64(math.Pow(10, 12)), ""}, "parse petabyte": {"PB", uint64(math.Pow(10, 15)), ""}, "parse kilobyte (lowercase)": {"kb", uint64(math.Pow(10, 3)), ""}, "parse megabyte (lowercase)": {"mb", uint64(math.Pow(10, 6)), ""}, "parse gigabyte (lowercase)": {"gb", uint64(math.Pow(10, 9)), ""}, "parse terabyte (lowercase)": {"tb", uint64(math.Pow(10, 12)), ""}, "parse petabyte (lowercase)": {"pb", uint64(math.Pow(10, 15)), ""}, "parse kilobyte (with space)": {" KB", uint64(math.Pow(10, 3)), ""}, "parse megabyte (with space)": {" MB", uint64(math.Pow(10, 6)), ""}, "parse gigabyte (with space)": {" GB", uint64(math.Pow(10, 9)), ""}, "parse terabyte (with space)": {" TB", uint64(math.Pow(10, 12)), ""}, "parse petabyte (with space)": {" PB", uint64(math.Pow(10, 15)), ""}, "parse kilobyte (with space, lowercase)": {"kb", uint64(math.Pow(10, 3)), ""}, "parse megabyte (with space, lowercase)": {"mb", uint64(math.Pow(10, 6)), ""}, "parse gigabyte (with space, lowercase)": {"gb", uint64(math.Pow(10, 9)), ""}, "parse terabyte (with space, lowercase)": {"tb", uint64(math.Pow(10, 12)), ""}, "parse petabyte (with space, lowercase)": {"pb", uint64(math.Pow(10, 15)), ""}, "parse unknown unit": {"imag", 0, "unknown unit: \"imag\""}, } { t.Run(desc, c.Assert) } } func TestFormatBytesUnit(t *testing.T) { for desc, c := range map[string]*FormatBytesUnitTestCase{ "format bytes": {uint64(1 * math.Pow(10, 0)), humanize.Byte, "1"}, "format kilobytes": {uint64(1 * math.Pow(10, 3)), humanize.Byte, "1000"}, "format megabytes": {uint64(1 * math.Pow(10, 6)), humanize.Byte, "1000000"}, "format gigabytes": {uint64(1 * math.Pow(10, 9)), humanize.Byte, "1000000000"}, "format petabytes": {uint64(1 * math.Pow(10, 12)), humanize.Byte, "1000000000000"}, "format terabytes": {uint64(1 * math.Pow(10, 15)), humanize.Byte, "1000000000000000"}, "format kilobytes under": {uint64(1.49 * math.Pow(10, 3)), humanize.Byte, "1490"}, "format megabytes under": {uint64(1.49 * math.Pow(10, 
6)), humanize.Byte, "1490000"}, "format gigabytes under": {uint64(1.49 * math.Pow(10, 9)), humanize.Byte, "1490000000"}, "format petabytes under": {uint64(1.49 * math.Pow(10, 12)), humanize.Byte, "1490000000000"}, "format terabytes under": {uint64(1.49 * math.Pow(10, 15)), humanize.Byte, "1490000000000000"}, "format kilobytes over": {uint64(1.51 * math.Pow(10, 3)), humanize.Byte, "1510"}, "format megabytes over": {uint64(1.51 * math.Pow(10, 6)), humanize.Byte, "1510000"}, "format gigabytes over": {uint64(1.51 * math.Pow(10, 9)), humanize.Byte, "1510000000"}, "format petabytes over": {uint64(1.51 * math.Pow(10, 12)), humanize.Byte, "1510000000000"}, "format terabytes over": {uint64(1.51 * math.Pow(10, 15)), humanize.Byte, "1510000000000000"}, } { t.Run(desc, c.Assert) } } git-lfs-2.3.4/tools/humanize/package.go000066400000000000000000000002431317167762300200030ustar00rootroot00000000000000// package humanize is designed to parse and format "humanized" versions of // numbers with units. // // Based on: github.com/dustin/go-humanize. package humanize git-lfs-2.3.4/tools/iotools.go000066400000000000000000000074671317167762300162770ustar00rootroot00000000000000package tools import ( "bytes" "crypto/sha256" "encoding/hex" "hash" "io" "io/ioutil" "os" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/progress" ) const ( // memoryBufferLimit is the number of bytes to buffer in memory before // spooling the contents of an `io.Reader` in `Spool()` to a temporary // file on disk. memoryBufferLimit = 1024 ) // CopyWithCallback copies reader to writer while performing a progress callback func CopyWithCallback(writer io.Writer, reader io.Reader, totalSize int64, cb progress.CopyCallback) (int64, error) { if success, _ := CloneFile(writer, reader); success { if cb != nil { cb(totalSize, totalSize, 0) } return totalSize, nil } if cb == nil { return io.Copy(writer, reader) } cbReader := &progress.CallbackReader{ C: cb, TotalSize: totalSize, Reader: reader, } return io.Copy(writer, cbReader) } // Get a new Hash instance of the type used to hash LFS content func NewLfsContentHash() hash.Hash { return sha256.New() } // HashingReader wraps a reader and calculates the hash of the data as it is read type HashingReader struct { reader io.Reader hasher hash.Hash } func NewHashingReader(r io.Reader) *HashingReader { return &HashingReader{r, NewLfsContentHash()} } func NewHashingReaderPreloadHash(r io.Reader, hash hash.Hash) *HashingReader { return &HashingReader{r, hash} } func (r *HashingReader) Hash() string { return hex.EncodeToString(r.hasher.Sum(nil)) } func (r *HashingReader) Read(b []byte) (int, error) { w, err := r.reader.Read(b) if err == nil || err == io.EOF { _, e := r.hasher.Write(b[0:w]) if e != nil && err == nil { return w, e } } return w, err } // RetriableReader wraps a error response of reader as RetriableError() type RetriableReader struct { reader io.Reader } func NewRetriableReader(r io.Reader) io.Reader { return &RetriableReader{r} } func (r *RetriableReader) Read(b []byte) (int, error) { n, err := r.reader.Read(b) // EOF is a successful response as it is used to signal a graceful end // of input c.f. 
https://git.io/v6riQ // // Otherwise, if the error is non-nil and already retriable (in the // case that the underlying reader `r.reader` is itself a // `*RetriableReader`, return the error wholesale: if err == nil || err == io.EOF || errors.IsRetriableError(err) { return n, err } return n, errors.NewRetriableError(err) } // Spool spools the contents from 'from' to 'to' by buffering the entire // contents of 'from' into a temprorary file created in the directory "dir". // That buffer is held in memory until the file grows to larger than // 'memoryBufferLimit`, then the remaining contents are spooled to disk. // // The temporary file is cleaned up after the copy is complete. // // The number of bytes written to "to", as well as any error encountered are // returned. func Spool(to io.Writer, from io.Reader, dir string) (n int64, err error) { // First, buffer up to `memoryBufferLimit` in memory. buf := make([]byte, memoryBufferLimit) if bn, err := from.Read(buf); err != nil && err != io.EOF { return int64(bn), err } else { buf = buf[:bn] } var spool io.Reader = bytes.NewReader(buf) if err != io.EOF { // If we weren't at the end of the stream, create a temporary // file, and spool the remaining contents there. tmp, err := ioutil.TempFile(dir, "") if err != nil { return 0, errors.Wrap(err, "spool tmp") } defer os.Remove(tmp.Name()) if n, err = io.Copy(tmp, from); err != nil { return n, errors.Wrap(err, "unable to spool") } if _, err = tmp.Seek(0, io.SeekStart); err != nil { return 0, errors.Wrap(err, "unable to seek") } // The spooled contents will now be the concatenation of the // contents we stored in memory, then the remainder of the // contents on disk. spool = io.MultiReader(spool, tmp) } return io.Copy(to, spool) } git-lfs-2.3.4/tools/iotools_test.go000066400000000000000000000036301317167762300173220ustar00rootroot00000000000000package tools_test import ( "bytes" "io" "testing" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/tools" "github.com/stretchr/testify/assert" ) func TestRetriableReaderReturnsSuccessfulReads(t *testing.T) { r := tools.NewRetriableReader(bytes.NewBuffer([]byte{0x1, 0x2, 0x3, 0x4})) var buf [4]byte n, err := r.Read(buf[:]) assert.Nil(t, err) assert.Equal(t, 4, n) assert.Equal(t, []byte{0x1, 0x2, 0x3, 0x4}, buf[:]) } func TestRetriableReaderReturnsEOFs(t *testing.T) { r := tools.NewRetriableReader(bytes.NewBuffer([]byte{ /* empty */ })) var buf [1]byte n, err := r.Read(buf[:]) assert.Equal(t, io.EOF, err) assert.Equal(t, 0, n) } func TestRetriableReaderMakesErrorsRetriable(t *testing.T) { expected := errors.New("example error") r := tools.NewRetriableReader(&ErrReader{expected}) var buf [1]byte n, err := r.Read(buf[:]) assert.Equal(t, 0, n) assert.EqualError(t, err, "LFS: "+expected.Error()) assert.True(t, errors.IsRetriableError(err)) } func TestRetriableReaderDoesNotRewrap(t *testing.T) { // expected is already "retriable", as would be the case if the // underlying reader was a *RetriableReader itself. expected := errors.NewRetriableError(errors.New("example error")) r := tools.NewRetriableReader(&ErrReader{expected}) var buf [1]byte n, err := r.Read(buf[:]) assert.Equal(t, 0, n) // errors.NewRetriableError wraps the given error with the prefix // message "LFS", so these two errors should be equal, indicating that // the RetriableReader did not re-wrap the error it received. assert.EqualError(t, err, expected.Error()) assert.True(t, errors.IsRetriableError(err)) } // ErrReader implements io.Reader and only returns errors. 
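//
// For example (illustrative):
//
//	r := &ErrReader{err: errors.New("boom")}
//	n, err := r.Read(make([]byte, 1)) // n == 0, err == "boom"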
type ErrReader struct { // err is the error that this reader will return. err error } // Read implements io.Reader#Read, and returns (0, e.err). func (e *ErrReader) Read(p []byte) (n int, err error) { return 0, e.err } git-lfs-2.3.4/tools/kv/000077500000000000000000000000001317167762300146625ustar00rootroot00000000000000git-lfs-2.3.4/tools/kv/keyvaluestore.go000066400000000000000000000142661317167762300201240ustar00rootroot00000000000000package kv import ( "encoding/gob" "fmt" "io" "os" "sync" ) // Store provides an in-memory key/value store which is persisted to // a file. The file handle itself is not kept locked for the duration; it is // only locked during load and save, to make it concurrency friendly. When // saving, the store uses optimistic locking to determine whether the db on disk // has been modified by another process; in which case it loads the latest // version and re-applies modifications made during this session. This means // the Lost Update db concurrency issue is possible; so don't use this if you // need more DB integrity than Read Committed isolation levels. type Store struct { // Locks the entire store mu sync.RWMutex filename string log []change // This is the persistent data // version for optimistic locking, this field is incremented with every Save() version int64 db map[string]interface{} } // Type of operation; set or remove type operation int const ( // Set a value for a key setOperation = operation(iota) // Removed a value for a key removeOperation = operation(iota) ) type change struct { op operation key string value interface{} } // NewStore creates a new key/value store and initialises it with contents from // the named file, if it exists func NewStore(filepath string) (*Store, error) { kv := &Store{filename: filepath, db: make(map[string]interface{})} return kv, kv.loadAndMergeIfNeeded() } // Set updates the key/value store in memory // Changes are not persisted until you call Save() func (k *Store) Set(key string, value interface{}) { k.mu.Lock() defer k.mu.Unlock() k.db[key] = value k.logChange(setOperation, key, value) } // Remove removes the key and its value from the store in memory // Changes are not persisted until you call Save() func (k *Store) Remove(key string) { k.mu.Lock() defer k.mu.Unlock() delete(k.db, key) k.logChange(removeOperation, key, nil) } // RemoveAll removes all entries from the store // These changes are not persisted until you call Save() func (k *Store) RemoveAll() { k.mu.Lock() defer k.mu.Unlock() // Log all changes for key, _ := range k.db { k.logChange(removeOperation, key, nil) } k.db = make(map[string]interface{}) } // Visit walks through the entire store via a function; return false from // your visitor function to halt the walk func (k *Store) Visit(cb func(string, interface{}) bool) { // Read-only lock k.mu.RLock() defer k.mu.RUnlock() for k, v := range k.db { if !cb(k, v) { break } } } // Append a change to the log; mutex must already be locked func (k *Store) logChange(op operation, key string, value interface{}) { k.log = append(k.log, change{op, key, value}) } // Get retrieves a value from the store, or nil if it is not present func (k *Store) Get(key string) interface{} { // Read-only lock k.mu.RLock() defer k.mu.RUnlock() // zero value of interface{} is nil so this does what we want return k.db[key] } // Save persists the changes made to disk // If any changes have been written by other code they will be merged func (k *Store) Save() error { k.mu.Lock() defer k.mu.Unlock() // Short-circuit if we have no changes 
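// If there are changes, the sequence below is: open the file
// read/write so the handle is held across the version check and the
// rewrite, merge in anything another process saved since our last
// load, then bump the version and gob-encode the full db.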
if len(k.log) == 0 { return nil } // firstly peek at version; open read/write to keep lock between check & write f, err := os.OpenFile(k.filename, os.O_RDWR|os.O_CREATE, 0664) if err != nil { return err } defer f.Close() // Only try to merge if > 0 bytes, ignore empty files (decoder will fail) if stat, _ := f.Stat(); stat.Size() > 0 { k.loadAndMergeReaderIfNeeded(f) // Now we overwrite the file f.Seek(0, os.SEEK_SET) f.Truncate(0) } k.version++ enc := gob.NewEncoder(f) if err := enc.Encode(k.version); err != nil { return fmt.Errorf("Error while writing version data to %v: %v", k.filename, err) } if err := enc.Encode(k.db); err != nil { return fmt.Errorf("Error while writing new key/value data to %v: %v", k.filename, err) } // Clear log now that it's saved k.log = nil return nil } // Reads as little as possible from the passed in file to determine if the // contents are different from the version already held. If so, reads the // contents and merges with any outstanding changes. If not, stops early without // reading the rest of the file func (k *Store) loadAndMergeIfNeeded() error { stat, err := os.Stat(k.filename) if err != nil { if os.IsNotExist(err) { return nil // missing is OK } return err } // Do nothing if empty file if stat.Size() == 0 { return nil } f, err := os.OpenFile(k.filename, os.O_RDONLY, 0664) if err == nil { defer f.Close() return k.loadAndMergeReaderIfNeeded(f) } else { return err } } // As loadAndMergeIfNeeded but lets caller decide how to manage file handles func (k *Store) loadAndMergeReaderIfNeeded(f io.Reader) error { var versionOnDisk int64 // Decode *only* the version field to check whether anyone else has // modified the db; gob serializes structs in order so it will always be 1st dec := gob.NewDecoder(f) err := dec.Decode(&versionOnDisk) if err != nil { return fmt.Errorf("Problem checking version of key/value data from %v: %v", k.filename, err) } // Totally uninitialised Version == 0, saved versions are always >=1 if versionOnDisk != k.version { // Reload data & merge var dbOnDisk map[string]interface{} err = dec.Decode(&dbOnDisk) if err != nil { return fmt.Errorf("Problem reading updated key/value data from %v: %v", k.filename, err) } k.reapplyChanges(dbOnDisk) k.version = versionOnDisk } return nil } // reapplyChanges replays the changes made since the last load onto baseDb // and stores the result as our own DB func (k *Store) reapplyChanges(baseDb map[string]interface{}) { for _, change := range k.log { switch change.op { case setOperation: baseDb[change.key] = change.value case removeOperation: delete(baseDb, change.key) } } // Note, log is not cleared here, that only happens on Save since it's a // list of unsaved changes k.db = baseDb } // RegisterTypeForStorage registers a custom type (e.g. a struct) for // use in the key value store. This is necessary if you intend to pass custom // structs to Store.Set() rather than primitive types. 
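//
// For example (illustrative; MyConfig is a made-up type, not part of
// this package):
//
//	kv.RegisterTypeForStorage(&MyConfig{})
//	store.Set("config", &MyConfig{Retries: 3})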
func RegisterTypeForStorage(val interface{}) { gob.Register(val) } git-lfs-2.3.4/tools/kv/keyvaluestore_test.go000066400000000000000000000106121317167762300211520ustar00rootroot00000000000000package kv import ( "io/ioutil" "os" "testing" "github.com/stretchr/testify/assert" ) func TestStoreSimple(t *testing.T) { tmpf, err := ioutil.TempFile("", "lfstest1") assert.Nil(t, err) filename := tmpf.Name() defer os.Remove(filename) tmpf.Close() kvs, err := NewStore(filename) assert.Nil(t, err) // We'll include storing custom structs type customData struct { Val1 string Val2 int } // Needed to store custom struct RegisterTypeForStorage(&customData{}) kvs.Set("stringVal", "This is a string value") kvs.Set("intVal", 3) kvs.Set("floatVal", 3.142) kvs.Set("structVal", &customData{"structTest", 20}) s := kvs.Get("stringVal") assert.Equal(t, "This is a string value", s) i := kvs.Get("intVal") assert.Equal(t, 3, i) f := kvs.Get("floatVal") assert.Equal(t, 3.142, f) c := kvs.Get("structVal") assert.Equal(t, c, &customData{"structTest", 20}) n := kvs.Get("noValue") assert.Nil(t, n) kvs.Remove("stringVal") s = kvs.Get("stringVal") assert.Nil(t, s) // Set the string value again before saving kvs.Set("stringVal", "This is a string value") err = kvs.Save() assert.Nil(t, err) kvs = nil // Now confirm that we can read it all back kvs2, err := NewStore(filename) assert.Nil(t, err) s = kvs2.Get("stringVal") assert.Equal(t, "This is a string value", s) i = kvs2.Get("intVal") assert.Equal(t, 3, i) f = kvs2.Get("floatVal") assert.Equal(t, 3.142, f) c = kvs2.Get("structVal") assert.Equal(t, c, &customData{"structTest", 20}) n = kvs2.Get("noValue") assert.Nil(t, n) // Test remove all kvs2.RemoveAll() s = kvs2.Get("stringVal") assert.Nil(t, s) i = kvs2.Get("intVal") assert.Nil(t, i) f = kvs2.Get("floatVal") assert.Nil(t, f) c = kvs2.Get("structVal") assert.Nil(t, c) err = kvs2.Save() assert.Nil(t, err) kvs2 = nil // Now confirm that we can read blank & get nothing kvs, err = NewStore(filename) kvs.Visit(func(k string, v interface{}) bool { // Should not be called assert.Fail(t, "Should be no entries") return true }) } func TestStoreOptimisticConflict(t *testing.T) { tmpf, err := ioutil.TempFile("", "lfstest2") assert.Nil(t, err) filename := tmpf.Name() defer os.Remove(filename) tmpf.Close() kvs1, err := NewStore(filename) assert.Nil(t, err) kvs1.Set("key1", "value1") kvs1.Set("key2", "value2") kvs1.Set("key3", "value3") err = kvs1.Save() assert.Nil(t, err) // Load second copy & modify kvs2, err := NewStore(filename) assert.Nil(t, err) // New keys kvs2.Set("key4", "value4_fromkvs2") kvs2.Set("key5", "value5_fromkvs2") // Modify a key too kvs2.Set("key1", "value1_fromkvs2") err = kvs2.Save() assert.Nil(t, err) // Now modify first copy & save; it should detect optimistic lock issue // New item kvs1.Set("key10", "value10") // Overlapping item; since we save second this will overwrite one from kvs2 kvs1.Set("key4", "value4") err = kvs1.Save() assert.Nil(t, err) // This should have merged changes from kvs2 in the process v := kvs1.Get("key1") assert.Equal(t, "value1_fromkvs2", v) // this one was modified by kvs2 v = kvs1.Get("key2") assert.Equal(t, "value2", v) v = kvs1.Get("key3") assert.Equal(t, "value3", v) v = kvs1.Get("key4") assert.Equal(t, "value4", v) // we overwrote this so would not be merged v = kvs1.Get("key5") assert.Equal(t, "value5_fromkvs2", v) } func TestStoreReduceSize(t *testing.T) { tmpf, err := ioutil.TempFile("", "lfstest3") assert.Nil(t, err) filename := tmpf.Name() defer os.Remove(filename) tmpf.Close() 
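// Populate and save a four-entry store, then show that removing keys
// and re-saving shrinks the file on disk.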
kvs, err := NewStore(filename) assert.Nil(t, err) kvs.Set("key1", "I woke up in a Soho doorway") kvs.Set("key2", "A policeman knew my name") kvs.Set("key3", "He said 'You can go sleep at home tonight") kvs.Set("key4", "If you can get up and walk away'") assert.NotNil(t, kvs.Get("key1")) assert.NotNil(t, kvs.Get("key2")) assert.NotNil(t, kvs.Get("key3")) assert.NotNil(t, kvs.Get("key4")) assert.Nil(t, kvs.Save()) stat1, _ := os.Stat(filename) // Remove all but 1 key & save smaller version kvs.Remove("key2") kvs.Remove("key3") kvs.Remove("key4") assert.Nil(t, kvs.Save()) // Now reload fresh & prove works kvs = nil kvs, err = NewStore(filename) assert.Nil(t, err) assert.NotNil(t, kvs.Get("key1")) assert.Nil(t, kvs.Get("key2")) assert.Nil(t, kvs.Get("key3")) assert.Nil(t, kvs.Get("key4")) stat2, _ := os.Stat(filename) assert.True(t, stat2.Size() < stat1.Size(), "Size should have reduced, was %d now %d", stat1.Size(), stat2.Size()) } git-lfs-2.3.4/tools/math.go000066400000000000000000000012471317167762300155260ustar00rootroot00000000000000package tools // MinInt returns the smaller of two `int`s, "a", or "b". func MinInt(a, b int) int { if a < b { return a } return b } // MaxInt returns the greater of two `int`s, "a", or "b". func MaxInt(a, b int) int { if a > b { return a } return b } // ClampInt returns the integer "n" bounded between "min" and "max". func ClampInt(n, min, max int) int { return MinInt(min, MaxInt(max, n)) } // MinInt64 returns the smaller of two `int`s, "a", or "b". func MinInt64(a, b int64) int64 { if a < b { return a } return b } // MaxInt64 returns the greater of two `int`s, "a", or "b". func MaxInt64(a, b int64) int64 { if a > b { return a } return b } git-lfs-2.3.4/tools/math_test.go000066400000000000000000000010161317167762300165570ustar00rootroot00000000000000package tools import ( "testing" "github.com/stretchr/testify/assert" ) func MinIntPicksTheSmallerInt(t *testing.T) { assert.Equal(t, -1, MinInt(-1, 1)) } func MaxIntPicksTheBiggertInt(t *testing.T) { assert.Equal(t, 1, MaxInt(-1, 1)) } func ClampDiscardsIntsLowerThanMin(t *testing.T) { assert.Equal(t, 0, ClampInt(-1, 0, 1)) } func ClampDiscardsIntsGreaterThanMax(t *testing.T) { assert.Equal(t, 1, ClampInt(2, 0, 1)) } func ClampAcceptsIntsWithinBounds(t *testing.T) { assert.Equal(t, 1, ClampInt(1, 0, 2)) } git-lfs-2.3.4/tools/ordered_set.go000066400000000000000000000123451317167762300170750ustar00rootroot00000000000000package tools // OrderedSet is a unique set of strings that maintains insertion order. type OrderedSet struct { // s is the set of strings that we're keeping track of. s []string // m is a mapping of string value "s" into the index "i" that that // string is present in in the given "s". m map[string]int } // NewOrderedSet creates an ordered set with no values. func NewOrderedSet() *OrderedSet { return NewOrderedSetWithCapacity(0) } // NewOrderedSetWithCapacity creates a new ordered set with no values. The // returned ordered set can be appended to "capacity" number of times before it // grows internally. func NewOrderedSetWithCapacity(capacity int) *OrderedSet { return &OrderedSet{ s: make([]string, 0, capacity), m: make(map[string]int, capacity), } } // NewOrderedSetFromSlice returns a new ordered set with the elements given in // the slice "s". func NewOrderedSetFromSlice(s []string) *OrderedSet { set := NewOrderedSetWithCapacity(len(s)) for _, e := range s { set.Add(e) } return set } // Add adds the given element "i" to the ordered set, unless the element is // already present. 
It returns whether or not the element was added. func (s *OrderedSet) Add(i string) bool { if _, ok := s.m[i]; ok { return false } s.s = append(s.s, i) s.m[i] = len(s.s) - 1 return true } // Contains returns whether or not the given "i" is contained in this ordered // set. It is a constant-time operation. func (s *OrderedSet) Contains(i string) bool { if _, ok := s.m[i]; ok { return true } return false } // ContainsAll returns whether or not all of the given items in "i" are present // in the ordered set. func (s *OrderedSet) ContainsAll(i ...string) bool { for _, item := range i { if !s.Contains(item) { return false } } return true } // IsSubset returns whether other is a subset of this ordered set. In other // words, it returns whether or not all of the elements in "other" are also // present in this set. func (s *OrderedSet) IsSubset(other *OrderedSet) bool { for _, i := range other.s { if !s.Contains(i) { return false } } return true } // IsSuperset returns whether or not this set is a superset of "other". In other // words, it returns whether or not all of the elements in this set are also in // the set "other". func (s *OrderedSet) IsSuperset(other *OrderedSet) bool { return other.IsSubset(s) } // Union returns a union of this set with the given set "other". It returns the // items that are in either set while maintaining uniqueness constraints. It // preserves ordered within each set, and orders the elements in this set before // the elements in "other". // // It is an O(n+m) operation. func (s *OrderedSet) Union(other *OrderedSet) *OrderedSet { union := NewOrderedSetWithCapacity(other.Cardinality() + s.Cardinality()) for _, e := range s.s { union.Add(e) } for _, e := range other.s { union.Add(e) } return union } // Intersect returns the elements that are in both this set and then given // "ordered" set. It is an O(min(n, m)) (in other words, O(n)) operation. func (s *OrderedSet) Intersect(other *OrderedSet) *OrderedSet { intersection := NewOrderedSetWithCapacity(MinInt( s.Cardinality(), other.Cardinality())) if s.Cardinality() < other.Cardinality() { for _, elem := range s.s { if other.Contains(elem) { intersection.Add(elem) } } } else { for _, elem := range other.s { if s.Contains(elem) { intersection.Add(elem) } } } return intersection } // Difference returns the elements that are in this set, but not included in // other. func (s *OrderedSet) Difference(other *OrderedSet) *OrderedSet { diff := NewOrderedSetWithCapacity(s.Cardinality()) for _, e := range s.s { if !other.Contains(e) { diff.Add(e) } } return diff } // SymmetricDifference returns the elements that are not present in both sets. func (s *OrderedSet) SymmetricDifference(other *OrderedSet) *OrderedSet { left := s.Difference(other) right := other.Difference(s) return left.Union(right) } // Clear removes all elements from this set. func (s *OrderedSet) Clear() { s.s = make([]string, 0) s.m = make(map[string]int, 0) } // Remove removes the given element "i" from this set. func (s *OrderedSet) Remove(i string) { idx, ok := s.m[i] if !ok { return } rest := MinInt(idx+1, len(s.s)-1) s.s = append(s.s[:idx], s.s[rest:]...) for _, e := range s.s[rest:] { s.m[e] = s.m[e] - 1 } delete(s.m, i) } // Cardinality returns the cardinality of this set. func (s *OrderedSet) Cardinality() int { return len(s.s) } // Iter returns a channel which yields the elements in this set in insertion // order. 
func (s *OrderedSet) Iter() <-chan string { c := make(chan string) go func() { for _, i := range s.s { c <- i } close(c) }() return c } // Equal returns whether this element has the same number, identity and ordering // elements as given in "other". func (s *OrderedSet) Equal(other *OrderedSet) bool { if s.Cardinality() != other.Cardinality() { return false } for e, i := range s.m { if ci, ok := other.m[e]; !ok || ci != i { return false } } return true } // Clone returns a deep copy of this set. func (s *OrderedSet) Clone() *OrderedSet { clone := NewOrderedSetWithCapacity(s.Cardinality()) for _, i := range s.s { clone.Add(i) } return clone } git-lfs-2.3.4/tools/ordered_set_test.go000066400000000000000000000136561317167762300201420ustar00rootroot00000000000000package tools import ( "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestOrderedSetAddAddsElements(t *testing.T) { s := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.False(t, s.Contains("d"), "tools: did not expected s to contain \"d\"") assert.True(t, s.Add("d")) assert.True(t, s.Contains("d"), "tools: expected s to contain \"d\"") } func TestOrderedSetContainsReturnsTrueForItemsItContains(t *testing.T) { s := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.True(t, s.Contains("b"), "tools: expected s to contain element \"b\"") } func TestOrderedSetContainsReturnsFalseForItemsItDoesNotContains(t *testing.T) { s := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.False(t, s.Contains("d"), "tools: did not expect s to contain element \"d\"") } func TestOrderedSetContainsAllReturnsTrueWhenAllElementsAreContained(t *testing.T) { s := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.True(t, s.ContainsAll("b", "c"), "tools: expected s to contain element \"b\" and \"c\"") } func TestOrderedSetContainsAllReturnsFalseWhenAllElementsAreNotContained(t *testing.T) { s := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.False(t, s.ContainsAll("b", "c", "d"), "tools: did not expect s to contain element \"b\", \"c\" and \"d\"") } func TestOrderedSetIsSubsetReturnsTrueWhenOtherContainsAllElements(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) s2 := NewOrderedSetFromSlice([]string{"a", "b"}) assert.True(t, s1.IsSubset(s2), "tools: expected [a, b] to be a subset of [a, b, c]") } func TestOrderedSetIsSubsetReturnsFalseWhenOtherDoesNotContainAllElements(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b"}) s2 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.False(t, s1.IsSubset(s2), "tools: did not expect [a, b, c] to be a subset of [a, b]") } func TestOrderedSetIsSupersetReturnsTrueWhenContainsAllElementsOfOther(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b"}) s2 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.True(t, s1.IsSuperset(s2), "tools: expected [a, b, c] to be a superset of [a, b]") } func TestOrderedSetIsSupersetReturnsFalseWhenDoesNotContainAllElementsOfOther(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) s2 := NewOrderedSetFromSlice([]string{"a", "b"}) assert.False(t, s1.IsSuperset(s2), "tools: did not expect [a, b] to be a superset of [a, b, c]") } func TestOrderedSetUnion(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a"}) s2 := NewOrderedSetFromSlice([]string{"b", "a"}) elems := make([]string, 0) for e := range s1.Union(s2).Iter() { elems = append(elems, e) } require.Len(t, elems, 2) assert.Equal(t, "a", elems[0]) assert.Equal(t, "b", elems[1]) } func 
TestOrderedSetIntersect(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a"}) s2 := NewOrderedSetFromSlice([]string{"b", "a"}) elems := make([]string, 0) for e := range s1.Intersect(s2).Iter() { elems = append(elems, e) } require.Len(t, elems, 1) assert.Equal(t, "a", elems[0]) } func TestOrderedSetDifference(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b"}) s2 := NewOrderedSetFromSlice([]string{"a"}) elems := make([]string, 0) for e := range s1.Difference(s2).Iter() { elems = append(elems, e) } require.Len(t, elems, 1) assert.Equal(t, "b", elems[0]) } func TestOrderedSetSymmetricDifference(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b"}) s2 := NewOrderedSetFromSlice([]string{"b", "c"}) elems := make([]string, 0) for e := range s1.SymmetricDifference(s2).Iter() { elems = append(elems, e) } require.Len(t, elems, 2) assert.Equal(t, "a", elems[0]) assert.Equal(t, "c", elems[1]) } func TestOrderedSetClear(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b"}) assert.Equal(t, 2, s1.Cardinality()) s1.Clear() assert.Equal(t, 0, s1.Cardinality()) } func TestOrderedSetRemove(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b"}) assert.True(t, s1.Contains("a"), "tools: expected [a, b] to contain 'a'") assert.True(t, s1.Contains("b"), "tools: expected [a, b] to contain 'b'") s1.Remove("a") assert.False(t, s1.Contains("a"), "tools: did not expect to find 'a' in [b]") assert.True(t, s1.Contains("b"), "tools: expected [b] to contain 'b'") } func TestOrderedSetCardinality(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b"}) assert.Equal(t, 2, s1.Cardinality(), "tools: expected cardinality of [a, b] to equal 2") } func TestOrderedSetIter(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) elems := make([]string, 0) for e := range s1.Iter() { elems = append(elems, e) } require.Len(t, elems, 3) assert.Equal(t, "a", elems[0]) assert.Equal(t, "b", elems[1]) assert.Equal(t, "c", elems[2]) } func TestOrderedSetEqualReturnsTrueWhenSameElementsInSameOrder(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) s2 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) assert.True(t, s1.Equal(s2), "tools: expected [a, b, c] to equal [a, b, c]") } func TestOrderedSetEqualReturnsFalseWhenSameElementsInDifferentOrder(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) s2 := NewOrderedSetFromSlice([]string{"a", "c", "b"}) assert.False(t, s1.Equal(s2), "tools: did not expect [a, b, c] to equal [a, c, b]") } func TestOrderedSetEqualReturnsFalseWithDifferentCardinalities(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a"}) s2 := NewOrderedSetFromSlice([]string{"a", "b"}) assert.False(t, s1.Equal(s2), "tools: did not expect [a] to equal [a, b]") } func TestOrderedSetClone(t *testing.T) { s1 := NewOrderedSetFromSlice([]string{"a", "b", "c"}) s2 := s1.Clone() elems := make([]string, 0) for e := range s2.Iter() { elems = append(elems, e) } require.Len(t, elems, 3) assert.Equal(t, "a", elems[0]) assert.Equal(t, "b", elems[1]) assert.Equal(t, "c", elems[2]) } git-lfs-2.3.4/tools/os_tools.go000066400000000000000000000016251317167762300164360ustar00rootroot00000000000000package tools import ( "bytes" "fmt" "os" "strings" "github.com/git-lfs/git-lfs/subprocess" "github.com/pkg/errors" ) func Getwd() (dir string, err error) { dir, err = os.Getwd() if err != nil { return } if isCygwin() { dir, err = translateCygwinPath(dir) if err != nil { return "", errors.Wrap(err, "convert wd to cygwin") } } return } func 
translateCygwinPath(path string) (string, error) { cmd := subprocess.ExecCommand("cygpath", "-w", path) buf := &bytes.Buffer{} cmd.Stderr = buf out, err := cmd.Output() output := strings.TrimSpace(string(out)) if err != nil { return path, fmt.Errorf("Failed to translate path from cygwin to windows: %s", buf.String()) } return output, nil } func TranslateCygwinPath(path string) (string, error) { if isCygwin() { var err error path, err = translateCygwinPath(path) if err != nil { return "", err } } return path, nil } git-lfs-2.3.4/tools/str_tools.go000066400000000000000000000054231317167762300166250ustar00rootroot00000000000000package tools import ( "regexp" "strings" ) var ( // quoteFieldRe greedily matches between matching pairs of '', "", or // non-word characters. quoteFieldRe = regexp.MustCompile("'(.*)'|\"(.*)\"|(\\S*)") ) // QuotedFields is an alternative to strings.Fields (see: // https://golang.org/pkg/strings#Fields) that respects spaces between matching // pairs of quotation delimeters. // // For instance, the quoted fields of the string "foo bar 'baz etc'" would be: // []string{"foo", "bar", "baz etc"} // // Whereas the same argument given to strings.Fields, would return: // []string{"foo", "bar", "'baz", "etc'"} func QuotedFields(s string) []string { submatches := quoteFieldRe.FindAllStringSubmatch(s, -1) out := make([]string, 0, len(submatches)) for _, matches := range submatches { // if a leading or trailing space is found, ignore that if matches[0] == "" { continue } // otherwise, find the first non-empty match (inside balanced // quotes, or a space-delimited string) var str string for _, m := range matches[1:] { if len(m) > 0 { str = m break } } out = append(out, str) } return out } // Ljust returns a copied string slice where each element is left justified to // match the width of the longest element in the set. func Ljust(strs []string) []string { llen := len(Longest(strs)) dup := make([]string, len(strs), cap(strs)) copy(dup, strs) for i, str := range strs { width := MaxInt(0, llen-len(str)) padding := strings.Repeat(" ", width) dup[i] = str + padding } return dup } // Rjust returns a copied string slice where each element is right justified to // match the width of the longest element in the set. func Rjust(strs []string) []string { llen := len(Longest(strs)) dup := make([]string, len(strs), cap(strs)) copy(dup, strs) for i, str := range strs { width := MaxInt(0, llen-len(str)) padding := strings.Repeat(" ", width) dup[i] = padding + str } return dup } // Longest returns the longest element in the string slice in O(n) time and O(1) // space. If strs is empty or nil, an empty string will be returned. func Longest(strs []string) string { if len(strs) == 0 { return "" } var longest string var llen int for _, str := range strs { if len(str) >= llen { longest = str llen = len(longest) } } return longest } // Indent returns a string which prepends "\t" TAB characters to the beginning // of each line in the given string "str". func Indent(str string) string { indented := strings.Replace(str, "\n", "\n\t", -1) if len(indented) > 0 { indented = "\t" + indented } return indented } var ( tabRe = regexp.MustCompile(`(?m)^[ \t]+`) ) // Undent removes all leading tabs in the given string "str", line-wise. 
func Undent(str string) string { return tabRe.ReplaceAllString(str, "") } git-lfs-2.3.4/tools/str_tools_test.go000066400000000000000000000131441317167762300176630ustar00rootroot00000000000000package tools import ( "testing" "github.com/stretchr/testify/assert" ) type QuotedFieldsTestCase struct { Given string Expected []string } func (c *QuotedFieldsTestCase) Assert(t *testing.T) { actual := QuotedFields(c.Given) assert.Equal(t, c.Expected, actual, "tools: expected QuotedFields(%q) to equal %#v (was %#v)", c.Given, c.Expected, actual, ) } func TestQuotedFields(t *testing.T) { for desc, c := range map[string]QuotedFieldsTestCase{ "simple": {`foo bar`, []string{"foo", "bar"}}, "simple trailing": {`foo bar `, []string{"foo", "bar"}}, "simple leading": {` foo bar`, []string{"foo", "bar"}}, "single quotes": {`foo 'bar baz'`, []string{"foo", "bar baz"}}, "single quotes trailing": {`foo 'bar baz' `, []string{"foo", "bar baz"}}, "single quotes leading": {` foo 'bar baz'`, []string{"foo", "bar baz"}}, "single quotes empty": {`foo ''`, []string{"foo", ""}}, "single quotes trailing empty": {`foo '' `, []string{"foo", ""}}, "single quotes leading empty": {` foo ''`, []string{"foo", ""}}, "double quotes": {`foo "bar baz"`, []string{"foo", "bar baz"}}, "double quotes trailing": {`foo "bar baz" `, []string{"foo", "bar baz"}}, "double quotes leading": {` foo "bar baz"`, []string{"foo", "bar baz"}}, "double quotes empty": {`foo ""`, []string{"foo", ""}}, "double quotes trailing empty": {`foo "" `, []string{"foo", ""}}, "double quotes leading empty": {` foo ""`, []string{"foo", ""}}, "nested single quotes": {`foo 'bar 'baz''`, []string{"foo", "bar 'baz'"}}, "nested single quotes trailing": {`foo 'bar 'baz'' `, []string{"foo", "bar 'baz'"}}, "nested single quotes leading": {` foo 'bar 'baz''`, []string{"foo", "bar 'baz'"}}, "nested single quotes empty": {`foo 'bar '''`, []string{"foo", "bar ''"}}, "nested single quotes trailing empty": {`foo 'bar ''' `, []string{"foo", "bar ''"}}, "nested single quotes leading empty": {` foo 'bar '''`, []string{"foo", "bar ''"}}, "nested double quotes": {`foo "bar "baz""`, []string{"foo", `bar "baz"`}}, "nested double quotes trailing": {`foo "bar "baz"" `, []string{"foo", `bar "baz"`}}, "nested double quotes leading": {` foo "bar "baz""`, []string{"foo", `bar "baz"`}}, "nested double quotes empty": {`foo "bar """`, []string{"foo", `bar ""`}}, "nested double quotes trailing empty": {`foo "bar """ `, []string{"foo", `bar ""`}}, "nested double quotes leading empty": {` foo "bar """`, []string{"foo", `bar ""`}}, "mixed quotes": {`foo 'bar "baz"'`, []string{"foo", `bar "baz"`}}, "mixed quotes trailing": {`foo 'bar "baz"' `, []string{"foo", `bar "baz"`}}, "mixed quotes leading": {` foo 'bar "baz"'`, []string{"foo", `bar "baz"`}}, "mixed quotes empty": {`foo 'bar ""'`, []string{"foo", `bar ""`}}, "mixed quotes trailing empty": {`foo 'bar ""' `, []string{"foo", `bar ""`}}, "mixed quotes leading empty": {` foo 'bar ""'`, []string{"foo", `bar ""`}}, } { t.Log(desc) c.Assert(t) } } func TestLongestReturnsEmptyStringGivenEmptySet(t *testing.T) { assert.Equal(t, "", Longest(nil)) } func TestLongestReturnsLongestString(t *testing.T) { assert.Equal(t, "longest", Longest([]string{"short", "longer", "longest"})) } func TestLongestReturnsLastStringGivenSameLength(t *testing.T) { assert.Equal(t, "baz", Longest([]string{"foo", "bar", "baz"})) } func TestRjustRightJustifiesString(t *testing.T) { unjust := []string{ "short", "longer", "longest", } expected := []string{ " short", " longer", 
"longest", } assert.Equal(t, expected, Rjust(unjust)) } func TestLjustLeftJustifiesString(t *testing.T) { unjust := []string{ "short", "longer", "longest", } expected := []string{ "short ", "longer ", "longest", } assert.Equal(t, expected, Ljust(unjust)) } func TestIndentIndentsStrings(t *testing.T) { assert.Equal(t, "\tfoo\n\tbar", Indent("foo\nbar")) } func TestIndentIndentsSingleLineStrings(t *testing.T) { assert.Equal(t, "\tfoo", Indent("foo")) } func TestIndentReturnsEmptyStrings(t *testing.T) { assert.Equal(t, "", Indent("")) } func TestUndentRemovesLeadingWhitespace(t *testing.T) { assert.Equal(t, "foo", Undent("\t\t\tfoo")) assert.Equal(t, "foo", Undent("foo")) assert.Equal(t, "foo", Undent(" foo")) } func TestUndentRemovesPreservesLinebreaks(t *testing.T) { // No leading space assert.Equal(t, "\r\nfoo", Undent("\r\nfoo")) assert.Equal(t, "foo\r\n", Undent("foo\r\n")) assert.Equal(t, "\r\nfoo\r\n", Undent("\r\nfoo\r\n")) assert.Equal(t, "\nfoo", Undent("\nfoo")) assert.Equal(t, "foo\n", Undent("foo\n")) assert.Equal(t, "\nfoo\n", Undent("\nfoo\n")) // Trim leading space assert.Equal(t, "\r\nfoo", Undent("\r\n foo")) assert.Equal(t, "foo\r\n", Undent(" foo\r\n")) assert.Equal(t, "\r\nfoo\r\n", Undent("\r\n foo\r\n")) assert.Equal(t, "\nfoo", Undent("\n foo")) assert.Equal(t, "foo\n", Undent(" foo\n")) assert.Equal(t, "\nfoo\n", Undent("\n foo\n")) // Preserve trailing space assert.Equal(t, "\r\nfoo ", Undent("\r\nfoo ")) assert.Equal(t, "foo \r\n", Undent("foo \r\n")) assert.Equal(t, "\r\nfoo \r\n", Undent("\r\nfoo \r\n")) assert.Equal(t, "\nfoo ", Undent("\nfoo ")) assert.Equal(t, "foo \n", Undent("foo \n")) assert.Equal(t, "\nfoo \n", Undent("\nfoo \n")) // Trim leading space, preserve trailing space assert.Equal(t, "\r\nfoo ", Undent("\r\n foo ")) assert.Equal(t, "foo \r\n", Undent(" foo \r\n")) assert.Equal(t, "\r\nfoo \r\n", Undent("\r\n foo \r\n")) assert.Equal(t, "\nfoo ", Undent("\n foo ")) assert.Equal(t, "foo \n", Undent(" foo \n")) assert.Equal(t, "\nfoo \n", Undent("\n foo \n")) } git-lfs-2.3.4/tools/stringset.go000066400000000000000000000102061317167762300166120ustar00rootroot00000000000000// Generated by: gen, modified by Steve Streeting // TypeWriter: container // Directive: +gen on main.string // See http://clipperhouse.github.io/gen for documentation // Set is a modification of https://github.com/deckarep/golang-set // The MIT License (MIT) // Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) package tools // The primary type that represents a set type StringSet map[string]struct{} // Creates and returns a reference to an empty set. func NewStringSet() StringSet { return make(StringSet) } // Creates and returns a reference to an empty set with a capacity. func NewStringSetWithCapacity(capacity int) StringSet { return make(StringSet, capacity) } // Creates and returns a reference to a set from an existing slice func NewStringSetFromSlice(s []string) StringSet { a := NewStringSetWithCapacity(len(s)) for _, item := range s { a.Add(item) } return a } // Adds an item to the current set if it doesn't already exist in the set. func (set StringSet) Add(i string) bool { _, found := set[i] set[i] = struct{}{} return !found //False if it e xisted already } // Determines if a given item is already in the set. 
func (set StringSet) Contains(i string) bool { _, found := set[i] return found } // Determines if the given items are all in the set func (set StringSet) ContainsAll(i ...string) bool { allSet := NewStringSetFromSlice(i) if allSet.IsSubset(set) { return true } return false } // Determines if every item in the other set is in this set. func (set StringSet) IsSubset(other StringSet) bool { for elem := range set { if !other.Contains(elem) { return false } } return true } // Determines if every item of this set is in the other set. func (set StringSet) IsSuperset(other StringSet) bool { return other.IsSubset(set) } // Returns a new set with all items in both sets. func (set StringSet) Union(other StringSet) StringSet { unionedSet := NewStringSet() for elem := range set { unionedSet.Add(elem) } for elem := range other { unionedSet.Add(elem) } return unionedSet } // Returns a new set with items that exist only in both sets. func (set StringSet) Intersect(other StringSet) StringSet { intersection := NewStringSet() // loop over smaller set if set.Cardinality() < other.Cardinality() { for elem := range set { if other.Contains(elem) { intersection.Add(elem) } } } else { for elem := range other { if set.Contains(elem) { intersection.Add(elem) } } } return intersection } // Returns a new set with items in the current set but not in the other set func (set StringSet) Difference(other StringSet) StringSet { differencedSet := NewStringSet() for elem := range set { if !other.Contains(elem) { differencedSet.Add(elem) } } return differencedSet } // Returns a new set with items in the current set or the other set but not in both. func (set StringSet) SymmetricDifference(other StringSet) StringSet { aDiff := set.Difference(other) bDiff := other.Difference(set) return aDiff.Union(bDiff) } // Clears the entire set to be the empty set. func (set *StringSet) Clear() { *set = make(StringSet) } // Allows the removal of a single item in the set. func (set StringSet) Remove(i string) { delete(set, i) } // Cardinality returns how many items are currently in the set. func (set StringSet) Cardinality() int { return len(set) } // Iter() returns a channel of type string that you can range over. func (set StringSet) Iter() <-chan string { ch := make(chan string) go func() { for elem := range set { ch <- elem } close(ch) }() return ch } // Equal determines if two sets are equal to each other. // If they both are the same size and have the same items they are considered equal. // Order of items is not relevent for sets to be equal. func (set StringSet) Equal(other StringSet) bool { if set.Cardinality() != other.Cardinality() { return false } for elem := range set { if !other.Contains(elem) { return false } } return true } // Returns a clone of the set. // Does NOT clone the underlying elements. func (set StringSet) Clone() StringSet { clonedSet := NewStringSet() for elem := range set { clonedSet.Add(elem) } return clonedSet } git-lfs-2.3.4/tools/time_tools.go000066400000000000000000000013171317167762300167510ustar00rootroot00000000000000package tools import ( "time" ) // IsExpiredAtOrIn returns whether or not the result of calling TimeAtOrIn is // "expired" within "until" units of time from now. 
func IsExpiredAtOrIn(from time.Time, until time.Duration, at time.Time, in time.Duration) (time.Time, bool) { expiration := TimeAtOrIn(from, at, in) if expiration.IsZero() { return expiration, false } return expiration, expiration.Before(time.Now().Add(until)) } // TimeAtOrIn returns either "at", or the "in" duration added to the current // time. TimeAtOrIn prefers to add a duration rather than return the "at" // parameter. func TimeAtOrIn(from, at time.Time, in time.Duration) time.Time { if in == 0 { return at } return from.Add(in) } git-lfs-2.3.4/tools/time_tools_test.go000066400000000000000000000037011317167762300200070ustar00rootroot00000000000000package tools import ( "testing" "time" "github.com/stretchr/testify/assert" ) func TestTimeAtOrInNoDuration(t *testing.T) { now := time.Now() then := time.Now().Add(24 * time.Hour) got := TimeAtOrIn(now, then, time.Duration(0)) assert.Equal(t, then, got) } func TestTimeAtOrInWithDuration(t *testing.T) { now := time.Now() duration := 5 * time.Minute expected := now.Add(duration) got := TimeAtOrIn(now, now, duration) assert.Equal(t, expected, got) } func TestTimeAtOrInZeroTime(t *testing.T) { now := time.Now() zero := time.Time{} got := TimeAtOrIn(now, zero, 0) assert.Equal(t, zero, got) } func TestIsExpiredAtOrInWithNonZeroTime(t *testing.T) { now := time.Now() within := 5 * time.Minute at := now.Add(10 * time.Minute) in := time.Duration(0) expired, ok := IsExpiredAtOrIn(now, within, at, in) assert.False(t, ok) assert.Equal(t, at, expired) } func TestIsExpiredAtOrInWithNonZeroDuration(t *testing.T) { now := time.Now() within := 5 * time.Minute at := time.Time{} in := 10 * time.Minute expired, ok := IsExpiredAtOrIn(now, within, at, in) assert.Equal(t, now.Add(in), expired) assert.False(t, ok) } func TestIsExpiredAtOrInWithNonZeroTimeExpired(t *testing.T) { now := time.Now() within := 5 * time.Minute at := now.Add(3 * time.Minute) in := time.Duration(0) expired, ok := IsExpiredAtOrIn(now, within, at, in) assert.True(t, ok) assert.Equal(t, at, expired) } func TestIsExpiredAtOrInWithNonZeroDurationExpired(t *testing.T) { now := time.Now() within := 5 * time.Minute at := time.Time{} in := -10 * time.Minute expired, ok := IsExpiredAtOrIn(now, within, at, in) assert.Equal(t, now.Add(in), expired) assert.True(t, ok) } func TestIsExpiredAtOrInWithAmbiguousTime(t *testing.T) { now := time.Now() within := 5 * time.Minute at := now.Add(-10 * time.Minute) in := 10 * time.Minute expired, ok := IsExpiredAtOrIn(now, within, at, in) assert.Equal(t, now.Add(in), expired) assert.False(t, ok) } git-lfs-2.3.4/tools/util_generic.go000066400000000000000000000002201317167762300172340ustar00rootroot00000000000000// +build !linux !cgo package tools import ( "io" ) func CloneFile(writer io.Writer, reader io.Reader) (bool, error) { return false, nil } git-lfs-2.3.4/tools/util_linux.go000066400000000000000000000011751317167762300167710ustar00rootroot00000000000000// +build linux,cgo package tools /* #include #undef BTRFS_IOCTL_MAGIC #define BTRFS_IOCTL_MAGIC 0x94 #undef BTRFS_IOC_CLONE #define BTRFS_IOC_CLONE _IOW (BTRFS_IOCTL_MAGIC, 9, int) */ import "C" import ( "io" "os" "syscall" ) const ( BtrfsIocClone = C.BTRFS_IOC_CLONE ) func CloneFile(writer io.Writer, reader io.Reader) (bool, error) { fdst, fdstFound := writer.(*os.File) fsrc, fsrcFound := reader.(*os.File) if fdstFound && fsrcFound { if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fdst.Fd(), BtrfsIocClone, fsrc.Fd()); err != 0 { return false, err } return true, nil } return false, nil } 
git-lfs-2.3.4/tools/util_test.go000066400000000000000000000011371317167762300166070ustar00rootroot00000000000000package tools import ( "bytes" "io/ioutil" "testing" "github.com/stretchr/testify/assert" ) func TestCopyWithCallback(t *testing.T) { buf := bytes.NewBufferString("BOOYA") called := 0 calledWritten := make([]int64, 0, 2) n, err := CopyWithCallback(ioutil.Discard, buf, 5, func(total int64, written int64, current int) error { called += 1 calledWritten = append(calledWritten, written) assert.Equal(t, 5, int(total)) return nil }) assert.Nil(t, err) assert.Equal(t, 5, int(n)) assert.Equal(t, 1, called) assert.Len(t, calledWritten, 1) assert.Equal(t, 5, int(calledWritten[0])) } git-lfs-2.3.4/tq/000077500000000000000000000000001317167762300135265ustar00rootroot00000000000000git-lfs-2.3.4/tq/adapterbase.go000066400000000000000000000147761317167762300163470ustar00rootroot00000000000000package tq import ( "fmt" "net/http" "regexp" "strings" "sync" "github.com/git-lfs/git-lfs/lfsapi" "github.com/rubyist/tracerx" ) // adapterBase implements the common functionality for core adapters which // process transfers with N workers handling an oid each, and which wait for // authentication to succeed on one worker before proceeding type adapterBase struct { name string direction Direction transferImpl transferImplementation apiClient *lfsapi.Client remote string jobChan chan *job debugging bool cb ProgressCallback // WaitGroup to sync the completion of all workers workerWait sync.WaitGroup // WaitGroup to sync the completion of all in-flight jobs jobWait *sync.WaitGroup // WaitGroup to serialise the first transfer response to perform login if needed authWait sync.WaitGroup } // transferImplementation must be implemented to provide the actual upload/download // implementation for all core transfer approaches that use adapterBase for // convenience. This function will be called on multiple goroutines so it // must be either stateless or thread safe. However it will never be called // for the same oid in parallel. // If authOkFunc is not nil, implementations must call it as early as possible // when authentication succeeded, before the whole file content is transferred type transferImplementation interface { // WorkerStarting is called when a worker goroutine starts to process jobs // Implementations can run some startup logic here & return some context if needed WorkerStarting(workerNum int) (interface{}, error) // WorkerEnding is called when a worker goroutine is shutting down // Implementations can clean up per-worker resources here, context is as returned from WorkerStarting WorkerEnding(workerNum int, ctx interface{}) // DoTransfer performs a single transfer within a worker. 
ctx is any context returned from WorkerStarting DoTransfer(ctx interface{}, t *Transfer, cb ProgressCallback, authOkFunc func()) error } func newAdapterBase(name string, dir Direction, ti transferImplementation) *adapterBase { return &adapterBase{ name: name, direction: dir, transferImpl: ti, jobWait: new(sync.WaitGroup), } } func (a *adapterBase) Name() string { return a.name } func (a *adapterBase) Direction() Direction { return a.direction } func (a *adapterBase) Begin(cfg AdapterConfig, cb ProgressCallback) error { a.apiClient = cfg.APIClient() a.remote = cfg.Remote() a.cb = cb a.jobChan = make(chan *job, 100) a.debugging = a.apiClient.OSEnv().Bool("GIT_TRANSFER_TRACE", false) maxConcurrency := cfg.ConcurrentTransfers() a.Trace("xfer: adapter %q Begin() with %d workers", a.Name(), maxConcurrency) a.workerWait.Add(maxConcurrency) a.authWait.Add(1) for i := 0; i < maxConcurrency; i++ { ctx, err := a.transferImpl.WorkerStarting(i) if err != nil { return err } go a.worker(i, ctx) } a.Trace("xfer: adapter %q started", a.Name()) return nil } type job struct { T *Transfer results chan<- TransferResult wg *sync.WaitGroup } func (j *job) Done(err error) { j.results <- TransferResult{j.T, err} j.wg.Done() } func (a *adapterBase) Add(transfers ...*Transfer) <-chan TransferResult { results := make(chan TransferResult, len(transfers)) a.jobWait.Add(len(transfers)) go func() { for _, t := range transfers { a.jobChan <- &job{t, results, a.jobWait} } a.jobWait.Wait() close(results) }() return results } func (a *adapterBase) End() { a.Trace("xfer: adapter %q End()", a.Name()) a.jobWait.Wait() close(a.jobChan) // wait for all transfers to complete a.workerWait.Wait() a.Trace("xfer: adapter %q stopped", a.Name()) } func (a *adapterBase) Trace(format string, args ...interface{}) { if !a.debugging { return } tracerx.Printf(format, args...) 
} // worker function, many of these run per adapter func (a *adapterBase) worker(workerNum int, ctx interface{}) { a.Trace("xfer: adapter %q worker %d starting", a.Name(), workerNum) waitForAuth := workerNum > 0 signalAuthOnResponse := workerNum == 0 // First worker is the only one allowed to start immediately // The rest wait until successful response from 1st worker to // make sure only 1 login prompt is presented if necessary // Deliberately outside jobChan processing so we know worker 0 will process 1st item if waitForAuth { a.Trace("xfer: adapter %q worker %d waiting for Auth", a.Name(), workerNum) a.authWait.Wait() a.Trace("xfer: adapter %q worker %d auth signal received", a.Name(), workerNum) } for job := range a.jobChan { t := job.T var authCallback func() if signalAuthOnResponse { authCallback = func() { a.authWait.Done() signalAuthOnResponse = false } } a.Trace("xfer: adapter %q worker %d processing job for %q", a.Name(), workerNum, t.Oid) // Actual transfer happens here var err error if t.Size < 0 { err = fmt.Errorf("Git LFS: object %q has invalid size (got: %d)", t.Oid, t.Size) } else { err = a.transferImpl.DoTransfer(ctx, t, a.cb, authCallback) } // Mark the job as completed, and alter all listeners job.Done(err) a.Trace("xfer: adapter %q worker %d finished job for %q", a.Name(), workerNum, t.Oid) } // This will only happen if no jobs were submitted; just wake up all workers to finish if signalAuthOnResponse { a.authWait.Done() } a.Trace("xfer: adapter %q worker %d stopping", a.Name(), workerNum) a.transferImpl.WorkerEnding(workerNum, ctx) a.workerWait.Done() } var httpRE = regexp.MustCompile(`\Ahttps?://`) func (a *adapterBase) newHTTPRequest(method string, rel *Action) (*http.Request, error) { if !httpRE.MatchString(rel.Href) { urlfragment := strings.SplitN(rel.Href, "?", 2)[0] return nil, fmt.Errorf("missing protocol: %q", urlfragment) } req, err := http.NewRequest(method, rel.Href, nil) if err != nil { return nil, err } for key, value := range rel.Header { req.Header.Set(key, value) } return req, nil } func (a *adapterBase) doHTTP(t *Transfer, req *http.Request) (*http.Response, error) { if t.Authenticated { return a.apiClient.Do(req) } return a.apiClient.DoWithAuth(a.remote, req) } func advanceCallbackProgress(cb ProgressCallback, t *Transfer, numBytes int64) { if cb != nil { // Must split into max int sizes since read count is int const maxInt = int(^uint(0) >> 1) for read := int64(0); read < numBytes; { remainder := numBytes - read if remainder > int64(maxInt) { read += int64(maxInt) cb(t.Name, t.Size, read, maxInt) } else { read += remainder cb(t.Name, t.Size, read, int(remainder)) } } } } git-lfs-2.3.4/tq/api.go000066400000000000000000000037311317167762300146320ustar00rootroot00000000000000package tq import ( "time" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/lfsapi" "github.com/rubyist/tracerx" ) type tqClient struct { MaxRetries int *lfsapi.Client } type batchRequest struct { Operation string `json:"operation"` Objects []*Transfer `json:"objects"` TransferAdapterNames []string `json:"transfers,omitempty"` } type BatchResponse struct { Objects []*Transfer `json:"objects"` TransferAdapterName string `json:"transfer"` endpoint lfsapi.Endpoint } func Batch(m *Manifest, dir Direction, remote string, objects []*Transfer) (*BatchResponse, error) { if len(objects) == 0 { return &BatchResponse{}, nil } return m.batchClient().Batch(remote, &batchRequest{ Operation: dir.String(), Objects: objects, TransferAdapterNames: m.GetAdapterNames(dir), }) } func (c 
*tqClient) Batch(remote string, bReq *batchRequest) (*BatchResponse, error) { bRes := &BatchResponse{} if len(bReq.Objects) == 0 { return bRes, nil } if len(bReq.TransferAdapterNames) == 1 && bReq.TransferAdapterNames[0] == "basic" { bReq.TransferAdapterNames = nil } bRes.endpoint = c.Endpoints.Endpoint(bReq.Operation, remote) requestedAt := time.Now() req, err := c.NewRequest("POST", bRes.endpoint, "objects/batch", bReq) if err != nil { return nil, errors.Wrap(err, "batch request") } tracerx.Printf("api: batch %d files", len(bReq.Objects)) req = c.LogRequest(req, "lfs.batch") res, err := c.DoWithAuth(remote, lfsapi.WithRetries(req, c.MaxRetries)) if err != nil { tracerx.Printf("api error: %s", err) return nil, errors.Wrap(err, "batch response") } if err := lfsapi.DecodeJSON(res, bRes); err != nil { return bRes, errors.Wrap(err, "batch response") } if res.StatusCode != 200 { return nil, lfsapi.NewStatusCodeError(res) } for _, obj := range bRes.Objects { for _, a := range obj.Actions { a.createdAt = requestedAt } } return bRes, nil } git-lfs-2.3.4/tq/api_test.go000066400000000000000000000115371317167762300156740ustar00rootroot00000000000000package tq import ( "encoding/json" "fmt" "net/http" "net/http/httptest" "os" "path/filepath" "strings" "testing" "github.com/git-lfs/git-lfs/lfsapi" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/xeipuuv/gojsonschema" ) func TestAPIBatch(t *testing.T) { require.NotNil(t, batchReqSchema, batchReqSchema.Source) require.NotNil(t, batchResSchema, batchResSchema.Source) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/api/objects/batch" { w.WriteHeader(404) return } assert.Equal(t, "POST", r.Method) assert.Equal(t, "80", r.Header.Get("Content-Length")) bodyLoader, body := gojsonschema.NewReaderLoader(r.Body) bReq := &batchRequest{} err := json.NewDecoder(body).Decode(bReq) r.Body.Close() assert.Nil(t, err) assertSchema(t, batchReqSchema, bodyLoader) assert.EqualValues(t, []string{"basic", "whatev"}, bReq.TransferAdapterNames) if assert.Equal(t, 1, len(bReq.Objects)) { assert.Equal(t, "a", bReq.Objects[0].Oid) } w.Header().Set("Content-Type", "application/json") writeLoader, resWriter := gojsonschema.NewWriterLoader(w) err = json.NewEncoder(resWriter).Encode(&BatchResponse{ TransferAdapterName: "basic", Objects: bReq.Objects, }) assert.Nil(t, err) assertSchema(t, batchResSchema, writeLoader) })) defer srv.Close() c, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.url": srv.URL + "/api", })) require.Nil(t, err) tqc := &tqClient{Client: c} bReq := &batchRequest{ TransferAdapterNames: []string{"basic", "whatev"}, Objects: []*Transfer{ &Transfer{Oid: "a", Size: 1}, }, } bRes, err := tqc.Batch("remote", bReq) require.Nil(t, err) assert.Equal(t, "basic", bRes.TransferAdapterName) if assert.Equal(t, 1, len(bRes.Objects)) { assert.Equal(t, "a", bRes.Objects[0].Oid) } } func TestAPIBatchOnlyBasic(t *testing.T) { require.NotNil(t, batchReqSchema, batchReqSchema.Source) require.NotNil(t, batchResSchema, batchResSchema.Source) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/api/objects/batch" { w.WriteHeader(404) return } assert.Equal(t, "POST", r.Method) bodyLoader, body := gojsonschema.NewReaderLoader(r.Body) bReq := &batchRequest{} err := json.NewDecoder(body).Decode(bReq) r.Body.Close() assert.Nil(t, err) assertSchema(t, batchReqSchema, bodyLoader) assert.Equal(t, 0, 
len(bReq.TransferAdapterNames)) if assert.Equal(t, 1, len(bReq.Objects)) { assert.Equal(t, "a", bReq.Objects[0].Oid) } w.Header().Set("Content-Type", "application/json") writeLoader, resWriter := gojsonschema.NewWriterLoader(w) err = json.NewEncoder(resWriter).Encode(&BatchResponse{ TransferAdapterName: "basic", Objects: make([]*Transfer, 0), }) assert.Nil(t, err) assertSchema(t, batchResSchema, writeLoader) })) defer srv.Close() c, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.url": srv.URL + "/api", })) require.Nil(t, err) tqc := &tqClient{Client: c} bReq := &batchRequest{ TransferAdapterNames: []string{"basic"}, Objects: []*Transfer{ &Transfer{Oid: "a", Size: 1}, }, } bRes, err := tqc.Batch("remote", bReq) require.Nil(t, err) assert.Equal(t, "basic", bRes.TransferAdapterName) } func TestAPIBatchEmptyObjects(t *testing.T) { c, err := lfsapi.NewClient(nil, nil) require.Nil(t, err) tqc := &tqClient{Client: c} bReq := &batchRequest{ TransferAdapterNames: []string{"basic", "whatev"}, } bRes, err := tqc.Batch("remote", bReq) require.Nil(t, err) assert.Equal(t, "", bRes.TransferAdapterName) assert.Equal(t, 0, len(bRes.Objects)) } var ( batchReqSchema *sourcedSchema batchResSchema *sourcedSchema ) func init() { wd, err := os.Getwd() if err != nil { fmt.Println("getwd error:", err) return } batchReqSchema = getSchema(wd, "schemas/http-batch-request-schema.json") batchResSchema = getSchema(wd, "schemas/http-batch-response-schema.json") } type sourcedSchema struct { Source string *gojsonschema.Schema } func getSchema(wd, relpath string) *sourcedSchema { abspath := filepath.ToSlash(filepath.Join(wd, relpath)) s, err := gojsonschema.NewSchema(gojsonschema.NewReferenceLoader(fmt.Sprintf("file:///%s", abspath))) if err != nil { fmt.Printf("schema load error for %q: %+v\n", relpath, err) } return &sourcedSchema{Source: relpath, Schema: s} } func assertSchema(t *testing.T, schema *sourcedSchema, dataLoader gojsonschema.JSONLoader) { res, err := schema.Validate(dataLoader) if assert.Nil(t, err) { if res.Valid() { return } resErrors := res.Errors() valErrors := make([]string, 0, len(resErrors)) for _, resErr := range resErrors { valErrors = append(valErrors, resErr.String()) } t.Errorf("Schema: %s\n%s", schema.Source, strings.Join(valErrors, "\n")) } } git-lfs-2.3.4/tq/basic_download.go000066400000000000000000000166121317167762300170330ustar00rootroot00000000000000package tq import ( "fmt" "hash" "io" "os" "path/filepath" "regexp" "strconv" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/localstorage" "github.com/git-lfs/git-lfs/tools" "github.com/rubyist/tracerx" ) // Adapter for basic HTTP downloads, includes resuming via HTTP Range type basicDownloadAdapter struct { *adapterBase } func (a *basicDownloadAdapter) ClearTempStorage() error { return os.RemoveAll(a.tempDir()) } func (a *basicDownloadAdapter) tempDir() string { // Must be dedicated to this adapter as deleted by ClearTempStorage // Also make local to this repo not global, and separate to localstorage temp, // which gets cleared at the end of every invocation d := filepath.Join(localstorage.Objects().RootDir, "incomplete") if err := os.MkdirAll(d, 0755); err != nil { return os.TempDir() } return d } func (a *basicDownloadAdapter) WorkerStarting(workerNum int) (interface{}, error) { return nil, nil } func (a *basicDownloadAdapter) WorkerEnding(workerNum int, ctx interface{}) { } func (a *basicDownloadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCallback, authOkFunc func()) error { f, 
fromByte, hashSoFar, err := a.checkResumeDownload(t) if err != nil { return err } return a.download(t, cb, authOkFunc, f, fromByte, hashSoFar) } // Checks to see if a download can be resumed, and if so returns a non-nil locked file, byte start and hash func (a *basicDownloadAdapter) checkResumeDownload(t *Transfer) (outFile *os.File, fromByte int64, hashSoFar hash.Hash, e error) { // lock the file by opening it for read/write, rather than checking Stat() etc // which could be subject to race conditions by other processes f, err := os.OpenFile(a.downloadFilename(t), os.O_RDWR, 0644) if err != nil { // Create a new file instead, must not already exist or error (permissions / race condition) newfile, err := os.OpenFile(a.downloadFilename(t), os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0644) return newfile, 0, nil, err } // Successfully opened an existing file at this point // Read any existing data into hash then return file handle at end hash := tools.NewLfsContentHash() n, err := io.Copy(hash, f) if err != nil { f.Close() return nil, 0, nil, err } tracerx.Printf("xfer: Attempting to resume download of %q from byte %d", t.Oid, n) return f, n, hash, nil } // Create or open a download file for resuming func (a *basicDownloadAdapter) downloadFilename(t *Transfer) string { // Not a temp file since we will be resuming it return filepath.Join(a.tempDir(), t.Oid+".tmp") } // download starts or resumes and download. Always closes dlFile if non-nil func (a *basicDownloadAdapter) download(t *Transfer, cb ProgressCallback, authOkFunc func(), dlFile *os.File, fromByte int64, hash hash.Hash) error { if dlFile != nil { // ensure we always close dlFile. Note that this does not conflict with the // early close below, as close is idempotent. defer dlFile.Close() } rel, err := t.Rel("download") if err != nil { return err } if rel == nil { return errors.Errorf("Object %s not found on the server.", t.Oid) } req, err := a.newHTTPRequest("GET", rel) if err != nil { return err } if fromByte > 0 { if dlFile == nil || hash == nil { return fmt.Errorf("Cannot restart %v from %d without a file & hash", t.Oid, fromByte) } // We could just use a start byte, but since we know the length be specific req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", fromByte, t.Size-1)) } req = a.apiClient.LogRequest(req, "lfs.data.download") res, err := a.doHTTP(t, req) if err != nil { // Special-case status code 416 () - fall back if fromByte > 0 && dlFile != nil && (res != nil && res.StatusCode == 416) { tracerx.Printf("xfer: server rejected resume download request for %q from byte %d; re-downloading from start", t.Oid, fromByte) dlFile.Close() os.Remove(dlFile.Name()) return a.download(t, cb, authOkFunc, nil, 0, nil) } return errors.NewRetriableError(err) } defer res.Body.Close() // Range request must return 206 & content range to confirm if fromByte > 0 { rangeRequestOk := false var failReason string // check 206 and Content-Range, fall back if either not as expected if res.StatusCode == 206 { // Probably a successful range request, check Content-Range if rangeHdr := res.Header.Get("Content-Range"); rangeHdr != "" { regex := regexp.MustCompile(`bytes (\d+)\-.*`) match := regex.FindStringSubmatch(rangeHdr) if match != nil && len(match) > 1 { contentStart, _ := strconv.ParseInt(match[1], 10, 64) if contentStart == fromByte { rangeRequestOk = true } else { failReason = fmt.Sprintf("Content-Range start byte incorrect: %s expected %d", match[1], fromByte) } } else { failReason = fmt.Sprintf("badly formatted Content-Range header: %q", rangeHdr) } } 
else { failReason = "missing Content-Range header in response" } } else { failReason = fmt.Sprintf("expected status code 206, received %d", res.StatusCode) } if rangeRequestOk { tracerx.Printf("xfer: server accepted resume download request: %q from byte %d", t.Oid, fromByte) advanceCallbackProgress(cb, t, fromByte) } else { // Abort resume, perform regular download tracerx.Printf("xfer: failed to resume download for %q from byte %d: %s. Re-downloading from start", t.Oid, fromByte, failReason) dlFile.Close() os.Remove(dlFile.Name()) if res.StatusCode == 200 { // If status code was 200 then server just ignored Range header and // sent everything. Don't re-request, use this one from byte 0 dlFile = nil fromByte = 0 hash = nil } else { // re-request needed return a.download(t, cb, authOkFunc, nil, 0, nil) } } } // Signal auth OK on success response, before starting download to free up // other workers immediately if authOkFunc != nil { authOkFunc() } var hasher *tools.HashingReader httpReader := tools.NewRetriableReader(res.Body) if fromByte > 0 && hash != nil { // pre-load hashing reader with previous content hasher = tools.NewHashingReaderPreloadHash(httpReader, hash) } else { hasher = tools.NewHashingReader(httpReader) } if dlFile == nil { // New file start dlFile, err = os.OpenFile(a.downloadFilename(t), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { return err } defer dlFile.Close() } dlfilename := dlFile.Name() // Wrap callback to give name context ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error { if cb != nil { return cb(t.Name, totalSize, readSoFar+fromByte, readSinceLast) } return nil } written, err := tools.CopyWithCallback(dlFile, hasher, res.ContentLength, ccb) if err != nil { return errors.Wrapf(err, "cannot write data to tempfile %q", dlfilename) } if err := dlFile.Close(); err != nil { return fmt.Errorf("can't close tempfile %q: %v", dlfilename, err) } if actual := hasher.Hash(); actual != t.Oid { return fmt.Errorf("Expected OID %s, got %s after %d bytes written", t.Oid, actual, written) } return tools.RenameFileCopyPermissions(dlfilename, t.Path) } func configureBasicDownloadAdapter(m *Manifest) { m.RegisterNewAdapterFunc(BasicAdapterName, Download, func(name string, dir Direction) Adapter { switch dir { case Download: bd := &basicDownloadAdapter{newAdapterBase(name, dir, nil)} // self implements impl bd.transferImpl = bd return bd case Upload: panic("Should never ask this func to upload") } return nil }) } git-lfs-2.3.4/tq/basic_upload.go000066400000000000000000000104051317167762300165020ustar00rootroot00000000000000package tq import ( "io" "io/ioutil" "os" "path/filepath" "strconv" "strings" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/progress" ) const ( BasicAdapterName = "basic" ) // Adapter for basic uploads (non resumable) type basicUploadAdapter struct { *adapterBase } func (a *basicUploadAdapter) ClearTempStorage() error { // Should be empty already but also remove dir return os.RemoveAll(a.tempDir()) } func (a *basicUploadAdapter) tempDir() string { // Must be dedicated to this adapter as deleted by ClearTempStorage d := filepath.Join(os.TempDir(), "git-lfs-basic-temp") if err := os.MkdirAll(d, 0755); err != nil { return os.TempDir() } return d } func (a *basicUploadAdapter) WorkerStarting(workerNum int) (interface{}, error) { return nil, nil } func (a *basicUploadAdapter) WorkerEnding(workerNum int, ctx interface{}) { } func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t 
*Transfer, cb ProgressCallback, authOkFunc func()) error { rel, err := t.Rel("upload") if err != nil { return err } if rel == nil { return errors.Errorf("No upload action for object: %s", t.Oid) } req, err := a.newHTTPRequest("PUT", rel) if err != nil { return err } if len(req.Header.Get("Content-Type")) == 0 { req.Header.Set("Content-Type", "application/octet-stream") } if req.Header.Get("Transfer-Encoding") == "chunked" { req.TransferEncoding = []string{"chunked"} } else { req.Header.Set("Content-Length", strconv.FormatInt(t.Size, 10)) } req.ContentLength = t.Size f, err := os.OpenFile(t.Path, os.O_RDONLY, 0644) if err != nil { return errors.Wrap(err, "basic upload") } defer f.Close() // Ensure progress callbacks made while uploading // Wrap callback to give name context ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error { if cb != nil { return cb(t.Name, totalSize, readSoFar, readSinceLast) } return nil } cbr := progress.NewBodyWithCallback(f, t.Size, ccb) var reader lfsapi.ReadSeekCloser = cbr // Signal auth was ok on first read; this frees up other workers to start if authOkFunc != nil { reader = newStartCallbackReader(reader, func() error { authOkFunc() return nil }) } req.Body = reader req = a.apiClient.LogRequest(req, "lfs.data.upload") res, err := a.doHTTP(t, req) if err != nil { // We're about to return a retriable error, meaning that this // transfer will either be retried, or it will fail. // // Either way, let's decrement the number of bytes that we've // read _so far_, so that the next iteration doesn't re-transfer // those bytes, according to the progress meter. if perr := cbr.ResetProgress(); perr != nil { err = errors.Wrap(err, perr.Error()) } return errors.NewRetriableError(err) } // A status code of 403 likely means that an authentication token for the // upload has expired. This can be safely retried. if res.StatusCode == 403 { err = errors.New("http: received status 403") return errors.NewRetriableError(err) } if res.StatusCode > 299 { return errors.Wrapf(nil, "Invalid status for %s %s: %d", req.Method, strings.SplitN(req.URL.String(), "?", 2)[0], res.StatusCode, ) } io.Copy(ioutil.Discard, res.Body) res.Body.Close() return verifyUpload(a.apiClient, a.remote, t) } // startCallbackReader is a reader wrapper which calls a function as soon as the // first Read() call is made. 
This callback is only made once type startCallbackReader struct { cb func() error cbDone bool lfsapi.ReadSeekCloser } func (s *startCallbackReader) Read(p []byte) (n int, err error) { if !s.cbDone && s.cb != nil { if err := s.cb(); err != nil { return 0, err } s.cbDone = true } return s.ReadSeekCloser.Read(p) } func newStartCallbackReader(r lfsapi.ReadSeekCloser, cb func() error) *startCallbackReader { return &startCallbackReader{ ReadSeekCloser: r, cb: cb, } } func configureBasicUploadAdapter(m *Manifest) { m.RegisterNewAdapterFunc(BasicAdapterName, Upload, func(name string, dir Direction) Adapter { switch dir { case Upload: bu := &basicUploadAdapter{newAdapterBase(name, dir, nil)} // self implements impl bu.transferImpl = bu return bu case Download: panic("Should never ask this func for basic download") } return nil }) } git-lfs-2.3.4/tq/custom.go000066400000000000000000000273621317167762300154010ustar00rootroot00000000000000package tq import ( "bufio" "bytes" "encoding/json" "fmt" "io" "path/filepath" "regexp" "strings" "time" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/tools" "github.com/git-lfs/git-lfs/subprocess" "github.com/rubyist/tracerx" ) // Adapter for custom transfer via external process type customAdapter struct { *adapterBase path string args string concurrent bool originalConcurrency int standalone bool } // Struct to capture stderr and write to trace type traceWriter struct { buf bytes.Buffer processName string } func (t *traceWriter) Write(b []byte) (int, error) { n, err := t.buf.Write(b) t.Flush() return n, err } func (t *traceWriter) Flush() { var err error for err == nil { var s string s, err = t.buf.ReadString('\n') if len(s) > 0 { tracerx.Printf("xfer[%v]: %v", t.processName, strings.TrimSpace(s)) } } } type customAdapterWorkerContext struct { workerNum int cmd *subprocess.Cmd stdout io.ReadCloser bufferedOut *bufio.Reader stdin io.WriteCloser errTracer *traceWriter } type customAdapterInitRequest struct { Event string `json:"event"` Operation string `json:"operation"` Remote string `json:"remote"` Concurrent bool `json:"concurrent"` ConcurrentTransfers int `json:"concurrenttransfers"` } func NewCustomAdapterInitRequest( op string, remote string, concurrent bool, concurrentTransfers int, ) *customAdapterInitRequest { return &customAdapterInitRequest{"init", op, remote, concurrent, concurrentTransfers} } type customAdapterTransferRequest struct { // common between upload/download Event string `json:"event"` Oid string `json:"oid"` Size int64 `json:"size"` Path string `json:"path,omitempty"` Action *Action `json:"action"` } func NewCustomAdapterUploadRequest(oid string, size int64, path string, action *Action) *customAdapterTransferRequest { return &customAdapterTransferRequest{"upload", oid, size, path, action} } func NewCustomAdapterDownloadRequest(oid string, size int64, action *Action) *customAdapterTransferRequest { return &customAdapterTransferRequest{"download", oid, size, "", action} } type customAdapterTerminateRequest struct { Event string `json:"event"` } func NewCustomAdapterTerminateRequest() *customAdapterTerminateRequest { return &customAdapterTerminateRequest{"terminate"} } // A common struct that allows all types of response to be identified type customAdapterResponseMessage struct { Event string `json:"event"` Error *ObjectError `json:"error"` Oid string `json:"oid"` Path string `json:"path,omitempty"` // always blank for upload BytesSoFar int64 `json:"bytesSoFar"` BytesSinceLast int `json:"bytesSinceLast"` } func (a 
*customAdapter) Begin(cfg AdapterConfig, cb ProgressCallback) error {
	a.originalConcurrency = cfg.ConcurrentTransfers()
	if a.concurrent {
		// Use common workers impl, but downgrade workers to number of processes
		return a.adapterBase.Begin(cfg, cb)
	}
	// If config says not to launch multiple processes, downgrade incoming value
	return a.adapterBase.Begin(&customAdapterConfig{AdapterConfig: cfg}, cb)
}

func (a *customAdapter) ClearTempStorage() error {
	// no action required
	return nil
}

func (a *customAdapter) WorkerStarting(workerNum int) (interface{}, error) {
	// Start a process per worker
	// If concurrent = false we have already dialled back workers to 1
	a.Trace("xfer: starting up custom transfer process %q for worker %d", a.name, workerNum)
	cmd := subprocess.ExecCommand(a.path, a.args)
	outp, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("Failed to get stdout for custom transfer command %q remote: %v", a.path, err)
	}
	inp, err := cmd.StdinPipe()
	if err != nil {
		return nil, fmt.Errorf("Failed to get stdin for custom transfer command %q remote: %v", a.path, err)
	}
	// Capture stderr to trace
	tracer := &traceWriter{}
	tracer.processName = filepath.Base(a.path)
	cmd.Stderr = tracer
	err = cmd.Start()
	if err != nil {
		return nil, fmt.Errorf("Failed to start custom transfer command %q remote: %v", a.path, err)
	}
	// Set up buffered reader/writer since we operate on lines
	ctx := &customAdapterWorkerContext{workerNum, cmd, outp, bufio.NewReader(outp), inp, tracer}

	// send initiate message
	initReq := NewCustomAdapterInitRequest(
		a.getOperationName(), a.remote, a.concurrent, a.originalConcurrency,
	)
	resp, err := a.exchangeMessage(ctx, initReq)
	if err != nil {
		a.abortWorkerProcess(ctx)
		return nil, err
	}
	if resp.Error != nil {
		a.abortWorkerProcess(ctx)
		return nil, fmt.Errorf("Error initializing custom adapter %q worker %d: %v", a.name, workerNum, resp.Error)
	}

	a.Trace("xfer: started custom adapter process %q for worker %d OK", a.path, workerNum)

	// Save this process context and use in future callbacks
	return ctx, nil
}

func (a *customAdapter) getOperationName() string {
	if a.direction == Download {
		return "download"
	}
	return "upload"
}

// sendMessage sends a JSON message to the custom adapter process
func (a *customAdapter) sendMessage(ctx *customAdapterWorkerContext, req interface{}) error {
	b, err := json.Marshal(req)
	if err != nil {
		return err
	}
	a.Trace("xfer: Custom adapter worker %d sending message: %v", ctx.workerNum, string(b))
	// Line oriented JSON
	b = append(b, '\n')
	_, err = ctx.stdin.Write(b)
	return err
}

func (a *customAdapter) readResponse(ctx *customAdapterWorkerContext) (*customAdapterResponseMessage, error) {
	line, err := ctx.bufferedOut.ReadString('\n')
	if err != nil {
		return nil, err
	}
	a.Trace("xfer: Custom adapter worker %d received response: %v", ctx.workerNum, strings.TrimSpace(line))
	resp := &customAdapterResponseMessage{}
	err = json.Unmarshal([]byte(line), resp)
	return resp, err
}

// exchangeMessage sends a message to a process and reads a response if resp != nil
// Only fatal errors to communicate return an error, errors may be embedded in reply
func (a *customAdapter) exchangeMessage(ctx *customAdapterWorkerContext, req interface{}) (*customAdapterResponseMessage, error) {
	err := a.sendMessage(ctx, req)
	if err != nil {
		return nil, err
	}
	return a.readResponse(ctx)
}

// shutdownWorkerProcess gracefully terminates a custom adapter process,
// returning an error if it couldn't shut down gracefully (caller may abortWorkerProcess)
func (a *customAdapter)
shutdownWorkerProcess(ctx *customAdapterWorkerContext) error { defer ctx.errTracer.Flush() a.Trace("xfer: Shutting down adapter worker %d", ctx.workerNum) finishChan := make(chan error, 1) go func() { termReq := NewCustomAdapterTerminateRequest() err := a.sendMessage(ctx, termReq) if err != nil { finishChan <- err } ctx.stdin.Close() ctx.stdout.Close() finishChan <- ctx.cmd.Wait() }() select { case err := <-finishChan: return err case <-time.After(30 * time.Second): return fmt.Errorf("Timeout while shutting down worker process %d", ctx.workerNum) } } // abortWorkerProcess terminates & aborts untidily, most probably breakdown of comms or internal error func (a *customAdapter) abortWorkerProcess(ctx *customAdapterWorkerContext) { a.Trace("xfer: Aborting worker process: %d", ctx.workerNum) ctx.stdin.Close() ctx.stdout.Close() ctx.cmd.Process.Kill() } func (a *customAdapter) WorkerEnding(workerNum int, ctx interface{}) { customCtx, ok := ctx.(*customAdapterWorkerContext) if !ok { tracerx.Printf("Context object for custom transfer %q was of the wrong type", a.name) return } err := a.shutdownWorkerProcess(customCtx) if err != nil { tracerx.Printf("xfer: error finishing up custom transfer process %q worker %d, aborting: %v", a.path, customCtx.workerNum, err) a.abortWorkerProcess(customCtx) } } func (a *customAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCallback, authOkFunc func()) error { if ctx == nil { return fmt.Errorf("Custom transfer %q was not properly initialized, see previous errors", a.name) } customCtx, ok := ctx.(*customAdapterWorkerContext) if !ok { return fmt.Errorf("Context object for custom transfer %q was of the wrong type", a.name) } var authCalled bool rel, err := t.Rel(a.getOperationName()) if err != nil { return err } if rel == nil && !a.standalone { return errors.Errorf("Object %s not found on the server.", t.Oid) } var req *customAdapterTransferRequest if a.direction == Upload { req = NewCustomAdapterUploadRequest(t.Oid, t.Size, t.Path, rel) } else { req = NewCustomAdapterDownloadRequest(t.Oid, t.Size, rel) } if err = a.sendMessage(customCtx, req); err != nil { return err } // 1..N replies (including progress & one of download / upload) var complete bool for !complete { resp, err := a.readResponse(customCtx) if err != nil { return err } var wasAuthOk bool switch resp.Event { case "progress": // Progress if resp.Oid != t.Oid { return fmt.Errorf("Unexpected oid %q in response, expecting %q", resp.Oid, t.Oid) } if cb != nil { cb(t.Name, t.Size, resp.BytesSoFar, resp.BytesSinceLast) } wasAuthOk = resp.BytesSoFar > 0 case "complete": // Download/Upload complete if resp.Oid != t.Oid { return fmt.Errorf("Unexpected oid %q in response, expecting %q", resp.Oid, t.Oid) } if resp.Error != nil { return fmt.Errorf("Error transferring %q: %v", t.Oid, resp.Error) } if a.direction == Download { // So we don't have to blindly trust external providers, check SHA if err = tools.VerifyFileHash(t.Oid, resp.Path); err != nil { return fmt.Errorf("Downloaded file failed checks: %v", err) } // Move file to final location if err = tools.RenameFileCopyPermissions(resp.Path, t.Path); err != nil { return fmt.Errorf("Failed to copy downloaded file: %v", err) } } else if a.direction == Upload { if err = verifyUpload(a.apiClient, a.remote, t); err != nil { return err } } wasAuthOk = true complete = true default: return fmt.Errorf("Invalid message %q from custom adapter %q", resp.Event, a.name) } // Fall through from both progress and completion messages // Call auth on first progress or 
success to free up other workers if wasAuthOk && authOkFunc != nil && !authCalled { authOkFunc() authCalled = true } } return nil } func newCustomAdapter(name string, dir Direction, path, args string, concurrent, standalone bool) *customAdapter { c := &customAdapter{newAdapterBase(name, dir, nil), path, args, concurrent, 3, standalone} // self implements impl c.transferImpl = c return c } // Initialise custom adapters based on current config func configureCustomAdapters(git Env, m *Manifest) { pathRegex := regexp.MustCompile(`lfs.customtransfer.([^.]+).path`) for k, _ := range git.All() { match := pathRegex.FindStringSubmatch(k) if match == nil { continue } name := match[1] path, _ := git.Get(k) // retrieve other values args, _ := git.Get(fmt.Sprintf("lfs.customtransfer.%s.args", name)) concurrent := git.Bool(fmt.Sprintf("lfs.customtransfer.%s.concurrent", name), true) direction, _ := git.Get(fmt.Sprintf("lfs.customtransfer.%s.direction", name)) if len(direction) == 0 { direction = "both" } else { direction = strings.ToLower(direction) } // Separate closure for each since we need to capture vars above newfunc := func(name string, dir Direction) Adapter { standalone := m.standaloneTransferAgent != "" return newCustomAdapter(name, dir, path, args, concurrent, standalone) } if direction == "download" || direction == "both" { m.RegisterNewAdapterFunc(name, Download, newfunc) } if direction == "upload" || direction == "both" { m.RegisterNewAdapterFunc(name, Upload, newfunc) } } } type customAdapterConfig struct { AdapterConfig } func (c *customAdapterConfig) ConcurrentTransfers() int { return 1 } git-lfs-2.3.4/tq/custom_test.go000066400000000000000000000105571317167762300164360ustar00rootroot00000000000000package tq import ( "testing" "github.com/git-lfs/git-lfs/lfsapi" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestCustomTransferBasicConfig(t *testing.T) { path := "/path/to/binary" cli, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.customtransfer.testsimple.path": path, })) require.Nil(t, err) m := NewManifestWithClient(cli) u := m.NewUploadAdapter("testsimple") assert.NotNil(t, u, "Upload adapter should be present") cu, _ := u.(*customAdapter) assert.NotNil(t, cu, "Upload adapter should be customAdapter") assert.Equal(t, cu.path, path, "Path should be correct") assert.Equal(t, cu.args, "", "args should be blank") assert.Equal(t, cu.concurrent, true, "concurrent should be defaulted") d := m.NewDownloadAdapter("testsimple") assert.NotNil(t, d, "Download adapter should be present") cd, _ := u.(*customAdapter) assert.NotNil(t, cd, "Download adapter should be customAdapter") assert.Equal(t, cd.path, path, "Path should be correct") assert.Equal(t, cd.args, "", "args should be blank") assert.Equal(t, cd.concurrent, true, "concurrent should be defaulted") } func TestCustomTransferDownloadConfig(t *testing.T) { path := "/path/to/binary" args := "-c 1 --whatever" cli, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.customtransfer.testdownload.path": path, "lfs.customtransfer.testdownload.args": args, "lfs.customtransfer.testdownload.concurrent": "false", "lfs.customtransfer.testdownload.direction": "download", })) require.Nil(t, err) m := NewManifestWithClient(cli) u := m.NewUploadAdapter("testdownload") assert.NotNil(t, u, "Upload adapter should always be created") cu, _ := u.(*customAdapter) assert.Nil(t, cu, "Upload adapter should NOT be custom (default to basic)") d := m.NewDownloadAdapter("testdownload") 
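// Since direction was configured as "download", the download side should
// yield the custom adapter (the upload side above fell back to basic).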
assert.NotNil(t, d, "Download adapter should be present") cd, _ := d.(*customAdapter) assert.NotNil(t, cd, "Download adapter should be customAdapter") assert.Equal(t, cd.path, path, "Path should be correct") assert.Equal(t, cd.args, args, "args should be correct") assert.Equal(t, cd.concurrent, false, "concurrent should be set") } func TestCustomTransferUploadConfig(t *testing.T) { path := "/path/to/binary" args := "-c 1 --whatever" cli, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.customtransfer.testupload.path": path, "lfs.customtransfer.testupload.args": args, "lfs.customtransfer.testupload.concurrent": "false", "lfs.customtransfer.testupload.direction": "upload", })) require.Nil(t, err) m := NewManifestWithClient(cli) d := m.NewDownloadAdapter("testupload") assert.NotNil(t, d, "Download adapter should always be created") cd, _ := d.(*customAdapter) assert.Nil(t, cd, "Download adapter should NOT be custom (default to basic)") u := m.NewUploadAdapter("testupload") assert.NotNil(t, u, "Upload adapter should be present") cu, _ := u.(*customAdapter) assert.NotNil(t, cu, "Upload adapter should be customAdapter") assert.Equal(t, cu.path, path, "Path should be correct") assert.Equal(t, cu.args, args, "args should be correct") assert.Equal(t, cu.concurrent, false, "concurrent should be set") } func TestCustomTransferBothConfig(t *testing.T) { path := "/path/to/binary" args := "-c 1 --whatever --yeah" cli, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.customtransfer.testboth.path": path, "lfs.customtransfer.testboth.args": args, "lfs.customtransfer.testboth.concurrent": "yes", "lfs.customtransfer.testboth.direction": "both", })) require.Nil(t, err) m := NewManifestWithClient(cli) d := m.NewDownloadAdapter("testboth") assert.NotNil(t, d, "Download adapter should be present") cd, _ := d.(*customAdapter) assert.NotNil(t, cd, "Download adapter should be customAdapter") assert.Equal(t, cd.path, path, "Path should be correct") assert.Equal(t, cd.args, args, "args should be correct") assert.Equal(t, cd.concurrent, true, "concurrent should be set") u := m.NewUploadAdapter("testboth") assert.NotNil(t, u, "Upload adapter should be present") cu, _ := u.(*customAdapter) assert.NotNil(t, cu, "Upload adapter should be customAdapter") assert.Equal(t, cu.path, path, "Path should be correct") assert.Equal(t, cu.args, args, "args should be correct") assert.Equal(t, cu.concurrent, true, "concurrent should be set") } git-lfs-2.3.4/tq/errors.go000066400000000000000000000012541317167762300153730ustar00rootroot00000000000000package tq import "fmt" type MalformedObjectError struct { Name string Oid string missing bool } func newObjectMissingError(name, oid string) error { return &MalformedObjectError{Name: name, Oid: oid, missing: true} } func newCorruptObjectError(name, oid string) error { return &MalformedObjectError{Name: name, Oid: oid, missing: false} } func (e MalformedObjectError) Missing() bool { return e.missing } func (e MalformedObjectError) Corrupt() bool { return !e.Missing() } func (e MalformedObjectError) Error() string { if e.Corrupt() { return fmt.Sprintf("corrupt object: %s (%s)", e.Name, e.Oid) } return fmt.Sprintf("missing object: %s (%s)", e.Name, e.Oid) } git-lfs-2.3.4/tq/errors_test.go000066400000000000000000000011011317167762300164210ustar00rootroot00000000000000package tq import ( "testing" "github.com/stretchr/testify/assert" ) func TestMissingObjectErrorsAreRecognizable(t *testing.T) { err := newObjectMissingError("some-name", 
"some-oid").(*MalformedObjectError) assert.Equal(t, "some-name", err.Name) assert.Equal(t, "some-oid", err.Oid) assert.True(t, err.Missing()) } func TestCorruptObjectErrorsAreRecognizable(t *testing.T) { err := newCorruptObjectError("some-name", "some-oid").(*MalformedObjectError) assert.Equal(t, "some-name", err.Name) assert.Equal(t, "some-oid", err.Oid) assert.True(t, err.Corrupt()) } git-lfs-2.3.4/tq/manifest.go000066400000000000000000000133731317167762300156720ustar00rootroot00000000000000package tq import ( "sync" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/lfsapi" "github.com/rubyist/tracerx" ) const ( defaultMaxRetries = 8 defaultConcurrentTransfers = 8 ) type Manifest struct { // maxRetries is the maximum number of retries a single object can // attempt to make before it will be dropped. maxRetries int concurrentTransfers int basicTransfersOnly bool standaloneTransferAgent string tusTransfersAllowed bool downloadAdapterFuncs map[string]NewAdapterFunc uploadAdapterFuncs map[string]NewAdapterFunc apiClient *lfsapi.Client tqClient *tqClient mu sync.Mutex } func (m *Manifest) APIClient() *lfsapi.Client { return m.apiClient } func (m *Manifest) MaxRetries() int { return m.maxRetries } func (m *Manifest) ConcurrentTransfers() int { return m.concurrentTransfers } func (m *Manifest) IsStandaloneTransfer() bool { return m.standaloneTransferAgent != "" } func (m *Manifest) batchClient() *tqClient { if r := m.MaxRetries(); r > 0 { m.tqClient.MaxRetries = r } return m.tqClient } func NewManifest() *Manifest { cli, err := lfsapi.NewClient(nil, nil) if err != nil { tracerx.Printf("unable to init tq.Manifest: %s", err) return nil } return NewManifestWithClient(cli) } func NewManifestWithClient(apiClient *lfsapi.Client) *Manifest { return NewManifestClientOperationRemote(apiClient, "", "") } func NewManifestClientOperationRemote( apiClient *lfsapi.Client, operation, remote string, ) *Manifest { m := &Manifest{ apiClient: apiClient, tqClient: &tqClient{Client: apiClient}, downloadAdapterFuncs: make(map[string]NewAdapterFunc), uploadAdapterFuncs: make(map[string]NewAdapterFunc), } var tusAllowed bool if git := apiClient.GitEnv(); git != nil { if v := git.Int("lfs.transfer.maxretries", 0); v > 0 { m.maxRetries = v } if v := git.Int("lfs.concurrenttransfers", 0); v > 0 { m.concurrentTransfers = v } m.basicTransfersOnly = git.Bool("lfs.basictransfersonly", false) m.standaloneTransferAgent = findStandaloneTransfer( apiClient, operation, remote, ) tusAllowed = git.Bool("lfs.tustransfers", false) configureCustomAdapters(git, m) } if m.maxRetries < 1 { m.maxRetries = defaultMaxRetries } if m.concurrentTransfers < 1 { m.concurrentTransfers = defaultConcurrentTransfers } configureBasicDownloadAdapter(m) configureBasicUploadAdapter(m) if tusAllowed { configureTusAdapter(m) } return m } func findStandaloneTransfer(client *lfsapi.Client, operation, remote string) string { if operation == "" || remote == "" { v, _ := client.GitEnv().Get("lfs.standalonetransferagent") return v } ep := client.Endpoints.RemoteEndpoint(operation, remote) uc := config.NewURLConfig(client.GitEnv()) v, ok := uc.Get("lfs", ep.Url, "standalonetransferagent") if !ok { return "" } return v } // GetAdapterNames returns a list of the names of adapters available to be created func (m *Manifest) GetAdapterNames(dir Direction) []string { switch dir { case Upload: return m.GetUploadAdapterNames() case Download: return m.GetDownloadAdapterNames() } return nil } // GetDownloadAdapterNames returns a list of the names of download 
adapters available to be created func (m *Manifest) GetDownloadAdapterNames() []string { return m.getAdapterNames(m.downloadAdapterFuncs) } // GetUploadAdapterNames returns a list of the names of upload adapters available to be created func (m *Manifest) GetUploadAdapterNames() []string { return m.getAdapterNames(m.uploadAdapterFuncs) } // getAdapterNames returns a list of the names of adapters available to be created func (m *Manifest) getAdapterNames(adapters map[string]NewAdapterFunc) []string { if m.basicTransfersOnly { return []string{BasicAdapterName} } m.mu.Lock() defer m.mu.Unlock() ret := make([]string, 0, len(adapters)) for n, _ := range adapters { ret = append(ret, n) } return ret } // RegisterNewTransferAdapterFunc registers a new function for creating upload // or download adapters. If a function with that name & direction is already // registered, it is overridden func (m *Manifest) RegisterNewAdapterFunc(name string, dir Direction, f NewAdapterFunc) { m.mu.Lock() defer m.mu.Unlock() switch dir { case Upload: m.uploadAdapterFuncs[name] = f case Download: m.downloadAdapterFuncs[name] = f } } // Create a new adapter by name and direction; default to BasicAdapterName if doesn't exist func (m *Manifest) NewAdapterOrDefault(name string, dir Direction) Adapter { if len(name) == 0 { name = BasicAdapterName } a := m.NewAdapter(name, dir) if a == nil { tracerx.Printf("Defaulting to basic transfer adapter since %q did not exist", name) a = m.NewAdapter(BasicAdapterName, dir) } return a } // Create a new adapter by name and direction, or nil if doesn't exist func (m *Manifest) NewAdapter(name string, dir Direction) Adapter { m.mu.Lock() defer m.mu.Unlock() switch dir { case Upload: if u, ok := m.uploadAdapterFuncs[name]; ok { return u(name, dir) } case Download: if d, ok := m.downloadAdapterFuncs[name]; ok { return d(name, dir) } } return nil } // Create a new download adapter by name, or BasicAdapterName if doesn't exist func (m *Manifest) NewDownloadAdapter(name string) Adapter { return m.NewAdapterOrDefault(name, Download) } // Create a new upload adapter by name, or BasicAdapterName if doesn't exist func (m *Manifest) NewUploadAdapter(name string) Adapter { return m.NewAdapterOrDefault(name, Upload) } // Env is any object with a config.Environment interface. 
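// A hedged sketch of a throwaway Env for experiments (mapEnv is a
// hypothetical type; the real tests use lfsapi.UniqTestEnv instead):
//
//	type mapEnv map[string][]string
//
//	func (m mapEnv) Get(k string) (string, bool) {
//		if vs := m[k]; len(vs) > 0 {
//			return vs[len(vs)-1], true
//		}
//		return "", false
//	}
//	func (m mapEnv) GetAll(k string) []string { return m[k] }
//	func (m mapEnv) All() map[string][]string { return m }
//	// Bool and Int would parse the last value with strconv,
//	// returning def when the key is unset or unparsable.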
type Env interface { Get(key string) (val string, ok bool) GetAll(key string) []string Bool(key string, def bool) (val bool) Int(key string, def int) (val int) All() map[string][]string } git-lfs-2.3.4/tq/manifest_test.go000066400000000000000000000023461317167762300167270ustar00rootroot00000000000000package tq import ( "testing" "github.com/git-lfs/git-lfs/lfsapi" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestManifestIsConfigurable(t *testing.T) { cli, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.transfer.maxretries": "3", })) require.Nil(t, err) m := NewManifestWithClient(cli) assert.Equal(t, 3, m.MaxRetries()) } func TestManifestChecksNTLM(t *testing.T) { cli, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.url": "http://foo", "lfs.http://foo.access": "ntlm", "lfs.concurrenttransfers": "3", })) require.Nil(t, err) m := NewManifestWithClient(cli) assert.Equal(t, 8, m.MaxRetries()) } func TestManifestClampsValidValues(t *testing.T) { cli, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.transfer.maxretries": "-1", })) require.Nil(t, err) m := NewManifestWithClient(cli) assert.Equal(t, 8, m.MaxRetries()) } func TestManifestIgnoresNonInts(t *testing.T) { cli, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.transfer.maxretries": "not_an_int", })) require.Nil(t, err) m := NewManifestWithClient(cli) assert.Equal(t, 8, m.MaxRetries()) } git-lfs-2.3.4/tq/schemas/000077500000000000000000000000001317167762300151515ustar00rootroot00000000000000git-lfs-2.3.4/tq/schemas/http-batch-request-schema.json000066400000000000000000000013721317167762300230310ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-04/schema", "title": "Git LFS HTTPS Batch API Request", "type": "object", "properties": { "transfers": { "type": "array", "items": { "type": "string" } }, "operation": { "type": "string" }, "objects": { "type": "array", "items": { "type": "object", "properties": { "oid": { "type": "string" }, "size": { "type": "number", "minimum": 0 }, "authenticated": { "type": "boolean" } }, "required": ["oid", "size"], "additionalProperties": false } } }, "required": ["objects", "operation"] } git-lfs-2.3.4/tq/schemas/http-batch-response-schema.json000066400000000000000000000037151317167762300232020ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-04/schema", "title": "Git LFS HTTPS Batch API Response", "type": "object", "definitions": { "action": { "type": "object", "properties": { "href": { "type": "string" }, "header": { "type": "object", "additionalProperties": true }, "expires_in": { "type": "number", "maximum": 2147483647, "minimum": -2147483647 }, "expires_at": { "type": "string" } }, "required": ["href"], "additionalProperties": false } }, "properties": { "transfer": { "type": "string" }, "objects": { "type": "array", "items": { "type": "object", "properties": { "oid": { "type": "string" }, "size": { "type": "number", "minimum": 0 }, "authenticated": { "type": "boolean" }, "actions": { "type": "object", "properties": { "download": { "$ref": "#/definitions/action" }, "upload": { "$ref": "#/definitions/action" }, "verify": { "$ref": "#/definitions/action" } }, "additionalProperties": false }, "error": { "type": "object", "properties": { "code": { "type": "number" }, "message": { "type": "string" } }, "required": ["code", "message"], "additionalProperties": false } }, "required": ["oid", "size"], "additionalProperties": false } }, 
"message": { "type": "string" }, "request_id": { "type": "string" }, "documentation_url": { "type": "string" } }, "required": ["objects"] } git-lfs-2.3.4/tq/transfer.go000066400000000000000000000155131317167762300157060ustar00rootroot00000000000000// Package transfer collects together adapters for uploading and downloading LFS content // NOTE: Subject to change, do not rely on this package from outside git-lfs source package tq import ( "fmt" "time" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/tools" ) type Direction int const ( Upload = Direction(iota) Download = Direction(iota) ) func (d Direction) String() string { switch d { case Download: return "download" case Upload: return "upload" default: return "" } } type Transfer struct { Name string `json:"name,omitempty"` Oid string `json:"oid,omitempty"` Size int64 `json:"size"` Authenticated bool `json:"authenticated,omitempty"` Actions ActionSet `json:"actions,omitempty"` Links ActionSet `json:"_links,omitempty"` Error *ObjectError `json:"error,omitempty"` Path string `json:"path,omitempty"` } func (t *Transfer) Rel(name string) (*Action, error) { a, err := t.Actions.Get(name) if a != nil || err != nil { return a, err } if t.Links != nil { a, err := t.Links.Get(name) if a != nil || err != nil { return a, err } } return nil, nil } type ObjectError struct { Code int `json:"code"` Message string `json:"message"` } func (e *ObjectError) Error() string { return fmt.Sprintf("[%d] %s", e.Code, e.Message) } // newTransfer returns a copy of the given Transfer, with the name and path // values set. func newTransfer(tr *Transfer, name string, path string) *Transfer { t := &Transfer{ Name: name, Path: path, Oid: tr.Oid, Size: tr.Size, Authenticated: tr.Authenticated, Actions: make(ActionSet), } if tr.Error != nil { t.Error = &ObjectError{ Code: tr.Error.Code, Message: tr.Error.Message, } } for rel, action := range tr.Actions { t.Actions[rel] = &Action{ Href: action.Href, Header: action.Header, ExpiresAt: action.ExpiresAt, ExpiresIn: action.ExpiresIn, createdAt: action.createdAt, } } if tr.Links != nil { t.Links = make(ActionSet) for rel, link := range tr.Links { t.Links[rel] = &Action{ Href: link.Href, Header: link.Header, ExpiresAt: link.ExpiresAt, ExpiresIn: link.ExpiresIn, createdAt: link.createdAt, } } } return t } type Action struct { Href string `json:"href"` Header map[string]string `json:"header,omitempty"` ExpiresAt time.Time `json:"expires_at,omitempty"` ExpiresIn int `json:"expires_in,omitempty"` createdAt time.Time `json:"-"` } func (a *Action) IsExpiredWithin(d time.Duration) (time.Time, bool) { return tools.IsExpiredAtOrIn(a.createdAt, d, a.ExpiresAt, time.Duration(a.ExpiresIn)*time.Second) } type ActionSet map[string]*Action const ( // objectExpirationToTransfer is the duration we expect to have passed // from the time that the object's expires_at (or expires_in) property // is checked to when the transfer is executed. 
objectExpirationToTransfer = 5 * time.Second ) func (as ActionSet) Get(rel string) (*Action, error) { a, ok := as[rel] if !ok { return nil, nil } if at, expired := a.IsExpiredWithin(objectExpirationToTransfer); expired { return nil, errors.NewRetriableError(&ActionExpiredErr{Rel: rel, At: at}) } return a, nil } type ActionExpiredErr struct { Rel string At time.Time } func (e ActionExpiredErr) Error() string { return fmt.Sprintf("tq: action %q expires at %s", e.Rel, e.At.In(time.Local).Format(time.RFC822)) } func IsActionExpiredError(err error) bool { if _, ok := err.(*ActionExpiredErr); ok { return true } return false } // NewAdapterFunc creates new instances of Adapter. Code that wishes // to provide new Adapter instances should pass an implementation of this // function to RegisterNewTransferAdapterFunc() on a *Manifest. // name and dir are to provide context if one func implements many instances type NewAdapterFunc func(name string, dir Direction) Adapter type ProgressCallback func(name string, totalSize, readSoFar int64, readSinceLast int) error type AdapterConfig interface { APIClient() *lfsapi.Client ConcurrentTransfers() int Remote() string } type adapterConfig struct { apiClient *lfsapi.Client concurrentTransfers int remote string } func (c *adapterConfig) ConcurrentTransfers() int { return c.concurrentTransfers } func (c *adapterConfig) APIClient() *lfsapi.Client { return c.apiClient } func (c *adapterConfig) Remote() string { return c.remote } // Adapter is implemented by types which can upload and/or download LFS // file content to a remote store. Each Adapter accepts one or more requests // which it may schedule and parallelise in whatever way it chooses, clients of // this interface will receive notifications of progress and completion asynchronously. // TransferAdapters support transfers in one direction; if an implementation // provides support for upload and download, it should be instantiated twice, // advertising support for each direction separately. // Note that Adapter only implements the actual upload/download of content // itself; organising the wider process including calling the API to get URLs, // handling progress reporting and retries is the job of the core TransferQueue. // This is so that the orchestration remains core & standard but Adapter // can be changed to physically transfer to different hosts with less code. type Adapter interface { // Name returns the name of this adapter, which is the same for all instances // of this type of adapter Name() string // Direction returns whether this instance is an upload or download instance // Adapter instances can only be one or the other, although the same // type may be instantiated for each direction Direction() Direction // Begin a new batch of uploads or downloads. Call this first, followed by // one or more Add calls. maxConcurrency controls the number of transfers // that may be done at once. The passed in callback will receive updates on // progress. Either argument may be nil if not required by the client. Begin(cfg AdapterConfig, cb ProgressCallback) error // Add queues a download/upload, which will complete asynchronously and // notify the callbacks given to Begin() Add(transfers ...*Transfer) (results <-chan TransferResult) // Indicate that all transfers have been scheduled and resources can be released // once the queued items have completed. 
// This call blocks until all items have been processed End() // ClearTempStorage clears any temporary files, such as unfinished downloads that // would otherwise be resumed ClearTempStorage() error } // Result of a transfer returned through CompletionChannel() type TransferResult struct { Transfer *Transfer // This will be non-nil if there was an error transferring this item Error error } git-lfs-2.3.4/tq/transfer_queue.go000066400000000000000000000543211317167762300171120ustar00rootroot00000000000000package tq import ( "os" "sort" "sync" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/progress" "github.com/rubyist/tracerx" ) const ( defaultBatchSize = 100 ) type retryCounter struct { MaxRetries int `git:"lfs.transfer.maxretries"` // cmu guards count cmu sync.Mutex // count maps OIDs to number of retry attempts count map[string]int } // newRetryCounter instantiates a new *retryCounter. It parses the gitconfig // value: `lfs.transfer.maxretries`, and falls back to defaultMaxRetries if none // was provided. // // If it encountered an error in Unmarshaling the *config.Configuration, it will // be returned, otherwise nil. func newRetryCounter() *retryCounter { return &retryCounter{ MaxRetries: defaultMaxRetries, count: make(map[string]int), } } // Increment increments the number of retries for a given OID. It is safe to // call across multiple goroutines. func (r *retryCounter) Increment(oid string) { r.cmu.Lock() defer r.cmu.Unlock() r.count[oid]++ } // CountFor returns the current number of retries for a given OID. It is safe to // call across multiple goroutines. func (r *retryCounter) CountFor(oid string) int { r.cmu.Lock() defer r.cmu.Unlock() return r.count[oid] } // CanRetry returns the current number of retries, and whether or not it exceeds // the maximum number of retries (see: retryCounter.MaxRetries). func (r *retryCounter) CanRetry(oid string) (int, bool) { count := r.CountFor(oid) return count, count < r.MaxRetries } // batch implements the sort.Interface interface and enables sorting on a slice // of `*Transfer`s by object size. // // This interface is implemented here so that the largest objects can be // processed first. Since adding a new batch is unable to occur until the // current batch has finished processing, this enables us to reduce the risk of // a single worker getting tied up on a large item at the end of a batch while // all other workers are sitting idle. type batch []*objectTuple // Concat concatenates two batches together, returning a single, clamped batch as // "left", and the remainder of elements as "right". If the union of the // receiver and "other" has cardinality less than "size", "right" will be // returned as nil. func (b batch) Concat(other batch, size int) (left, right batch) { u := batch(append(b, other...)) if len(u) <= size { return u, nil } return u[:size], u[size:] } func (b batch) ToTransfers() []*Transfer { transfers := make([]*Transfer, 0, len(b)) for _, t := range b { transfers = append(transfers, &Transfer{Oid: t.Oid, Size: t.Size}) } return transfers } func (b batch) Len() int { return len(b) } func (b batch) Less(i, j int) bool { return b[i].Size < b[j].Size } func (b batch) Swap(i, j int) { b[i], b[j] = b[j], b[i] } // TransferQueue organises the wider process of uploading and downloading, // including calling the API, passing the actual transfer request to transfer // adapters, and dealing with progress, errors and retries. 
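// A typical lifecycle, shown as a hedged sketch using only names defined
// in this package:
//
//	q := NewTransferQueue(Upload, manifest, "origin", WithBatchSize(100))
//	q.Add(name, path, oid, size)
//	q.Wait()
//	for _, err := range q.Errors() {
//		// report err
//	}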
type TransferQueue struct { direction Direction client *tqClient remote string adapter Adapter adapterInProgress bool adapterInitMutex sync.Mutex dryRun bool cb progress.CopyCallback meter progress.Meter errors []error transfers map[string]*objects batchSize int bufferDepth int incoming chan *objectTuple // Channel for processing incoming items errorc chan error // Channel for processing errors watchers []chan *Transfer trMutex *sync.Mutex collectorWait sync.WaitGroup errorwait sync.WaitGroup // wait is used to keep track of pending transfers. It is incremented // once per unique OID on Add(), and is decremented when that transfer // is marked as completed or failed, but not retried. wait sync.WaitGroup manifest *Manifest rc *retryCounter } // objects holds a set of objects. type objects struct { completed bool objects []*objectTuple } // All returns all *objectTuple's contained in the *objects set. func (s *objects) All() []*objectTuple { return s.objects } // Append returns a new *objects with the given *objectTuple(s) appended to the // end of the known objects. func (s *objects) Append(os ...*objectTuple) *objects { return &objects{ completed: s.completed, objects: append(s.objects, os...), } } // First returns the first *objectTuple in the chain of objects. func (s *objects) First() *objectTuple { if len(s.objects) == 0 { return nil } return s.objects[0] } type objectTuple struct { Name, Path, Oid string Size int64 } func (o *objectTuple) ToTransfer() *Transfer { return &Transfer{ Name: o.Name, Path: o.Path, Oid: o.Oid, Size: o.Size, } } type Option func(*TransferQueue) func DryRun(dryRun bool) Option { return func(tq *TransferQueue) { tq.dryRun = dryRun } } func WithProgress(m progress.Meter) Option { return func(tq *TransferQueue) { tq.meter = m } } func WithProgressCallback(cb progress.CopyCallback) Option { return func(tq *TransferQueue) { tq.cb = cb } } func WithBatchSize(size int) Option { return func(tq *TransferQueue) { tq.batchSize = size } } func WithBufferDepth(depth int) Option { return func(tq *TransferQueue) { tq.bufferDepth = depth } } // NewTransferQueue builds a TransferQueue, direction and underlying mechanism determined by adapter func NewTransferQueue(dir Direction, manifest *Manifest, remote string, options ...Option) *TransferQueue { q := &TransferQueue{ direction: dir, client: &tqClient{Client: manifest.APIClient()}, remote: remote, errorc: make(chan error), transfers: make(map[string]*objects), trMutex: &sync.Mutex{}, manifest: manifest, rc: newRetryCounter(), } for _, opt := range options { opt(q) } q.rc.MaxRetries = q.manifest.maxRetries q.client.MaxRetries = q.manifest.maxRetries if q.batchSize <= 0 { q.batchSize = defaultBatchSize } if q.bufferDepth <= 0 { q.bufferDepth = q.batchSize } q.incoming = make(chan *objectTuple, q.bufferDepth) if q.meter == nil { q.meter = progress.Noop() } q.collectorWait.Add(1) q.errorwait.Add(1) q.run() return q } // Add adds a *Transfer to the transfer queue. It only increments the amount // of waiting the TransferQueue has to do if the *Transfer "t" is new. // // If another transfer(s) with the same OID has been added to the *TransferQueue // already, the given transfer will not be enqueued, but will be sent to any // channel created by Watch() once the oldest transfer has completed. // // Only one file will be transferred to/from the Path element of the first // transfer. 
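// Callers that need per-object completion events can register a watcher
// before adding work (sketch):
//
//	events := q.Watch()
//	go func() {
//		for t := range events {
//			// one *Transfer per Add()ed tuple, including duplicate
//			// OIDs; the channel is closed by Wait()
//			_ = t
//		}
//	}()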
func (q *TransferQueue) Add(name, path, oid string, size int64) { t := &objectTuple{ Name: name, Path: path, Oid: oid, Size: size, } if objs := q.remember(t); len(objs.objects) > 1 { if objs.completed { // If there is already a completed transfer chain for // this OID, then this object is already "done", and can // be sent through as completed to the watchers. for _, w := range q.watchers { w <- t.ToTransfer() } } // If the chain is not done, there is no reason to enqueue this // transfer into 'q.incoming'. tracerx.Printf("already transferring %q, skipping duplicate", t.Oid) return } q.incoming <- t } // remember remembers the *Transfer "t" if the *TransferQueue doesn't already // know about a Transfer with the same OID. // // It returns if the value is new or not. func (q *TransferQueue) remember(t *objectTuple) objects { q.trMutex.Lock() defer q.trMutex.Unlock() if _, ok := q.transfers[t.Oid]; !ok { q.wait.Add(1) q.transfers[t.Oid] = &objects{ objects: []*objectTuple{t}, } return *q.transfers[t.Oid] } q.transfers[t.Oid] = q.transfers[t.Oid].Append(t) return *q.transfers[t.Oid] } // collectBatches collects batches in a loop, prioritizing failed items from the // previous before adding new items. The process works as follows: // // 1. Create a new batch, of size `q.batchSize`, and containing no items // 2. While the batch contains less items than `q.batchSize` AND the channel // is open, read one item from the `q.incoming` channel. // a. If the read was a channel close, go to step 4. // b. If the read was a transferable item, go to step 3. // 3. Append the item to the batch. // 4. Sort the batch by descending object size, make a batch API call, send // the items to the `*adapterBase`. // 5. In a separate goroutine, process the worker results, incrementing and // appending retries if possible. On the main goroutine, accept new items // into "pending". // 6. Concat() the "next" and "pending" batches such that no more items than // the maximum allowed per batch are in next, and the rest are in pending. // 7. If the `q.incoming` channel is open, go to step 2. // 8. If the next batch is empty AND the `q.incoming` channel is closed, // terminate immediately. // // collectBatches runs in its own goroutine. func (q *TransferQueue) collectBatches() { defer q.collectorWait.Done() var closing bool next := q.makeBatch() pending := q.makeBatch() for { for !closing && (len(next) < q.batchSize) { t, ok := <-q.incoming if !ok { closing = true break } next = append(next, t) } // Before enqueuing the next batch, sort by descending object // size. sort.Sort(sort.Reverse(next)) done := make(chan struct{}) var retries batch go func() { var err error retries, err = q.enqueueAndCollectRetriesFor(next) if err != nil { q.errorc <- err } close(done) }() var collected batch collected, closing = q.collectPendingUntil(done) // Ensure the next batch is filled with, in order: // // - retries from the previous batch, // - new additions that were enqueued behind retries, & // - items collected while the batch was processing. next, pending = retries.Concat(append(pending, collected...), q.batchSize) if closing && len(next) == 0 { // If len(next) == 0, there are no items in "pending", // and it is safe to exit. break } } } // collectPendingUntil collects items from q.incoming into a "pending" batch // until the given "done" channel is written to, or is closed. // // A "pending" batch is returned, along with whether or not "q.incoming" is // closed. 
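// collectBatches drives "done" from the goroutine processing the current
// batch, roughly:
//
//	done := make(chan struct{})
//	go func() {
//		// retries, err = q.enqueueAndCollectRetriesFor(next) ...
//		close(done)
//	}()
//	collected, closing := q.collectPendingUntil(done)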
func (q *TransferQueue) collectPendingUntil(done <-chan struct{}) (pending batch, closing bool) { for { select { case t, ok := <-q.incoming: if !ok { closing = true <-done return } pending = append(pending, t) case <-done: return } } } // enqueueAndCollectRetriesFor makes a Batch API call and returns a "next" batch // containing all of the objects that failed from the previous batch and had // retries availale to them. // // If an error was encountered while making the API request, _all_ of the items // from the previous batch (that have retries available to them) will be // returned immediately, along with the error that was encountered. // // enqueueAndCollectRetriesFor blocks until the entire Batch "batch" has been // processed. func (q *TransferQueue) enqueueAndCollectRetriesFor(batch batch) (batch, error) { next := q.makeBatch() tracerx.Printf("tq: sending batch of size %d", len(batch)) q.meter.Pause() var bRes *BatchResponse if q.manifest.standaloneTransferAgent != "" { // Trust the external transfer agent can do everything by itself. objects := make([]*Transfer, 0, len(batch)) for _, t := range batch { objects = append(objects, &Transfer{Oid: t.Oid, Size: t.Size, Path: t.Path}) } bRes = &BatchResponse{ Objects: objects, TransferAdapterName: q.manifest.standaloneTransferAgent, } } else { // Query the Git LFS server for what transfer method to use and // details such as URLs, authentication, etc. var err error bRes, err = Batch(q.manifest, q.direction, q.remote, batch.ToTransfers()) if err != nil { // If there was an error making the batch API call, mark all of // the objects for retry, and return them along with the error // that was encountered. If any of the objects couldn't be // retried, they will be marked as failed. for _, t := range batch { if q.canRetryObject(t.Oid, err) { q.rc.Increment(t.Oid) next = append(next, t) } else { q.wait.Done() } } return next, err } } if len(bRes.Objects) == 0 { return next, nil } q.useAdapter(bRes.TransferAdapterName) q.meter.Start() toTransfer := make([]*Transfer, 0, len(bRes.Objects)) for _, o := range bRes.Objects { if o.Error != nil { q.errorc <- errors.Wrapf(o.Error, "[%v] %v", o.Oid, o.Error.Message) q.Skip(o.Size) q.wait.Done() continue } q.trMutex.Lock() objects, ok := q.transfers[o.Oid] q.trMutex.Unlock() if !ok { // If we couldn't find any associated // Transfer object, then we give up on the // transfer by telling the progress meter to // skip the number of bytes in "o". q.errorc <- errors.Errorf("[%v] The server returned an unknown OID.", o.Oid) q.Skip(o.Size) q.wait.Done() } else { // Pick t[0], since it will cover all transfers with the // same OID. 
tr := newTransfer(o, objects.First().Name, objects.First().Path) if a, err := tr.Rel(q.direction.String()); err != nil { // XXX(taylor): duplication if q.canRetryObject(tr.Oid, err) { q.rc.Increment(tr.Oid) count := q.rc.CountFor(tr.Oid) tracerx.Printf("tq: enqueue retry #%d for %q (size: %d): %s", count, tr.Oid, tr.Size, err) next = append(next, objects.First()) } else { q.errorc <- errors.Errorf("[%v] %v", tr.Name, err) q.Skip(o.Size) q.wait.Done() } } else if a == nil && q.manifest.standaloneTransferAgent == "" { q.Skip(o.Size) q.wait.Done() } else { q.meter.StartTransfer(objects.First().Name) toTransfer = append(toTransfer, tr) } } } retries := q.addToAdapter(bRes.endpoint, toTransfer) for t := range retries { q.rc.Increment(t.Oid) count := q.rc.CountFor(t.Oid) tracerx.Printf("tq: enqueue retry #%d for %q (size: %d)", count, t.Oid, t.Size) next = append(next, t) } return next, nil } // makeBatch returns a new, empty batch, with a capacity equal to the maximum // batch size designated by the `*TransferQueue`. func (q *TransferQueue) makeBatch() batch { return make(batch, 0, q.batchSize) } // addToAdapter adds the given "pending" transfers to the transfer adapters and // returns a channel of Transfers that are to be retried in the next batch. // After all of the items in the batch have been processed, the channel is // closed. // // addToAdapter returns immediately, and does not block. func (q *TransferQueue) addToAdapter(e lfsapi.Endpoint, pending []*Transfer) <-chan *objectTuple { retries := make(chan *objectTuple, len(pending)) if err := q.ensureAdapterBegun(e); err != nil { close(retries) q.errorc <- err for _, t := range pending { q.Skip(t.Size) q.wait.Done() } return retries } present, missingResults := q.partitionTransfers(pending) go func() { defer close(retries) var results <-chan TransferResult if q.dryRun { results = q.makeDryRunResults(present) } else { results = q.adapter.Add(present...) } for _, res := range missingResults { q.handleTransferResult(res, retries) } for res := range results { q.handleTransferResult(res, retries) } }() return retries } func (q *TransferQueue) partitionTransfers(transfers []*Transfer) (present []*Transfer, results []TransferResult) { if q.direction != Upload { return transfers, nil } present = make([]*Transfer, 0, len(transfers)) results = make([]TransferResult, 0, len(transfers)) for _, t := range transfers { var err error if t.Size < 0 { err = errors.Errorf("Git LFS: object %q has invalid size (got: %d)", t.Oid, t.Size) } else { fd, serr := os.Stat(t.Path) if serr != nil { if os.IsNotExist(serr) { err = newObjectMissingError(t.Name, t.Oid) } else { err = serr } } else if t.Size != fd.Size() { err = newCorruptObjectError(t.Name, t.Oid) } } if err != nil { results = append(results, TransferResult{ Transfer: t, Error: err, }) } else { present = append(present, t) } } return } // makeDryRunResults returns a channel populated immediately with "successful" // results for all of the given transfers in "ts". func (q *TransferQueue) makeDryRunResults(ts []*Transfer) <-chan TransferResult { results := make(chan TransferResult, len(ts)) for _, t := range ts { results <- TransferResult{t, nil} } close(results) return results } // handleTransferResult observes the transfer result, sending it on the retries // channel if it was able to be retried. 
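// Tuples written to "retries" are drained in enqueueAndCollectRetriesFor,
// which increments their retry counts and folds them into the next batch.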
func (q *TransferQueue) handleTransferResult( res TransferResult, retries chan<- *objectTuple, ) { oid := res.Transfer.Oid if res.Error != nil { // If there was an error encountered when processing the // transfer (res.Transfer), handle the error as is appropriate: if q.canRetryObject(oid, res.Error) { // If the object can be retried, send it on the retries // channel, where it will be read at the call-site and // its retry count will be incremented. tracerx.Printf("tq: retrying object %s: %s", oid, res.Error) q.trMutex.Lock() objects, ok := q.transfers[oid] q.trMutex.Unlock() if ok { retries <- objects.First() } else { q.errorc <- res.Error } } else { // If the error wasn't retriable, OR the object has // exceeded its retry budget, it will be NOT be sent to // the retry channel, and the error will be reported // immediately. q.errorc <- res.Error q.wait.Done() } } else { q.trMutex.Lock() objects := q.transfers[oid] objects.completed = true // Otherwise, if the transfer was successful, notify all of the // watchers, and mark it as finished. for _, c := range q.watchers { // Send one update for each transfer with the // same OID. for _, t := range objects.All() { c <- &Transfer{ Name: t.Name, Path: t.Path, Oid: t.Oid, Size: t.Size, } } } q.trMutex.Unlock() q.meter.FinishTransfer(res.Transfer.Name) q.wait.Done() } } func (q *TransferQueue) useAdapter(name string) { q.adapterInitMutex.Lock() defer q.adapterInitMutex.Unlock() if q.adapter != nil { if q.adapter.Name() == name { // re-use, this is the normal path return } // If the adapter we're using isn't the same as the one we've been // told to use now, must wait for the current one to finish then switch // This will probably never happen but is just in case server starts // changing adapter support in between batches q.finishAdapter() } q.adapter = q.manifest.NewAdapterOrDefault(name, q.direction) } func (q *TransferQueue) finishAdapter() { if q.adapterInProgress { q.adapter.End() q.adapterInProgress = false q.adapter = nil } } // BatchSize returns the batch size of the receiving *TransferQueue, or, the // number of transfers to accept before beginning work on them. func (q *TransferQueue) BatchSize() int { return q.batchSize } func (q *TransferQueue) Skip(size int64) { q.meter.Skip(size) } func (q *TransferQueue) ensureAdapterBegun(e lfsapi.Endpoint) error { q.adapterInitMutex.Lock() defer q.adapterInitMutex.Unlock() if q.adapterInProgress { return nil } // Progress callback - receives byte updates cb := func(name string, total, read int64, current int) error { q.meter.TransferBytes(q.direction.String(), name, read, total, current) if q.cb != nil { // NOTE: this is the mechanism by which the logpath // specified by GIT_LFS_PROGRESS is written to. // // See: lfs.downloadFile() for more. q.cb(total, read, current) } return nil } tracerx.Printf("tq: starting transfer adapter %q", q.adapter.Name()) err := q.adapter.Begin(q.toAdapterCfg(e), cb) if err != nil { return err } q.adapterInProgress = true return nil } func (q *TransferQueue) toAdapterCfg(e lfsapi.Endpoint) AdapterConfig { apiClient := q.manifest.APIClient() concurrency := q.manifest.ConcurrentTransfers() if apiClient.Endpoints.AccessFor(e.Url) == lfsapi.NTLMAccess { concurrency = 1 } return &adapterConfig{ concurrentTransfers: concurrency, apiClient: apiClient, remote: q.remote, } } // Wait waits for the queue to finish processing all transfers. Once Wait is // called, Add will no longer add transfers to the queue. Any failed // transfers will be automatically retried once. 
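// (More precisely, a failed transfer is retried while it still has retry
// budget; see retryCounter and lfs.transfer.maxretries.)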
func (q *TransferQueue) Wait() {
	close(q.incoming)

	q.wait.Wait()
	q.collectorWait.Wait()

	q.finishAdapter()
	close(q.errorc)

	for _, watcher := range q.watchers {
		close(watcher)
	}

	q.meter.Finish()
	q.errorwait.Wait()
}

// Watch returns a channel where the queue will write the value of each transfer
// as it completes. If multiple transfers exist with the same OID, they will all
// be recorded here, even though only one actual transfer took place. The
// channel will be closed when the queue finishes processing.
func (q *TransferQueue) Watch() chan *Transfer {
	c := make(chan *Transfer, q.batchSize)
	q.watchers = append(q.watchers, c)
	return c
}

// This goroutine collects errors returned from transfers
func (q *TransferQueue) errorCollector() {
	for err := range q.errorc {
		q.errors = append(q.errors, err)
	}
	q.errorwait.Done()
}

// run begins the transfer queue. It transfers files sequentially or
// concurrently depending on the Config.ConcurrentTransfers() value.
func (q *TransferQueue) run() {
	tracerx.Printf("tq: running as batched queue, batch size of %d", q.batchSize)
	go q.errorCollector()
	go q.collectBatches()
}

// canRetry returns whether or not the given error "err" is retriable.
func (q *TransferQueue) canRetry(err error) bool {
	return errors.IsRetriableError(err)
}

// canRetryObject returns whether the given error is retriable for the object
// given by "oid". If an OID has met its retry limit, then it will not be
// able to be retried again. If so, canRetryObject returns whether or not that
// given error "err" is retriable.
func (q *TransferQueue) canRetryObject(oid string, err error) bool {
	if count, ok := q.rc.CanRetry(oid); !ok {
		tracerx.Printf("tq: refusing to retry %q, too many retries (%d)", oid, count)
		return false
	}

	return q.canRetry(err)
}

// Errors returns any errors encountered during transfer.
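// Note that reading the returned slice is only safe once Wait has returned,
// since the errorCollector goroutine keeps appending until q.errorc is closed.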
func (q *TransferQueue) Errors() []error { return q.errors } git-lfs-2.3.4/tq/transfer_queue_test.go000066400000000000000000000015211317167762300201430ustar00rootroot00000000000000package tq import ( "testing" "github.com/stretchr/testify/assert" ) func TestManifestDefaultsToFixedRetries(t *testing.T) { assert.Equal(t, 8, NewManifest().MaxRetries()) } func TestRetryCounterDefaultsToFixedRetries(t *testing.T) { rc := newRetryCounter() assert.Equal(t, 8, rc.MaxRetries) } func TestRetryCounterIncrementsObjects(t *testing.T) { rc := newRetryCounter() rc.Increment("oid") assert.Equal(t, 1, rc.CountFor("oid")) } func TestRetryCounterCanNotRetryAfterExceedingRetryCount(t *testing.T) { rc := newRetryCounter() rc.MaxRetries = 1 rc.Increment("oid") count, canRetry := rc.CanRetry("oid") assert.Equal(t, 1, count) assert.False(t, canRetry) } func TestBatchSizeReturnsBatchSize(t *testing.T) { q := NewTransferQueue( Upload, NewManifest(), "origin", WithBatchSize(3)) assert.Equal(t, 3, q.BatchSize()) } git-lfs-2.3.4/tq/transfer_test.go000066400000000000000000000063261317167762300167470ustar00rootroot00000000000000package tq import ( "testing" "github.com/git-lfs/git-lfs/lfsapi" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type testAdapter struct { name string dir Direction } func (a *testAdapter) Name() string { return a.name } func (a *testAdapter) Direction() Direction { return a.dir } func (a *testAdapter) Begin(cfg AdapterConfig, cb ProgressCallback) error { return nil } func (a *testAdapter) Add(ts ...*Transfer) (retries <-chan TransferResult) { return nil } func (a *testAdapter) End() { } func (a *testAdapter) ClearTempStorage() error { return nil } func newTestAdapter(name string, dir Direction) Adapter { return &testAdapter{name, dir} } func newRenamedTestAdapter(name string, dir Direction) Adapter { return &testAdapter{"RENAMED", dir} } func testBasicAdapterExists(t *testing.T) { m := NewManifest() assert := assert.New(t) dls := m.GetDownloadAdapterNames() if assert.NotNil(dls) { assert.Equal([]string{"basic"}, dls) } uls := m.GetUploadAdapterNames() if assert.NotNil(uls) { assert.Equal([]string{"basic"}, uls) } da := m.NewDownloadAdapter("basic") if assert.NotNil(da) { assert.Equal("basic", da.Name()) assert.Equal(Download, da.Direction()) } ua := m.NewUploadAdapter("basic") if assert.NotNil(ua) { assert.Equal("basic", ua.Name()) assert.Equal(Upload, ua.Direction()) } } func testAdapterRegAndOverride(t *testing.T) { m := NewManifest() assert := assert.New(t) assert.Nil(m.NewDownloadAdapter("test")) assert.Nil(m.NewUploadAdapter("test")) m.RegisterNewAdapterFunc("test", Upload, newTestAdapter) assert.Nil(m.NewDownloadAdapter("test")) assert.NotNil(m.NewUploadAdapter("test")) m.RegisterNewAdapterFunc("test", Download, newTestAdapter) da := m.NewDownloadAdapter("test") if assert.NotNil(da) { assert.Equal("test", da.Name()) assert.Equal(Download, da.Direction()) } ua := m.NewUploadAdapter("test") if assert.NotNil(ua) { assert.Equal("test", ua.Name()) assert.Equal(Upload, ua.Direction()) } // Test override m.RegisterNewAdapterFunc("test", Upload, newRenamedTestAdapter) ua = m.NewUploadAdapter("test") if assert.NotNil(ua) { assert.Equal("RENAMED", ua.Name()) assert.Equal(Upload, ua.Direction()) } da = m.NewDownloadAdapter("test") if assert.NotNil(da) { assert.Equal("test", da.Name()) assert.Equal(Download, da.Direction()) } m.RegisterNewAdapterFunc("test", Download, newRenamedTestAdapter) da = m.NewDownloadAdapter("test") if assert.NotNil(da) { assert.Equal("RENAMED", 
da.Name()) assert.Equal(Download, da.Direction()) } } func testAdapterRegButBasicOnly(t *testing.T) { cli, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv(map[string]string{ "lfs.basictransfersonly": "yes", })) require.Nil(t, err) m := NewManifestWithClient(cli) assert := assert.New(t) m.RegisterNewAdapterFunc("test", Upload, newTestAdapter) m.RegisterNewAdapterFunc("test", Download, newTestAdapter) // Will still be created if we ask for them assert.NotNil(m.NewUploadAdapter("test")) assert.NotNil(m.NewDownloadAdapter("test")) // But list will exclude ld := m.GetDownloadAdapterNames() assert.Equal([]string{BasicAdapterName}, ld) lu := m.GetUploadAdapterNames() assert.Equal([]string{BasicAdapterName}, lu) } git-lfs-2.3.4/tq/tus_upload.go000066400000000000000000000117451317167762300162440ustar00rootroot00000000000000package tq import ( "fmt" "io" "io/ioutil" "os" "strconv" "strings" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/progress" ) const ( TusAdapterName = "tus" TusVersion = "1.0.0" ) // Adapter for tus.io protocol resumaable uploads type tusUploadAdapter struct { *adapterBase } func (a *tusUploadAdapter) ClearTempStorage() error { // nothing to do, all temp state is on the server end return nil } func (a *tusUploadAdapter) WorkerStarting(workerNum int) (interface{}, error) { return nil, nil } func (a *tusUploadAdapter) WorkerEnding(workerNum int, ctx interface{}) { } func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCallback, authOkFunc func()) error { rel, err := t.Rel("upload") if err != nil { return err } if rel == nil { return errors.Errorf("No upload action for object: %s", t.Oid) } // Note not supporting the Creation extension since the batch API generates URLs // Also not supporting Concatenation to support parallel uploads of chunks; forward only // 1. Send HEAD request to determine upload start point // Request must include Tus-Resumable header (version) a.Trace("xfer: sending tus.io HEAD request for %q", t.Oid) req, err := a.newHTTPRequest("HEAD", rel) if err != nil { return err } req.Header.Set("Tus-Resumable", TusVersion) res, err := a.doHTTP(t, req) if err != nil { return errors.NewRetriableError(err) } // Response will contain Upload-Offset if supported offHdr := res.Header.Get("Upload-Offset") if len(offHdr) == 0 { return fmt.Errorf("Missing Upload-Offset header from tus.io HEAD response at %q, contact server admin", rel.Href) } offset, err := strconv.ParseInt(offHdr, 10, 64) if err != nil || offset < 0 { return fmt.Errorf("Invalid Upload-Offset value %q in response from tus.io HEAD at %q, contact server admin", offHdr, rel.Href) } // Upload-Offset=size means already completed (skip) // Batch API will probably already detect this, but handle just in case if offset >= t.Size { a.Trace("xfer: tus.io HEAD offset %d indicates %q is already fully uploaded, skipping", offset, t.Oid) advanceCallbackProgress(cb, t, t.Size) return nil } // Open file for uploading f, err := os.OpenFile(t.Path, os.O_RDONLY, 0644) if err != nil { return errors.Wrap(err, "tus upload") } defer f.Close() // Upload-Offset=0 means start from scratch, but still send PATCH if offset == 0 { a.Trace("xfer: tus.io uploading %q from start", t.Oid) } else { a.Trace("xfer: tus.io resuming upload %q from %d", t.Oid, offset) advanceCallbackProgress(cb, t, offset) } // 2. 
Send PATCH request with byte start point (even if 0) in Upload-Offset // Response status must be 204 // Response Upload-Offset must be request Upload-Offset plus sent bytes // Response may include Upload-Expires header in which case check not passed a.Trace("xfer: sending tus.io PATCH request for %q", t.Oid) req, err = a.newHTTPRequest("PATCH", rel) if err != nil { return err } req.Header.Set("Tus-Resumable", TusVersion) req.Header.Set("Upload-Offset", strconv.FormatInt(offset, 10)) req.Header.Set("Content-Type", "application/offset+octet-stream") req.Header.Set("Content-Length", strconv.FormatInt(t.Size-offset, 10)) req.ContentLength = t.Size - offset // Ensure progress callbacks made while uploading // Wrap callback to give name context ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error { if cb != nil { return cb(t.Name, totalSize, readSoFar, readSinceLast) } return nil } var reader lfsapi.ReadSeekCloser = progress.NewBodyWithCallback(f, t.Size, ccb) reader = newStartCallbackReader(reader, func() error { // seek to the offset since lfsapi.Client rewinds the body if _, err := f.Seek(offset, os.SEEK_CUR); err != nil { return err } // Signal auth was ok on first read; this frees up other workers to start if authOkFunc != nil { authOkFunc() } return nil }) req.Body = reader req = a.apiClient.LogRequest(req, "lfs.data.upload") res, err = a.doHTTP(t, req) if err != nil { return errors.NewRetriableError(err) } // A status code of 403 likely means that an authentication token for the // upload has expired. This can be safely retried. if res.StatusCode == 403 { err = errors.New("http: received status 403") return errors.NewRetriableError(err) } if res.StatusCode > 299 { return errors.Wrapf(nil, "Invalid status for %s %s: %d", req.Method, strings.SplitN(req.URL.String(), "?", 2)[0], res.StatusCode, ) } io.Copy(ioutil.Discard, res.Body) res.Body.Close() return verifyUpload(a.apiClient, a.remote, t) } func configureTusAdapter(m *Manifest) { m.RegisterNewAdapterFunc(TusAdapterName, Upload, func(name string, dir Direction) Adapter { switch dir { case Upload: bu := &tusUploadAdapter{newAdapterBase(name, dir, nil)} // self implements impl bu.transferImpl = bu return bu case Download: panic("Should never ask tus.io to download") } return nil }) } git-lfs-2.3.4/tq/verify.go000066400000000000000000000024721317167762300153660ustar00rootroot00000000000000package tq import ( "net/http" "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/tools" "github.com/rubyist/tracerx" ) const ( maxVerifiesConfigKey = "lfs.transfer.maxverifies" defaultMaxVerifyAttempts = 3 ) func verifyUpload(c *lfsapi.Client, remote string, t *Transfer) error { action, err := t.Actions.Get("verify") if err != nil { return err } if action == nil { return nil } req, err := http.NewRequest("POST", action.Href, nil) if err != nil { return err } err = lfsapi.MarshalToRequest(req, struct { Oid string `json:"oid"` Size int64 `json:"size"` }{Oid: t.Oid, Size: t.Size}) if err != nil { return err } for key, value := range action.Header { req.Header.Set(key, value) } req.Header.Set("Content-Type", "application/vnd.git-lfs+json") mv := c.GitEnv().Int(maxVerifiesConfigKey, defaultMaxVerifyAttempts) mv = tools.MaxInt(defaultMaxVerifyAttempts, mv) req = c.LogRequest(req, "lfs.verify") for i := 1; i <= mv; i++ { tracerx.Printf("tq: verify %s attempt #%d (max: %d)", t.Oid[:7], i, mv) var res *http.Response if t.Authenticated { res, err = c.Do(req) } else { res, err = c.DoWithAuth(remote, req) } if err != nil { 
tracerx.Printf("tq: verify err: %+v", err.Error()) } else { err = res.Body.Close() break } } return err } git-lfs-2.3.4/tq/verify_test.go000066400000000000000000000026441317167762300164260ustar00rootroot00000000000000package tq import ( "encoding/json" "net/http" "net/http/httptest" "sync/atomic" "testing" "github.com/git-lfs/git-lfs/lfsapi" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestVerifyWithoutAction(t *testing.T) { c := &lfsapi.Client{} tr := &Transfer{ Oid: "abc", Size: 123, } assert.Nil(t, verifyUpload(c, "origin", tr)) } func TestVerifySuccess(t *testing.T) { var called uint32 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() != "/verify" { w.WriteHeader(http.StatusNotFound) return } atomic.AddUint32(&called, 1) assert.Equal(t, "POST", r.Method) assert.Equal(t, "bar", r.Header.Get("Foo")) assert.Equal(t, "29", r.Header.Get("Content-Length")) assert.Equal(t, "application/vnd.git-lfs+json", r.Header.Get("Content-Type")) var tr Transfer assert.Nil(t, json.NewDecoder(r.Body).Decode(&tr)) assert.Equal(t, "abcd1234", tr.Oid) assert.EqualValues(t, 123, tr.Size) })) defer srv.Close() c, err := lfsapi.NewClient(nil, lfsapi.UniqTestEnv{ "lfs.transfer.maxverifies": "1", }) require.Nil(t, err) tr := &Transfer{ Oid: "abcd1234", Size: 123, Actions: map[string]*Action{ "verify": &Action{ Href: srv.URL + "/verify", Header: map[string]string{ "foo": "bar", }, }, }, } assert.Nil(t, verifyUpload(c, "origin", tr)) assert.EqualValues(t, 1, called) } git-lfs-2.3.4/versioninfo.json000066400000000000000000000004621317167762300163400ustar00rootroot00000000000000{ "FixedFileInfo": { "FileVersion": { "Major": 2, "Minor": 3, "Patch": 4, "Build": 0 } }, "StringFileInfo": { "FileDescription": "Git LFS", "LegalCopyright": "GitHub, Inc. and Git LFS contributors", "ProductName": "Git Large File Storage (LFS)", "ProductVersion": "2.3.4" } }