pax_global_header00006660000000000000000000000064134560522430014516gustar00rootroot0000000000000052 comment=aa6f288c256ff8baf8a7745546a9752323dc0d89 golang-gopkg-src-d-go-git.v4-4.11.0/000077500000000000000000000000001345605224300166775ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/.gitignore000066400000000000000000000000511345605224300206630ustar00rootroot00000000000000coverage.out *~ coverage.txt profile.out golang-gopkg-src-d-go-git.v4-4.11.0/.travis.yml000066400000000000000000000011671345605224300210150ustar00rootroot00000000000000language: go go: - "1.11" - "1.12" go_import_path: gopkg.in/src-d/go-git.v4 env: - GIT_VERSION=master - GIT_VERSION=v1.9.3 - GIT_VERSION=v2.11.0 cache: directories: - $HOME/.git-dist before_script: - export GIT_DIST_PATH=$HOME/.git-dist - make build-git before_install: - git config --global user.email "travis@example.com" - git config --global user.name "Travis CI" install: - go get -v -t ./... script: - export GIT_EXEC_PATH=$GIT_DIST_PATH - export PATH=$GIT_DIST_PATH:$PATH - git version - make test-coverage - go vet ./... after_success: - bash <(curl -s https://codecov.io/bash) golang-gopkg-src-d-go-git.v4-4.11.0/CODE_OF_CONDUCT.md000066400000000000000000000062351345605224300215040ustar00rootroot00000000000000# Contributor Covenant Code of Conduct ## Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 
## Our Standards Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. ## Scope This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at conduct@sourced.tech. 
All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html [homepage]: https://www.contributor-covenant.org golang-gopkg-src-d-go-git.v4-4.11.0/COMPATIBILITY.md000066400000000000000000000124131345605224300212330ustar00rootroot00000000000000Supported Capabilities ====================== Here is a non-comprehensive table of git commands and features whose equivalent is supported by go-git. | Feature | Status | Notes | |---------------------------------------|--------|-------| | **config** | | config | ✔ | Reading and modifying per-repository configuration (`.git/config`) is supported. Global configuration (`$HOME/.gitconfig`) is not. | | **getting and creating repositories** | | init | ✔ | Plain init and `--bare` are supported. Flags `--template`, `--separate-git-dir` and `--shared` are not. | | clone | ✔ | Plain clone and equivalents to `--progress`, `--single-branch`, `--depth`, `--origin`, `--recurse-submodules` are supported. Others are not. | | **basic snapshotting** | | add | ✔ | Plain add is supported. Any other flag aren't supported | | status | ✔ | | commit | ✔ | | reset | ✔ | | rm | ✔ | | mv | ✔ | | **branching and merging** | | branch | ✔ | | checkout | ✔ | Basic usages of checkout are supported. 
| | merge | ✖ | | mergetool | ✖ | | stash | ✖ | | tag | ✔ | | **sharing and updating projects** | | fetch | ✔ | | pull | ✔ | Only supports merges where the merge can be resolved as a fast-forward. | | push | ✔ | | remote | ✔ | | submodule | ✔ | | **inspection and comparison** | | show | ✔ | | log | ✔ | | shortlog | (see log) | | describe | | | **patching** | | apply | ✖ | | cherry-pick | ✖ | | diff | ✔ | Patch object with UnifiedDiff output representation | | rebase | ✖ | | revert | ✖ | | **debugging** | | bisect | ✖ | | blame | ✔ | | grep | ✔ | | **email** || | am | ✖ | | apply | ✖ | | format-patch | ✖ | | send-email | ✖ | | request-pull | ✖ | | **external systems** | | svn | ✖ | | fast-import | ✖ | | **administration** | | clean | ✔ | | gc | ✖ | | fsck | ✖ | | reflog | ✖ | | filter-branch | ✖ | | instaweb | ✖ | | archive | ✖ | | bundle | ✖ | | prune | ✖ | | repack | ✖ | | **server admin** | | daemon | | | update-server-info | | | **advanced** | | notes | ✖ | | replace | ✖ | | worktree | ✖ | | annotate | (see blame) | | **gpg** | | git-verify-commit | ✔ | | git-verify-tag | ✔ | | **plumbing commands** | | cat-file | ✔ | | check-ignore | | | commit-tree | | | count-objects | | | diff-index | | | for-each-ref | ✔ | | hash-object | ✔ | | ls-files | ✔ | | merge-base | | | read-tree | | | rev-list | ✔ | | rev-parse | | | show-ref | ✔ | | symbolic-ref | ✔ | | update-index | | | update-ref | | | verify-pack | | | write-tree | | | **protocols** | | http(s):// (dumb) | ✖ | | http(s):// (smart) | ✔ | | git:// | ✔ | | ssh:// | ✔ | | file:// | ✔ | | custom | ✔ | | **other features** | | gitignore | ✔ | | gitattributes | ✖ | | index version | | | packfile version | | | push-certs | ✖ | golang-gopkg-src-d-go-git.v4-4.11.0/CONTRIBUTING.md000066400000000000000000000063011345605224300211300ustar00rootroot00000000000000# Contributing Guidelines source{d} go-git project is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub pull requests. 
This document outlines some of the conventions on development workflow, commit message formatting, contact points, and other resources to make it easier to get your contribution accepted. ## Certificate of Origin By contributing to this project you agree to the [Developer Certificate of Origin (DCO)](DCO). This document was created by the Linux Kernel community and is a simple statement that you, as a contributor, have the legal right to make the contribution. In order to show your agreement with the DCO you should include at the end of commit message, the following line: `Signed-off-by: John Doe `, using your real name. This can be done easily using the [`-s`](https://github.com/git/git/blob/b2c150d3aa82f6583b9aadfecc5f8fa1c74aca09/Documentation/git-commit.txt#L154-L161) flag on the `git commit`. ## Support Channels The official support channels, for both users and contributors, are: - [StackOverflow go-git tag](https://stackoverflow.com/questions/tagged/go-git) for user questions. - GitHub [Issues](https://github.com/src-d/go-git/issues)* for bug reports and feature requests. - Slack: #go-git room in the [source{d} Slack](https://join.slack.com/t/sourced-community/shared_invite/enQtMjc4Njk5MzEyNzM2LTFjNzY4NjEwZGEwMzRiNTM4MzRlMzQ4MmIzZjkwZmZlM2NjODUxZmJjNDI1OTcxNDAyMmZlNmFjODZlNTg0YWM) *Before opening a new issue or submitting a new pull request, it's helpful to search the project - it's likely that another user has already reported the issue you're facing, or it's a known issue that we're already aware of. ## How to Contribute Pull Requests (PRs) are the main and exclusive way to contribute to the official go-git project. In order for a PR to be accepted it needs to pass a list of requirements: - You should be able to run the same query using `git`. We don't accept features that are not implemented in the official git implementation. - The expected behavior must match the [official git implementation](https://github.com/git/git). 
- The actual behavior must be correctly explained with natural language and providing a minimum working example in Go that reproduces it. - All PRs must be written in idiomatic Go, formatted according to [gofmt](https://golang.org/cmd/gofmt/), and without any warnings from [go lint](https://github.com/golang/lint) nor [go vet](https://golang.org/cmd/vet/). - They should in general include tests, and those shall pass. - If the PR is a bug fix, it has to include a suite of unit tests for the new functionality. - If the PR is a new feature, it has to come with a suite of unit tests, that tests the new functionality. - In any case, all the PRs have to pass the personal evaluation of at least one of the [maintainers](MAINTAINERS) of go-git. ### Format of the commit message Every commit message should describe what was changed, under which context and, if applicable, the GitHub issue it relates to: ``` plumbing: packp, Skip argument validations for unknown capabilities. Fixes #623 ``` The format can be described more formally as follows: ``` : , . [Fixes #] ``` golang-gopkg-src-d-go-git.v4-4.11.0/DCO000066400000000000000000000026151345605224300172330ustar00rootroot00000000000000Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 660 York Street, Suite 102, San Francisco, CA 94110 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 
Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved.golang-gopkg-src-d-go-git.v4-4.11.0/LICENSE000066400000000000000000000261341345605224300177120ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2018 Sourced Technologies, S.L. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. golang-gopkg-src-d-go-git.v4-4.11.0/MAINTAINERS000066400000000000000000000002241345605224300203720ustar00rootroot00000000000000Máximo Cuadros (@mcuadros) Jeremy Stribling (@strib) Ori Rawlings (@orirawlings) golang-gopkg-src-d-go-git.v4-4.11.0/Makefile000066400000000000000000000021761345605224300203450ustar00rootroot00000000000000# General WORKDIR = $(PWD) # Go parameters GOCMD = go GOTEST = $(GOCMD) test -v # Git config GIT_VERSION ?= GIT_DIST_PATH ?= $(PWD)/.git-dist GIT_REPOSITORY = http://github.com/git/git.git # Coverage COVERAGE_REPORT = coverage.txt COVERAGE_PROFILE = profile.out COVERAGE_MODE = atomic ifneq ($(origin CI), undefined) WORKDIR := $(GOPATH)/src/gopkg.in/src-d/go-git.v4 endif build-git: @if [ -f $(GIT_DIST_PATH)/git ]; then \ echo "nothing to do, using cache $(GIT_DIST_PATH)"; \ else \ git clone $(GIT_REPOSITORY) -b $(GIT_VERSION) --depth 1 --single-branch $(GIT_DIST_PATH); \ cd $(GIT_DIST_PATH); \ make configure; \ ./configure; \ make all; \ fi test: @cd $(WORKDIR); \ $(GOTEST) ./... test-coverage: @cd $(WORKDIR); \ echo "" > $(COVERAGE_REPORT); \ for dir in `find . -name "*.go" | grep -o '.*/' | sort | uniq`; do \ $(GOTEST) $$dir -coverprofile=$(COVERAGE_PROFILE) -covermode=$(COVERAGE_MODE); \ if [ $$? 
!= 0 ]; then \ exit 2; \ fi; \ if [ -f $(COVERAGE_PROFILE) ]; then \ cat $(COVERAGE_PROFILE) >> $(COVERAGE_REPORT); \ rm $(COVERAGE_PROFILE); \ fi; \ done; \ clean: rm -rf $(GIT_DIST_PATH)golang-gopkg-src-d-go-git.v4-4.11.0/README.md000066400000000000000000000110361345605224300201570ustar00rootroot00000000000000![go-git logo](https://cdn.rawgit.com/src-d/artwork/02036484/go-git/files/go-git-github-readme-header.png) [![GoDoc](https://godoc.org/gopkg.in/src-d/go-git.v4?status.svg)](https://godoc.org/github.com/src-d/go-git) [![Build Status](https://travis-ci.org/src-d/go-git.svg)](https://travis-ci.org/src-d/go-git) [![Build status](https://ci.appveyor.com/api/projects/status/nyidskwifo4py6ub?svg=true)](https://ci.appveyor.com/project/mcuadros/go-git) [![codecov.io](https://codecov.io/github/src-d/go-git/coverage.svg)](https://codecov.io/github/src-d/go-git) [![Go Report Card](https://goreportcard.com/badge/github.com/src-d/go-git)](https://goreportcard.com/report/github.com/src-d/go-git) *go-git* is a highly extensible git implementation library written in **pure Go**. It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several types of storage, such as in-memory filesystems, or custom implementations thanks to the [`Storer`](https://godoc.org/gopkg.in/src-d/go-git.v4/plumbing/storer) interface. It's being actively developed since 2015 and is being used extensively by [source{d}](https://sourced.tech/) and [Keybase](https://keybase.io/blog/encrypted-git-for-everyone), and by many other libraries and tools. Comparison with git ------------------- *go-git* aims to be fully compatible with [git](https://github.com/git/git), all the *porcelain* operations are implemented to work exactly as *git* does. *git* is a humongous project with years of development by thousands of contributors, making it challenging for *go-git* to implement all the features. 
You can find a comparison of *go-git* vs *git* in the [compatibility documentation](COMPATIBILITY.md). Installation ------------ The recommended way to install *go-git* is: ``` go get -u gopkg.in/src-d/go-git.v4/... ``` > We use [gopkg.in](http://labix.org/gopkg.in) to version the API, this means that when `go get` clones the package, it's the latest tag matching `v4.*` that is cloned and not the master branch. Examples -------- > Please note that the `CheckIfError` and `Info` functions used in the examples are from the [examples package](https://github.com/src-d/go-git/blob/master/_examples/common.go#L17) just to be used in the examples. ### Basic example A basic example that mimics the standard `git clone` command ```go // Clone the given repository to the given directory Info("git clone https://github.com/src-d/go-git") _, err := git.PlainClone("/tmp/foo", false, &git.CloneOptions{ URL: "https://github.com/src-d/go-git", Progress: os.Stdout, }) CheckIfError(err) ``` Outputs: ``` Counting objects: 4924, done. Compressing objects: 100% (1333/1333), done. Total 4924 (delta 530), reused 6 (delta 6), pack-reused 3533 ``` ### In-memory example Cloning a repository into memory and printing the history of HEAD, just like `git log` does ```go // Clones the given repository in memory, creating the remote, the local // branches and fetching the objects, exactly as: Info("git clone https://github.com/src-d/go-siva") r, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{ URL: "https://github.com/src-d/go-siva", }) CheckIfError(err) // Gets the HEAD history from HEAD, just like this command: Info("git log") // ... retrieves the branch pointed by HEAD ref, err := r.Head() CheckIfError(err) // ... retrieves the commit history cIter, err := r.Log(&git.LogOptions{From: ref.Hash()}) CheckIfError(err) // ... 
just iterates over the commits, printing it err = cIter.ForEach(func(c *object.Commit) error { fmt.Println(c) return nil }) CheckIfError(err) ``` Outputs: ``` commit ded8054fd0c3994453e9c8aacaf48d118d42991e Author: Santiago M. Mola Date: Sat Nov 12 21:18:41 2016 +0100 index: ReadFrom/WriteTo returns IndexReadError/IndexWriteError. (#9) commit df707095626f384ce2dc1a83b30f9a21d69b9dfc Author: Santiago M. Mola Date: Fri Nov 11 13:23:22 2016 +0100 readwriter: fix bug when writing index. (#10) When using ReadWriter on an existing siva file, absolute offset for index entries was not being calculated correctly. ... ``` You can find this [example](_examples/log/main.go) and many others in the [examples](_examples) folder. Contribute ---------- [Contributions](https://github.com/src-d/go-git/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) are more than welcome, if you are interested please take a look to our [Contributing Guidelines](CONTRIBUTING.md). License ------- Apache License Version 2.0, see [LICENSE](LICENSE) golang-gopkg-src-d-go-git.v4-4.11.0/_examples/000077500000000000000000000000001345605224300206545ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/README.md000066400000000000000000000030741345605224300221370ustar00rootroot00000000000000# go-git: examples Here you can find a list of annotated _go-git_ examples: ### Basic - [showcase](showcase/main.go) - A small showcase of the capabilities of _go-git_ - [open](open/main.go) - Opening a existing repository cloned by _git_ - [clone](clone/main.go) - Cloning a repository - [username and password](clone/auth/basic/username_password/main.go) - Cloning a repository using a username and password - [personal access token](clone/auth/basic/access_token/main.go) - Cloning a repository using a GitHub personal access token - [commit](commit/main.go) - Commit changes to the current branch to an existent repository - [push](push/main.go) - Push repository to default remote (origin) - 
[pull](pull/main.go) - Pull changes from a remote repository - [checkout](checkout/main.go) - Check out a specific commit from a repository - [log](log/main.go) - Emulate `git log` command output iterating all the commit history from HEAD reference - [branch](branch/main.go) - How to create and remove branches or any other kind of reference. - [tag](tag/main.go) - List/print repository tags - [remotes](remotes/main.go) - Working with remotes: adding, removing, etc - [progress](progress/main.go) - Printing the progress information from the sideband - [revision](revision/main.go) - Solve a revision into a commit ### Advanced - [custom_http](custom_http/main.go) - Replacing the HTTP client using a custom one - [clone with context](context/main.go) - Cloning a repository with graceful cancellation. - [storage](storage/README.md) - Implementing a custom storage system golang-gopkg-src-d-go-git.v4-4.11.0/_examples/branch/000077500000000000000000000000001345605224300221115ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/branch/main.go000066400000000000000000000023331345605224300233650ustar00rootroot00000000000000package main import ( "os" "gopkg.in/src-d/go-git.v4" . "gopkg.in/src-d/go-git.v4/_examples" "gopkg.in/src-d/go-git.v4/plumbing" ) // An example of how to create and remove branches or any other kind of reference. func main() { CheckArgs("", "") url, directory := os.Args[1], os.Args[2] // Clone the given repository to the given directory Info("git clone %s %s", url, directory) r, err := git.PlainClone(directory, false, &git.CloneOptions{ URL: url, }) CheckIfError(err) // Create a new branch to the current HEAD Info("git branch my-branch") headRef, err := r.Head() CheckIfError(err) // Create a new plumbing.HashReference object with the name of the branch // and the hash from the HEAD. The reference name should be a full reference // name and not an abbreviated one, as is used on the git cli. 
// // For tags we should use `refs/tags/%s` instead of `refs/heads/%s` used // for branches. ref := plumbing.NewHashReference("refs/heads/my-branch", headRef.Hash()) // The created reference is saved in the storage. err = r.Storer.SetReference(ref) CheckIfError(err) // Or deleted from it. Info("git branch -D my-branch") err = r.Storer.RemoveReference(ref.Name()) CheckIfError(err) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/checkout/000077500000000000000000000000001345605224300224615ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/checkout/main.go000066400000000000000000000021331345605224300237330ustar00rootroot00000000000000package main import ( "fmt" "os" "gopkg.in/src-d/go-git.v4" . "gopkg.in/src-d/go-git.v4/_examples" "gopkg.in/src-d/go-git.v4/plumbing" ) // Basic example of how to checkout a specific commit. func main() { CheckArgs("", "", "") url, directory, commit := os.Args[1], os.Args[2], os.Args[3] // Clone the given repository to the given directory Info("git clone %s %s", url, directory) r, err := git.PlainClone(directory, false, &git.CloneOptions{ URL: url, }) CheckIfError(err) // ... retrieving the commit being pointed by HEAD Info("git show-ref --head HEAD") ref, err := r.Head() CheckIfError(err) fmt.Println(ref.Hash()) w, err := r.Worktree() CheckIfError(err) // ... checking out to commit Info("git checkout %s", commit) err = w.Checkout(&git.CheckoutOptions{ Hash: plumbing.NewHash(commit), }) CheckIfError(err) // ... 
retrieving the commit being pointed by HEAD, it shows that the // repository is pointing to the giving commit in detached mode Info("git show-ref --head HEAD") ref, err = r.Head() CheckIfError(err) fmt.Println(ref.Hash()) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/clone/000077500000000000000000000000001345605224300217545ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/clone/auth/000077500000000000000000000000001345605224300227155ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/clone/auth/basic/000077500000000000000000000000001345605224300237765ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/clone/auth/basic/access_token/000077500000000000000000000000001345605224300264375ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/clone/auth/basic/access_token/main.go000066400000000000000000000021061345605224300277110ustar00rootroot00000000000000package main import ( "fmt" "os" git "gopkg.in/src-d/go-git.v4" . "gopkg.in/src-d/go-git.v4/_examples" "gopkg.in/src-d/go-git.v4/plumbing/transport/http" ) func main() { CheckArgs("", "", "") url, directory, token := os.Args[1], os.Args[2], os.Args[3] // Clone the given repository to the given directory Info("git clone %s %s", url, directory) r, err := git.PlainClone(directory, false, &git.CloneOptions{ // The intended use of a GitHub personal access token is in replace of your password // because access tokens can easily be revoked. // https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/ Auth: &http.BasicAuth{ Username: "abc123", // yes, this can be anything except an empty string Password: token, }, URL: url, Progress: os.Stdout, }) CheckIfError(err) // ... retrieving the branch being pointed by HEAD ref, err := r.Head() CheckIfError(err) // ... 
retrieving the commit object commit, err := r.CommitObject(ref.Hash()) CheckIfError(err) fmt.Println(commit) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/clone/auth/basic/username_password/000077500000000000000000000000001345605224300275375ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/clone/auth/basic/username_password/main.go000066400000000000000000000015311345605224300310120ustar00rootroot00000000000000package main import ( "fmt" "os" git "gopkg.in/src-d/go-git.v4" . "gopkg.in/src-d/go-git.v4/_examples" "gopkg.in/src-d/go-git.v4/plumbing/transport/http" ) func main() { CheckArgs("", "", "", "") url, directory, username, password := os.Args[1], os.Args[2], os.Args[3], os.Args[4] // Clone the given repository to the given directory Info("git clone %s %s", url, directory) r, err := git.PlainClone(directory, false, &git.CloneOptions{ Auth: &http.BasicAuth{ Username: username, Password: password, }, URL: url, Progress: os.Stdout, }) CheckIfError(err) // ... retrieving the branch being pointed by HEAD ref, err := r.Head() CheckIfError(err) // ... retrieving the commit object commit, err := r.CommitObject(ref.Hash()) CheckIfError(err) fmt.Println(commit) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/clone/main.go000066400000000000000000000013741345605224300232340ustar00rootroot00000000000000package main import ( "fmt" "os" "gopkg.in/src-d/go-git.v4" . "gopkg.in/src-d/go-git.v4/_examples" ) // Basic example of how to clone a repository using clone options. func main() { CheckArgs("", "") url := os.Args[1] directory := os.Args[2] // Clone the given repository to the given directory Info("git clone %s %s --recursive", url, directory) r, err := git.PlainClone(directory, false, &git.CloneOptions{ URL: url, RecurseSubmodules: git.DefaultSubmoduleRecursionDepth, }) CheckIfError(err) // ... retrieving the branch being pointed by HEAD ref, err := r.Head() CheckIfError(err) // ... 
retrieving the commit object commit, err := r.CommitObject(ref.Hash()) CheckIfError(err) fmt.Println(commit) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/commit/000077500000000000000000000000001345605224300221445ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/commit/main.go000066400000000000000000000032361345605224300234230ustar00rootroot00000000000000package main import ( "fmt" "io/ioutil" "os" "path/filepath" "time" "gopkg.in/src-d/go-git.v4" . "gopkg.in/src-d/go-git.v4/_examples" "gopkg.in/src-d/go-git.v4/plumbing/object" ) // Basic example of how to commit changes to the current branch to an existing // repository. func main() { CheckArgs("") directory := os.Args[1] // Opens an already existing repository. r, err := git.PlainOpen(directory) CheckIfError(err) w, err := r.Worktree() CheckIfError(err) // ... we need a file to commit so let's create a new file inside of the // worktree of the project using the go standard library. Info("echo \"hello world!\" > example-git-file") filename := filepath.Join(directory, "example-git-file") err = ioutil.WriteFile(filename, []byte("hello world!"), 0644) CheckIfError(err) // Adds the new file to the staging area. Info("git add example-git-file") _, err = w.Add("example-git-file") CheckIfError(err) // We can verify the current status of the worktree using the method Status. Info("git status --porcelain") status, err := w.Status() CheckIfError(err) fmt.Println(status) // Commits the current staging area to the repository, with the new file // just created. We should provide the object.Signature of Author of the // commit. Info("git commit -m \"example go-git commit\"") commit, err := w.Commit("example go-git commit", &git.CommitOptions{ Author: &object.Signature{ Name: "John Doe", Email: "john@doe.org", When: time.Now(), }, }) CheckIfError(err) // Prints the current HEAD to verify that all worked well. 
Info("git show -s") obj, err := r.CommitObject(commit) CheckIfError(err) fmt.Println(obj) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/common.go000066400000000000000000000015761345605224300225040ustar00rootroot00000000000000package examples import ( "fmt" "os" "strings" ) // CheckArgs should be used to ensure the right command line arguments are // passed before executing an example. func CheckArgs(arg ...string) { if len(os.Args) < len(arg)+1 { Warning("Usage: %s %s", os.Args[0], strings.Join(arg, " ")) os.Exit(1) } } // CheckIfError should be used to naively panics if an error is not nil. func CheckIfError(err error) { if err == nil { return } fmt.Printf("\x1b[31;1m%s\x1b[0m\n", fmt.Sprintf("error: %s", err)) os.Exit(1) } // Info should be used to describe the example commands that are about to run. func Info(format string, args ...interface{}) { fmt.Printf("\x1b[34;1m%s\x1b[0m\n", fmt.Sprintf(format, args...)) } // Warning should be used to display a warning func Warning(format string, args ...interface{}) { fmt.Printf("\x1b[36;1m%s\x1b[0m\n", fmt.Sprintf(format, args...)) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/common_test.go000066400000000000000000000066031345605224300235370ustar00rootroot00000000000000package examples import ( "flag" "go/build" "io/ioutil" "os" "os/exec" "path/filepath" "testing" ) var examplesTest = flag.Bool("examples", false, "run the examples tests") var defaultURL = "https://github.com/git-fixtures/basic.git" var args = map[string][]string{ "branch": {defaultURL, tempFolder()}, "checkout": {defaultURL, tempFolder(), "35e85108805c84807bc66a02d91535e1e24b38b9"}, "clone": {defaultURL, tempFolder()}, "context": {defaultURL, tempFolder()}, "commit": {cloneRepository(defaultURL, tempFolder())}, "custom_http": {defaultURL}, "open": {cloneRepository(defaultURL, tempFolder())}, "progress": {defaultURL, tempFolder()}, "push": {setEmptyRemote(cloneRepository(defaultURL, tempFolder()))}, "revision": {cloneRepository(defaultURL, 
tempFolder()), "master~2^"}, "showcase": {defaultURL, tempFolder()}, "tag": {cloneRepository(defaultURL, tempFolder())}, "pull": {createRepositoryWithRemote(tempFolder(), defaultURL)}, } var ignored = map[string]bool{} var tempFolders = []string{} func TestExamples(t *testing.T) { flag.Parse() if !*examplesTest && os.Getenv("CI") == "" { t.Skip("skipping examples tests, pass --examples to execute it") return } defer deleteTempFolders() examples, err := filepath.Glob(examplesFolder()) if err != nil { t.Errorf("error finding tests: %s", err) } for _, example := range examples { _, name := filepath.Split(filepath.Dir(example)) if ignored[name] { continue } t.Run(name, func(t *testing.T) { testExample(t, name, example) }) } } func tempFolder() string { path, err := ioutil.TempDir("", "") CheckIfError(err) tempFolders = append(tempFolders, path) return path } func packageFolder() string { return filepath.Join( build.Default.GOPATH, "src", "gopkg.in/src-d/go-git.v4", ) } func examplesFolder() string { return filepath.Join( packageFolder(), "_examples", "*", "main.go", ) } func cloneRepository(url, folder string) string { cmd := exec.Command("git", "clone", url, folder) err := cmd.Run() CheckIfError(err) return folder } func createBareRepository(dir string) string { return createRepository(dir, true) } func createRepository(dir string, isBare bool) string { var cmd *exec.Cmd if isBare { cmd = exec.Command("git", "init", "--bare", dir) } else { cmd = exec.Command("git", "init", dir) } err := cmd.Run() CheckIfError(err) return dir } func createRepositoryWithRemote(local, remote string) string { createRepository(local, false) addRemote(local, remote) return local } func setEmptyRemote(dir string) string { remote := createBareRepository(tempFolder()) setRemote(dir, remote) return dir } func setRemote(local, remote string) { cmd := exec.Command("git", "remote", "set-url", "origin", remote) cmd.Dir = local err := cmd.Run() CheckIfError(err) } func addRemote(local, remote 
string) { cmd := exec.Command("git", "remote", "add", "origin", remote) cmd.Dir = local err := cmd.Run() CheckIfError(err) } func testExample(t *testing.T, name, example string) { cmd := exec.Command("go", append([]string{ "run", filepath.Join(example), }, args[name]...)...) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { t.Errorf("error running cmd %q", err) } } func deleteTempFolders() { for _, folder := range tempFolders { err := os.RemoveAll(folder) CheckIfError(err) } } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/context/000077500000000000000000000000001345605224300223405ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/context/main.go000066400000000000000000000022161345605224300236140ustar00rootroot00000000000000package main import ( "context" "os" "os/signal" "gopkg.in/src-d/go-git.v4" . "gopkg.in/src-d/go-git.v4/_examples" ) // Graceful cancellation example of a basic git operation such as Clone. func main() { CheckArgs("", "") url := os.Args[1] directory := os.Args[2] // Clone the given repository to the given directory Info("git clone %s %s", url, directory) stop := make(chan os.Signal, 1) signal.Notify(stop, os.Interrupt) // The context is the mechanism used by go-git, to support deadlines and // cancellation signals. ctx, cancel := context.WithCancel(context.Background()) defer cancel() // cancel when we are finished consuming integers go func() { <-stop Warning("\nSignal detected, canceling operation...") cancel() }() Warning("To gracefully stop the clone operation, push Crtl-C.") // Using PlainCloneContext we can provide to a context, if the context // is cancelled, the clone operation stops gracefully. _, err := git.PlainCloneContext(ctx, directory, false, &git.CloneOptions{ URL: url, Progress: os.Stdout, }) // If the context was cancelled, an error is returned. 
CheckIfError(err) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/custom_http/000077500000000000000000000000001345605224300232255ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/custom_http/main.go000066400000000000000000000024501345605224300245010ustar00rootroot00000000000000package main import ( "crypto/tls" "fmt" "net/http" "os" "time" "gopkg.in/src-d/go-git.v4" . "gopkg.in/src-d/go-git.v4/_examples" "gopkg.in/src-d/go-git.v4/plumbing/transport/client" githttp "gopkg.in/src-d/go-git.v4/plumbing/transport/http" "gopkg.in/src-d/go-git.v4/storage/memory" ) // Here is an example to configure http client according to our own needs. func main() { CheckArgs("") url := os.Args[1] // Create a custom http(s) client with your config customClient := &http.Client{ // accept any certificate (might be useful for testing) Transport: &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, }, // 15 second timeout Timeout: 15 * time.Second, // don't follow redirect CheckRedirect: func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse }, } // Override http(s) default protocol to use our custom client client.InstallProtocol("https", githttp.NewClient(customClient)) // Clone repository using the new client if the protocol is https:// Info("git clone %s", url) r, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{URL: url}) CheckIfError(err) // Retrieve the branch pointed by HEAD Info("git rev-parse HEAD") head, err := r.Head() CheckIfError(err) fmt.Println(head.Hash()) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/log/000077500000000000000000000000001345605224300214355ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/log/main.go000066400000000000000000000022241345605224300227100ustar00rootroot00000000000000package main import ( "fmt" "gopkg.in/src-d/go-git.v4" . 
"gopkg.in/src-d/go-git.v4/_examples" "gopkg.in/src-d/go-git.v4/plumbing/object" "gopkg.in/src-d/go-git.v4/storage/memory" ) // Example of how to: // - Clone a repository into memory // - Get the HEAD reference // - Using the HEAD reference, obtain the commit this reference is pointing to // - Using the commit, obtain its history and print it func main() { // Clones the given repository, creating the remote, the local branches // and fetching the objects, everything in memory: Info("git clone https://github.com/src-d/go-siva") r, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{ URL: "https://github.com/src-d/go-siva", }) CheckIfError(err) // Gets the HEAD history from HEAD, just like this command: Info("git log") // ... retrieves the branch pointed by HEAD ref, err := r.Head() CheckIfError(err) // ... retrieves the commit history cIter, err := r.Log(&git.LogOptions{From: ref.Hash()}) CheckIfError(err) // ... just iterates over the commits, printing it err = cIter.ForEach(func(c *object.Commit) error { fmt.Println(c) return nil }) CheckIfError(err) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/open/000077500000000000000000000000001345605224300216155ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/open/main.go000066400000000000000000000014761345605224300231000ustar00rootroot00000000000000package main import ( "fmt" "os" "gopkg.in/src-d/go-git.v4" . "gopkg.in/src-d/go-git.v4/_examples" "gopkg.in/src-d/go-git.v4/plumbing/object" ) // Open an existing repository in a specific folder. func main() { CheckArgs("") path := os.Args[1] // We instanciate a new repository targeting the given path (the .git folder) r, err := git.PlainOpen(path) CheckIfError(err) // Length of the HEAD history Info("git rev-list HEAD --count") // ... retrieving the HEAD reference ref, err := r.Head() CheckIfError(err) // ... retrieves the commit history cIter, err := r.Log(&git.LogOptions{From: ref.Hash()}) CheckIfError(err) // ... 
just iterates over the commits var cCount int err = cIter.ForEach(func(c *object.Commit) error { cCount++ return nil }) CheckIfError(err) fmt.Println(cCount) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/progress/000077500000000000000000000000001345605224300225205ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/progress/main.go000066400000000000000000000013221345605224300237710ustar00rootroot00000000000000package main import ( "os" "gopkg.in/src-d/go-git.v4" . "gopkg.in/src-d/go-git.v4/_examples" ) // Example of how to show the progress when you do a basic clone operation. func main() { CheckArgs("", "") url := os.Args[1] directory := os.Args[2] // Clone the given repository to the given directory Info("git clone %s %s", url, directory) _, err := git.PlainClone(directory, false, &git.CloneOptions{ URL: url, Depth: 1, // as git does, when you make a clone, pull or some other operations the // server sends information via the sideband, this information can being // collected provinding a io.Writer to the CloneOptions options Progress: os.Stdout, }) CheckIfError(err) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/pull/000077500000000000000000000000001345605224300216305ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/pull/main.go000066400000000000000000000014261345605224300231060ustar00rootroot00000000000000package main import ( "fmt" "os" "gopkg.in/src-d/go-git.v4" . 
"gopkg.in/src-d/go-git.v4/_examples" ) // Pull changes from a remote repository func main() { CheckArgs("") path := os.Args[1] // We instance\iate a new repository targeting the given path (the .git folder) r, err := git.PlainOpen(path) CheckIfError(err) // Get the working directory for the repository w, err := r.Worktree() CheckIfError(err) // Pull the latest changes from the origin remote and merge into the current branch Info("git pull origin") err = w.Pull(&git.PullOptions{RemoteName: "origin"}) CheckIfError(err) // Print the latest commit that was just pulled ref, err := r.Head() CheckIfError(err) commit, err := r.CommitObject(ref.Hash()) CheckIfError(err) fmt.Println(commit) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/push/000077500000000000000000000000001345605224300216335ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/push/main.go000066400000000000000000000006521345605224300231110ustar00rootroot00000000000000package main import ( "os" "gopkg.in/src-d/go-git.v4" . "gopkg.in/src-d/go-git.v4/_examples" ) // Example of how to open a repository in a specific path, and push to // its default remote (origin). func main() { CheckArgs("") path := os.Args[1] r, err := git.PlainOpen(path) CheckIfError(err) Info("git push") // push using default options err = r.Push(&git.PushOptions{}) CheckIfError(err) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/remotes/000077500000000000000000000000001345605224300223325ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/remotes/main.go000066400000000000000000000032671345605224300236150ustar00rootroot00000000000000package main import ( "fmt" "gopkg.in/src-d/go-git.v4" . 
"gopkg.in/src-d/go-git.v4/_examples" "gopkg.in/src-d/go-git.v4/config" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/storage/memory" ) // Example of how to: // - Create a new in-memory repository // - Create a new remote named "example" // - List remotes and print them // - Pull using the new remote "example" // - Iterate the references again, but only showing hash references, not symbolic ones // - Remove remote "example" func main() { // Create a new repository Info("git init") r, err := git.Init(memory.NewStorage(), nil) CheckIfError(err) // Add a new remote, with the default fetch refspec Info("git remote add example https://github.com/git-fixtures/basic.git") _, err = r.CreateRemote(&config.RemoteConfig{ Name: "example", URLs: []string{"https://github.com/git-fixtures/basic.git"}, }) CheckIfError(err) // List remotes from a repository Info("git remotes -v") list, err := r.Remotes() CheckIfError(err) for _, r := range list { fmt.Println(r) } // Fetch using the new remote Info("git fetch example") err = r.Fetch(&git.FetchOptions{ RemoteName: "example", }) CheckIfError(err) // List the branches // > git show-ref Info("git show-ref") refs, err := r.References() CheckIfError(err) err = refs.ForEach(func(ref *plumbing.Reference) error { // The HEAD is omitted in a `git show-ref` so we ignore the symbolic // references, the HEAD if ref.Type() == plumbing.SymbolicReference { return nil } fmt.Println(ref) return nil }) CheckIfError(err) // Delete the example remote Info("git remote rm example") err = r.DeleteRemote("example") CheckIfError(err) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/revision/000077500000000000000000000000001345605224300225125ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/revision/main.go000066400000000000000000000012641345605224300237700ustar00rootroot00000000000000package main import ( "fmt" "os" "gopkg.in/src-d/go-git.v4" . 
"gopkg.in/src-d/go-git.v4/_examples" "gopkg.in/src-d/go-git.v4/plumbing" ) // Example how to resolve a revision into its commit counterpart func main() { CheckArgs("", "") path := os.Args[1] revision := os.Args[2] // We instantiate a new repository targeting the given path (the .git folder) r, err := git.PlainOpen(path) CheckIfError(err) // Resolve revision into a sha1 commit, only some revisions are resolved // look at the doc to get more details Info("git rev-parse %s", revision) h, err := r.ResolveRevision(plumbing.Revision(revision)) CheckIfError(err) fmt.Println(h.String()) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/showcase/000077500000000000000000000000001345605224300224705ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/showcase/main.go000066400000000000000000000034471345605224300237530ustar00rootroot00000000000000package main import ( "fmt" "os" "strings" "gopkg.in/src-d/go-git.v4" "gopkg.in/src-d/go-git.v4/plumbing/object" . "gopkg.in/src-d/go-git.v4/_examples" ) // Example of an specific use case: // - Clone a repository in a specific path // - Get the HEAD reference // - Using the HEAD reference, obtain the commit this reference is pointing to // - Print the commit content // - Using the commit, iterate over all its files and print them // - Print all the commit history with commit messages, short hash and the // first line of the commit message func main() { CheckArgs(" ") url := os.Args[1] path := os.Args[2] // Clone the given repository, creating the remote, the local branches // and fetching the objects, exactly as: Info("git clone %s %s", url, path) r, err := git.PlainClone(path, false, &git.CloneOptions{URL: url}) CheckIfError(err) // Getting the latest commit on the current branch Info("git log -1") // ... retrieving the branch being pointed by HEAD ref, err := r.Head() CheckIfError(err) // ... 
retrieving the commit object commit, err := r.CommitObject(ref.Hash()) CheckIfError(err) fmt.Println(commit) // List the tree from HEAD Info("git ls-tree -r HEAD") // ... retrieve the tree from the commit tree, err := commit.Tree() CheckIfError(err) // ... get the files iterator and print the file tree.Files().ForEach(func(f *object.File) error { fmt.Printf("100644 blob %s %s\n", f.Hash, f.Name) return nil }) // List the history of the repository Info("git log --oneline") commitIter, err := r.Log(&git.LogOptions{From: commit.Hash}) CheckIfError(err) err = commitIter.ForEach(func(c *object.Commit) error { hash := c.Hash.String() line := strings.Split(c.Message, "\n") fmt.Println(hash[:7], line[0]) return nil }) CheckIfError(err) } golang-gopkg-src-d-go-git.v4-4.11.0/_examples/storage/000077500000000000000000000000001345605224300223205ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/storage/README.md000066400000000000000000000115051345605224300236010ustar00rootroot00000000000000# go-git + aerospike: a git repository backed by a database This is an example of a [go-git](https://github.com/src-d/go-git) repository backed by [Aerospike](http://www.aerospike.com/). ### and what this means ... *git* has a very well defined storage system, the `.git` directory, present on any repository. This is the place where `git` stores all the [`objects`](https://git-scm.com/book/en/v2/Git-Internals-Git-Objects), [`references`](https://git-scm.com/book/es/v2/Git-Internals-Git-References) and [`configuration`](https://git-scm.com/docs/git-config#_configuration_file). This information is stored in plain files. Our original **go-git** version was designed to work in memory, some time after we added support to read the `.git`, and now we have added support for fully customized [storages](https://godoc.org/gopkg.in/src-d/go-git.v4/storage#Storer). 
This means that the internal database of any repository can be saved and accessed on any support, databases, distributed filesystems, etc. This functionality is pretty similar to the [libgit2 backends](http://blog.deveo.com/your-git-repository-in-a-database-pluggable-backends-in-libgit2/) Installation ------------ What do you need? You need an *aerospike* server. The easiest way to get one for testing is running the official **docker** container provided by Aerospike: ``` docker run -d -p 3000:3000 --name aerospike aerospike/aerospike-server ``` Now, we need the sample code. ``` go get -u github.com/mcuadros/go-git-aerospike/... ``` Running this command will make the binary `go-git-aerospike`. if you have `GOPATH` on your `PATH`, you are ready to go. If not, this is a great moment. Usage ----- ### Cloning the repository into the database Running the command `go-git-aerospike` with the `clone` option followed by the URL of a git repository clones the repository into the database, storing all the git objects in it: ```sh go-git-aerospike clone https://github.com/src-d/flamingo.git ``` The repository is stored in the aerospike database. 
This means that all the internal objects like commits, trees, blobs and tags are `records` in different `sets` in the `test` namespace: ```sql aql> SELECT hash, type, url FROM test.commit ``` ``` +--------------------------------------------+----------+-------+-----------------------------------+ | hash | type | blob | url | +--------------------------------------------+----------+-------+-----------------------------------+ | "c94450c805876e49b38d2ff1103b8c09cdd2aef4" | "commit" | 00 00 | ...github.com/src-d/flamingo.git" | | "7f71640877608ee9cfe584fac216f03f9aebb523" | "commit" | 00 00 | ...github.com/src-d/flamingo.git" | | "255f097450dd91812c4eb7b9e0d3a4f034f2acaf" | "commit" | 00 00 | ...github.com/src-d/flamingo.git" | +--------------------------------------------+----------+-------+-----------------------------------+ 102 rows in set (0.071 secs) ``` And also the references and the configuration (remotes) are stored in it. ```sql aql> SELECT name, target, url FROM test.reference ``` ``` +------------------------------+--------------------------------------------+-----------------------+ | name | target | url | +------------------------------+--------------------------------------------+-----------------------+ | "HEAD" | "ref: refs/heads/master" | ...rc-d/flamingo.git" | | "refs/heads/master" | "ed3e1aa2e46584cb803ed356cb5d8855f6d05660" | ...rc-d/flamingo.git" | | "refs/remotes/origin/master" | "ed3e1aa2e46584cb803ed356cb5d8855f6d05660" | ...rc-d/flamingo.git" | +------------------------------+--------------------------------------------+-----------------------+ 3 rows in set (0.046 secs) ``` ### Reading the repository Running the `log` command, a `git log --online` like result is printed: ```sh go-git-aerospike log https://github.com/src-d/flamingo.git ``` The URL of the repository is the way we identify the objects in the `set`s, since we can clone several repositories to the same database. 
``` ed3e1aa ID is also allowed in SendFormTo and SayTo 2031f3e Handle close of message channel in WaitForMessage e784495 Add SendFormTo and SayTo 447748a Make text in attachments accept markdown 595b4e7 Form author name and author icon and text groupfield 0f2e315 Test for InvokeAction 0dc7c9a Handle closing of channel b3f167b Implement InvokeAction ``` The process has read all the commits and all the needed objects from the aerospike sets. ### Playing with the database If you want to explore the database, you can execute the `aql` tool and run some queries: ```sh docker run -it aerospike/aerospike-tools aql -h 172.17.0.1 aql> SELECT * FROM test; ``` golang-gopkg-src-d-go-git.v4-4.11.0/_examples/tag/000077500000000000000000000000001345605224300214275ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/_examples/tag/main.go000066400000000000000000000017711345605224300227100ustar00rootroot00000000000000package main import ( "fmt" "os" "gopkg.in/src-d/go-git.v4" . "gopkg.in/src-d/go-git.v4/_examples" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/object" ) // Basic example of how to list tags. 
func main() { CheckArgs("") path := os.Args[1] // We instanciate a new repository targeting the given path (the .git folder) r, err := git.PlainOpen(path) CheckIfError(err) // List all tag references, both lightweight tags and annotated tags Info("git show-ref --tag") tagrefs, err := r.Tags() CheckIfError(err) err = tagrefs.ForEach(func(t *plumbing.Reference) error { fmt.Println(t) return nil }) CheckIfError(err) // Print each annotated tag object (lightweight tags are not included) Info("for t in $(git show-ref --tag); do if [ \"$(git cat-file -t $t)\" = \"tag\" ]; then git cat-file -p $t ; fi; done") tags, err := r.TagObjects() CheckIfError(err) err = tags.ForEach(func(t *object.Tag) error { fmt.Println(t) return nil }) CheckIfError(err) } golang-gopkg-src-d-go-git.v4-4.11.0/appveyor.yml000066400000000000000000000006631345605224300212740ustar00rootroot00000000000000version: "{build}" platform: x64 matrix: allow_failures: - platform: x64 clone_folder: c:\gopath\src\gopkg.in\src-d\go-git.v4 environment: GOPATH: c:\gopath install: - set PATH=%GOPATH%\bin;c:\go\bin;"C:\Program Files\Git\mingw64\bin";%PATH% - go version - go get -v -t ./... - git config --global user.email "travis@example.com" - git config --global user.name "Travis CI build_script: - go test -v ./... golang-gopkg-src-d-go-git.v4-4.11.0/blame.go000066400000000000000000000202431345605224300203070ustar00rootroot00000000000000package git import ( "bytes" "errors" "fmt" "strconv" "strings" "time" "unicode/utf8" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/object" "gopkg.in/src-d/go-git.v4/utils/diff" ) // BlameResult represents the result of a Blame operation. type BlameResult struct { // Path is the path of the File that we're blaming. Path string // Rev (Revision) is the hash of the specified Commit used to generate this result. Rev plumbing.Hash // Lines contains every line with its authorship. 
Lines []*Line } // Blame returns a BlameResult with the information about the last author of // each line from file `path` at commit `c`. func Blame(c *object.Commit, path string) (*BlameResult, error) { // The file to blame is identified by the input arguments: // commit and path. commit is a Commit object obtained from a Repository. Path // represents a path to a specific file contained into the repository. // // Blaming a file is a two step process: // // 1. Create a linear history of the commits affecting a file. We use // revlist.New for that. // // 2. Then build a graph with a node for every line in every file in // the history of the file. // // Each node is assigned a commit: Start by the nodes in the first // commit. Assign that commit as the creator of all its lines. // // Then jump to the nodes in the next commit, and calculate the diff // between the two files. Newly created lines get // assigned the new commit as its origin. Modified lines also get // this new commit. Untouched lines retain the old commit. // // All this work is done in the assignOrigin function which holds all // the internal relevant data in a "blame" struct, that is not // exported. // // TODO: ways to improve the efficiency of this function: // 1. Improve revlist // 2. Improve how to traverse the history (example a backward traversal will // be much more efficient) // // TODO: ways to improve the function in general: // 1. Add memoization between revlist and assign. // 2. It is using much more memory than needed, see the TODOs below. b := new(blame) b.fRev = c b.path = path // get all the file revisions if err := b.fillRevs(); err != nil { return nil, err } // calculate the line tracking graph and fill in // file contents in data. 
if err := b.fillGraphAndData(); err != nil { return nil, err } file, err := b.fRev.File(b.path) if err != nil { return nil, err } finalLines, err := file.Lines() if err != nil { return nil, err } // Each node (line) holds the commit where it was introduced or // last modified. To achieve that we use the FORWARD algorithm // described in Zimmermann, et al. "Mining Version Archives for // Co-changed Lines", in proceedings of the Mining Software // Repositories workshop, Shanghai, May 22-23, 2006. lines, err := newLines(finalLines, b.sliceGraph(len(b.graph)-1)) if err != nil { return nil, err } return &BlameResult{ Path: path, Rev: c.Hash, Lines: lines, }, nil } // Line values represent the contents and author of a line in BlamedResult values. type Line struct { // Author is the email address of the last author that modified the line. Author string // Text is the original text of the line. Text string // Date is when the original text of the line was introduced Date time.Time // Hash is the commit hash that introduced the original line Hash plumbing.Hash } func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line { return &Line{ Author: author, Text: text, Hash: hash, Date: date, } } func newLines(contents []string, commits []*object.Commit) ([]*Line, error) { lcontents := len(contents) lcommits := len(commits) if lcontents != lcommits { if lcontents == lcommits-1 && contents[lcontents-1] != "\n" { contents = append(contents, "\n") } else { return nil, errors.New("contents and commits have different length") } } result := make([]*Line, 0, lcontents) for i := range contents { result = append(result, newLine( commits[i].Author.Email, contents[i], commits[i].Author.When, commits[i].Hash, )) } return result, nil } // this struct is internally used by the blame function to hold its // inputs, outputs and state. 
type blame struct { // the path of the file to blame path string // the commit of the final revision of the file to blame fRev *object.Commit // the chain of revisions affecting the the file to blame revs []*object.Commit // the contents of the file across all its revisions data []string // the graph of the lines in the file across all the revisions graph [][]*object.Commit } // calculate the history of a file "path", starting from commit "from", sorted by commit date. func (b *blame) fillRevs() error { var err error b.revs, err = references(b.fRev, b.path) return err } // build graph of a file from its revision history func (b *blame) fillGraphAndData() error { //TODO: not all commits are needed, only the current rev and the prev b.graph = make([][]*object.Commit, len(b.revs)) b.data = make([]string, len(b.revs)) // file contents in all the revisions // for every revision of the file, starting with the first // one... for i, rev := range b.revs { // get the contents of the file file, err := rev.File(b.path) if err != nil { return nil } b.data[i], err = file.Contents() if err != nil { return err } nLines := countLines(b.data[i]) // create a node for each line b.graph[i] = make([]*object.Commit, nLines) // assign a commit to each node // if this is the first revision, then the node is assigned to // this first commit. if i == 0 { for j := 0; j < nLines; j++ { b.graph[i][j] = (*object.Commit)(b.revs[i]) } } else { // if this is not the first commit, then assign to the old // commit or to the new one, depending on what the diff // says. b.assignOrigin(i, i-1) } } return nil } // sliceGraph returns a slice of commits (one per line) for a particular // revision of a file (0=first revision). 
func (b *blame) sliceGraph(i int) []*object.Commit { fVs := b.graph[i] result := make([]*object.Commit, 0, len(fVs)) for _, v := range fVs { c := object.Commit(*v) result = append(result, &c) } return result } // Assigns origin to vertexes in current (c) rev from data in its previous (p) // revision func (b *blame) assignOrigin(c, p int) { // assign origin based on diff info hunks := diff.Do(b.data[p], b.data[c]) sl := -1 // source line dl := -1 // destination line for h := range hunks { hLines := countLines(hunks[h].Text) for hl := 0; hl < hLines; hl++ { switch { case hunks[h].Type == 0: sl++ dl++ b.graph[c][dl] = b.graph[p][sl] case hunks[h].Type == 1: dl++ b.graph[c][dl] = (*object.Commit)(b.revs[c]) case hunks[h].Type == -1: sl++ default: panic("unreachable") } } } } // GoString prints the results of a Blame using git-blame's style. func (b *blame) GoString() string { var buf bytes.Buffer file, err := b.fRev.File(b.path) if err != nil { panic("PrettyPrint: internal error in repo.Data") } contents, err := file.Contents() if err != nil { panic("PrettyPrint: internal error in repo.Data") } lines := strings.Split(contents, "\n") // max line number length mlnl := len(strconv.Itoa(len(lines))) // max author length mal := b.maxAuthorLength() format := fmt.Sprintf("%%s (%%-%ds %%%dd) %%s\n", mal, mlnl) fVs := b.graph[len(b.graph)-1] for ln, v := range fVs { fmt.Fprintf(&buf, format, v.Hash.String()[:8], prettyPrintAuthor(fVs[ln]), ln+1, lines[ln]) } return buf.String() } // utility function to pretty print the author. func prettyPrintAuthor(c *object.Commit) string { return fmt.Sprintf("%s %s", c.Author.Name, c.Author.When.Format("2006-01-02")) } // utility function to calculate the number of runes needed // to print the longest author name in the blame of a file. 
func (b *blame) maxAuthorLength() int { memo := make(map[plumbing.Hash]struct{}, len(b.graph)-1) fVs := b.graph[len(b.graph)-1] m := 0 for ln := range fVs { if _, ok := memo[fVs[ln].Hash]; ok { continue } memo[fVs[ln].Hash] = struct{}{} m = max(m, utf8.RuneCountInString(prettyPrintAuthor(fVs[ln]))) } return m } func max(a, b int) int { if a > b { return a } return b } golang-gopkg-src-d-go-git.v4-4.11.0/blame_test.go000066400000000000000000000647401345605224300213600ustar00rootroot00000000000000package git import ( "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/object" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type BlameSuite struct { BaseSuite } var _ = Suite(&BlameSuite{}) func (s *BlameSuite) TestNewLines(c *C) { h := plumbing.NewHash("ce9f123d790717599aaeb76bc62510de437761be") lines, err := newLines([]string{"foo"}, []*object.Commit{{ Hash: h, Message: "foo", }}) c.Assert(err, IsNil) c.Assert(lines, HasLen, 1) c.Assert(lines[0].Text, Equals, "foo") c.Assert(lines[0].Hash, Equals, h) } func (s *BlameSuite) TestNewLinesWithNewLine(c *C) { lines, err := newLines([]string{"foo"}, []*object.Commit{ {Message: "foo"}, {Message: "bar"}, }) c.Assert(err, IsNil) c.Assert(lines, HasLen, 2) c.Assert(lines[0].Text, Equals, "foo") c.Assert(lines[1].Text, Equals, "\n") } type blameTest struct { repo string rev string path string blames []string // the commits blamed for each line } // run a blame on all the suite's tests func (s *BlameSuite) TestBlame(c *C) { for _, t := range blameTests { r := s.NewRepositoryFromPackfile(fixtures.ByURL(t.repo).One()) exp := s.mockBlame(c, t, r) commit, err := r.CommitObject(plumbing.NewHash(t.rev)) c.Assert(err, IsNil) obt, err := Blame(commit, t.path) c.Assert(err, IsNil) c.Assert(obt, DeepEquals, exp) for i, l := range obt.Lines { c.Assert(l.Hash.String(), Equals, t.blames[i]) } } } func (s *BlameSuite) mockBlame(c *C, t blameTest, r *Repository) (blame *BlameResult) { commit, err := 
r.CommitObject(plumbing.NewHash(t.rev)) c.Assert(err, IsNil, Commentf("%v: repo=%s, rev=%s", err, t.repo, t.rev)) f, err := commit.File(t.path) c.Assert(err, IsNil) lines, err := f.Lines() c.Assert(err, IsNil) c.Assert(len(t.blames), Equals, len(lines), Commentf( "repo=%s, path=%s, rev=%s: the number of lines in the file and the number of expected blames differ (len(blames)=%d, len(lines)=%d)\nblames=%#q\nlines=%#q", t.repo, t.path, t.rev, len(t.blames), len(lines), t.blames, lines)) blamedLines := make([]*Line, 0, len(t.blames)) for i := range t.blames { commit, err := r.CommitObject(plumbing.NewHash(t.blames[i])) c.Assert(err, IsNil) l := &Line{ Author: commit.Author.Email, Text: lines[i], Date: commit.Author.When, Hash: commit.Hash, } blamedLines = append(blamedLines, l) } return &BlameResult{ Path: t.path, Rev: plumbing.NewHash(t.rev), Lines: blamedLines, } } // utility function to avoid writing so many repeated commits func repeat(s string, n int) []string { if n < 0 { panic("repeat: n < 0") } r := make([]string, 0, n) for i := 0; i < n; i++ { r = append(r, s) } return r } // utility function to concat slices func concat(vargs ...[]string) []string { var r []string for _, ss := range vargs { r = append(r, ss...) } return r } var blameTests = [...]blameTest{ // use the blame2humantest.bash script to easily add more tests. 
{"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "binary.jpg", concat( repeat("35e85108805c84807bc66a02d91535e1e24b38b9", 285), )}, {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "CHANGELOG", concat( repeat("b8e471f58bcbca63b07bda20e428190409c2db47", 1), )}, {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "go/example.go", concat( repeat("918c48b83bd081e863dbe1b80f8998f058cd8294", 142), )}, {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "json/long.json", concat( repeat("af2d6a6954d532f8ffb47615169c8fdf9d383a1a", 6492), )}, {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "json/short.json", concat( repeat("af2d6a6954d532f8ffb47615169c8fdf9d383a1a", 22), )}, {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "LICENSE", concat( repeat("b029517f6300c2da0f4b651b8642506cd6aaf45d", 22), )}, {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "php/crappy.php", concat( repeat("918c48b83bd081e863dbe1b80f8998f058cd8294", 259), )}, {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "vendor/foo.go", concat( repeat("6ecf0ef2c2dffb796033e5a02219af86ec6584e5", 7), )}, /* // Failed {"https://github.com/spinnaker/spinnaker.git", "f39d86f59a0781f130e8de6b2115329c1fbe9545", "InstallSpinnaker.sh", concat( repeat("ce9f123d790717599aaeb76bc62510de437761be", 2), repeat("a47d0aaeda421f06df248ad65bd58230766bf118", 1), repeat("23673af3ad70b50bba7fdafadc2323302f5ba520", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 29), repeat("9a06d3f20eabb254d0a1e2ff7735ef007ccd595e", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 4), repeat("a47d0aaeda421f06df248ad65bd58230766bf118", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 5), 
repeat("0c5bb1e4392e751f884f3c57de5d4aee72c40031", 2), repeat("d4b48a39aba7d3bd3e8abef2274a95b112d1ae73", 3), repeat("ce9f123d790717599aaeb76bc62510de437761be", 7), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 1), repeat("b7015a5d36990d69a054482556127b9c7404a24a", 1), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 2), repeat("ce9f123d790717599aaeb76bc62510de437761be", 5), repeat("d4b48a39aba7d3bd3e8abef2274a95b112d1ae73", 7), repeat("ce9f123d790717599aaeb76bc62510de437761be", 3), repeat("d4b48a39aba7d3bd3e8abef2274a95b112d1ae73", 6), repeat("ce9f123d790717599aaeb76bc62510de437761be", 10), repeat("b7015a5d36990d69a054482556127b9c7404a24a", 4), repeat("0c5bb1e4392e751f884f3c57de5d4aee72c40031", 2), repeat("ce9f123d790717599aaeb76bc62510de437761be", 2), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 4), repeat("23673af3ad70b50bba7fdafadc2323302f5ba520", 4), repeat("d4b48a39aba7d3bd3e8abef2274a95b112d1ae73", 4), repeat("ce9f123d790717599aaeb76bc62510de437761be", 1), repeat("d4b48a39aba7d3bd3e8abef2274a95b112d1ae73", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 1), repeat("b7015a5d36990d69a054482556127b9c7404a24a", 1), repeat("0c5bb1e4392e751f884f3c57de5d4aee72c40031", 1), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 13), repeat("b7015a5d36990d69a054482556127b9c7404a24a", 2), repeat("8eb116de9128c314ac8a6f5310ca500b8c74f5db", 6), repeat("ce9f123d790717599aaeb76bc62510de437761be", 1), repeat("b7015a5d36990d69a054482556127b9c7404a24a", 2), repeat("0c5bb1e4392e751f884f3c57de5d4aee72c40031", 1), repeat("8eb116de9128c314ac8a6f5310ca500b8c74f5db", 4), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 1), repeat("8eb116de9128c314ac8a6f5310ca500b8c74f5db", 3), repeat("ce9f123d790717599aaeb76bc62510de437761be", 2), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 1), repeat("8eb116de9128c314ac8a6f5310ca500b8c74f5db", 4), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 1), 
repeat("8eb116de9128c314ac8a6f5310ca500b8c74f5db", 3), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 15), repeat("b41d7c0e5b20bbe7c8eb6606731a3ff68f4e3941", 1), repeat("8eb116de9128c314ac8a6f5310ca500b8c74f5db", 1), repeat("b41d7c0e5b20bbe7c8eb6606731a3ff68f4e3941", 8), repeat("8eb116de9128c314ac8a6f5310ca500b8c74f5db", 2), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 12), repeat("505577dc87d300cf562dc4702a05a5615d90d855", 1), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 5), repeat("370d61cdbc1f3c90db6759f1599ccbabd40ad6c1", 1), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 4), repeat("8eb116de9128c314ac8a6f5310ca500b8c74f5db", 1), repeat("b41d7c0e5b20bbe7c8eb6606731a3ff68f4e3941", 5), repeat("8eb116de9128c314ac8a6f5310ca500b8c74f5db", 3), repeat("b41d7c0e5b20bbe7c8eb6606731a3ff68f4e3941", 2), repeat("8eb116de9128c314ac8a6f5310ca500b8c74f5db", 2), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 9), repeat("8eb116de9128c314ac8a6f5310ca500b8c74f5db", 1), repeat("b41d7c0e5b20bbe7c8eb6606731a3ff68f4e3941", 3), repeat("8eb116de9128c314ac8a6f5310ca500b8c74f5db", 4), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 1), repeat("8eb116de9128c314ac8a6f5310ca500b8c74f5db", 1), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 6), repeat("ce9f123d790717599aaeb76bc62510de437761be", 1), repeat("b7015a5d36990d69a054482556127b9c7404a24a", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 1), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 6), repeat("d2f6214b625db706384b378a29cc4c22237db97a", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 3), repeat("d2f6214b625db706384b378a29cc4c22237db97a", 1), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 4), repeat("b7015a5d36990d69a054482556127b9c7404a24a", 1), repeat("c9c2a0ec03968ab17e8b16fdec9661eb1dbea173", 1), repeat("d2f6214b625db706384b378a29cc4c22237db97a", 2), 
repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 1), repeat("b7015a5d36990d69a054482556127b9c7404a24a", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 12), repeat("6328ee836affafc1b52127147b5ca07300ac78e6", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 5), repeat("6328ee836affafc1b52127147b5ca07300ac78e6", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 3), repeat("6328ee836affafc1b52127147b5ca07300ac78e6", 1), repeat("01e65d67eed8afcb67a6bdf1c962541f62b299c9", 5), repeat("ce9f123d790717599aaeb76bc62510de437761be", 3), repeat("a47d0aaeda421f06df248ad65bd58230766bf118", 5), repeat("6328ee836affafc1b52127147b5ca07300ac78e6", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 5), repeat("6328ee836affafc1b52127147b5ca07300ac78e6", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 1), repeat("01e65d67eed8afcb67a6bdf1c962541f62b299c9", 2), repeat("6328ee836affafc1b52127147b5ca07300ac78e6", 1), repeat("01e65d67eed8afcb67a6bdf1c962541f62b299c9", 1), repeat("6328ee836affafc1b52127147b5ca07300ac78e6", 1), repeat("b2c7142082d52b09ca20228606c31c7479c0833e", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 1), repeat("495c7118e7cf757aa04eab410b64bfb5b5149ad2", 1), repeat("d4b48a39aba7d3bd3e8abef2274a95b112d1ae73", 1), repeat("495c7118e7cf757aa04eab410b64bfb5b5149ad2", 3), repeat("d4b48a39aba7d3bd3e8abef2274a95b112d1ae73", 1), repeat("495c7118e7cf757aa04eab410b64bfb5b5149ad2", 1), repeat("50d0556563599366f29cb286525780004fa5a317", 1), repeat("dd2d03c19658ff96d371aef00e75e2e54702da0e", 1), repeat("d4b48a39aba7d3bd3e8abef2274a95b112d1ae73", 1), repeat("dd2d03c19658ff96d371aef00e75e2e54702da0e", 2), repeat("ce9f123d790717599aaeb76bc62510de437761be", 2), repeat("01e65d67eed8afcb67a6bdf1c962541f62b299c9", 1), repeat("6328ee836affafc1b52127147b5ca07300ac78e6", 1), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 1), repeat("b5c6053a46993b20d1b91e7b7206bffa54669ad7", 1), repeat("9e74d009894d73dd07773ea6b3bdd8323db980f7", 1), 
repeat("ce9f123d790717599aaeb76bc62510de437761be", 1), repeat("d4b48a39aba7d3bd3e8abef2274a95b112d1ae73", 4), repeat("01e65d67eed8afcb67a6bdf1c962541f62b299c9", 1), repeat("6328ee836affafc1b52127147b5ca07300ac78e6", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 1), repeat("b7015a5d36990d69a054482556127b9c7404a24a", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 1), repeat("d2f6214b625db706384b378a29cc4c22237db97a", 1), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 3), repeat("b41d7c0e5b20bbe7c8eb6606731a3ff68f4e3941", 2), repeat("ce9f123d790717599aaeb76bc62510de437761be", 2), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 4), repeat("d2f6214b625db706384b378a29cc4c22237db97a", 1), repeat("b7015a5d36990d69a054482556127b9c7404a24a", 1), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 5), repeat("b41d7c0e5b20bbe7c8eb6606731a3ff68f4e3941", 2), repeat("d2f6214b625db706384b378a29cc4c22237db97a", 1), repeat("ce9f123d790717599aaeb76bc62510de437761be", 5), repeat("ba486de7c025457963701114c683dcd4708e1dee", 4), repeat("6328ee836affafc1b52127147b5ca07300ac78e6", 1), repeat("01e65d67eed8afcb67a6bdf1c962541f62b299c9", 1), repeat("6328ee836affafc1b52127147b5ca07300ac78e6", 1), repeat("01e65d67eed8afcb67a6bdf1c962541f62b299c9", 3), repeat("6328ee836affafc1b52127147b5ca07300ac78e6", 1), repeat("01e65d67eed8afcb67a6bdf1c962541f62b299c9", 3), repeat("6328ee836affafc1b52127147b5ca07300ac78e6", 2), repeat("01e65d67eed8afcb67a6bdf1c962541f62b299c9", 3), repeat("3de4f77c105f700f50d9549d32b9a05a01b46c4b", 1), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 2), repeat("370d61cdbc1f3c90db6759f1599ccbabd40ad6c1", 6), repeat("dd7e66c862209e8b912694a582a09c0db3227f0d", 1), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 2), repeat("dd7e66c862209e8b912694a582a09c0db3227f0d", 3), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 1), repeat("dd7e66c862209e8b912694a582a09c0db3227f0d", 1), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 3), )}, */ 
{"https://github.com/spinnaker/spinnaker.git", "f39d86f59a0781f130e8de6b2115329c1fbe9545", "pylib/spinnaker/reconfigure_spinnaker.py", concat( repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 22), repeat("c89dab0d42f1856d157357e9010f8cc6a12f5b1f", 7), )}, {"https://github.com/spinnaker/spinnaker.git", "f39d86f59a0781f130e8de6b2115329c1fbe9545", "pylib/spinnaker/validate_configuration.py", concat( repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 29), repeat("1e3d328a2cabda5d0aaddc5dec65271343e0dc37", 19), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 15), repeat("b5d999e2986e190d81767cd3cfeda0260f9f6fb8", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 12), repeat("1e14f94bcf82694fdc7e2dcbbfdbbed58db0f4d9", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 4), repeat("b5d999e2986e190d81767cd3cfeda0260f9f6fb8", 8), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 1), repeat("b5d999e2986e190d81767cd3cfeda0260f9f6fb8", 4), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 46), repeat("1e14f94bcf82694fdc7e2dcbbfdbbed58db0f4d9", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 4), repeat("1e3d328a2cabda5d0aaddc5dec65271343e0dc37", 42), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 1), repeat("1e3d328a2cabda5d0aaddc5dec65271343e0dc37", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 3), repeat("1e3d328a2cabda5d0aaddc5dec65271343e0dc37", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 1), repeat("1e14f94bcf82694fdc7e2dcbbfdbbed58db0f4d9", 8), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 1), repeat("1e14f94bcf82694fdc7e2dcbbfdbbed58db0f4d9", 2), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 3), repeat("1e3d328a2cabda5d0aaddc5dec65271343e0dc37", 3), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 12), repeat("1e14f94bcf82694fdc7e2dcbbfdbbed58db0f4d9", 10), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 69), repeat("b5d999e2986e190d81767cd3cfeda0260f9f6fb8", 7), 
repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 4), )}, {"https://github.com/spinnaker/spinnaker.git", "f39d86f59a0781f130e8de6b2115329c1fbe9545", "pylib/spinnaker/run.py", concat( repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 185), )}, /* // Fail by 3 {"https://github.com/spinnaker/spinnaker.git", "f39d86f59a0781f130e8de6b2115329c1fbe9545", "pylib/spinnaker/configurator.py", concat( repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 53), repeat("c89dab0d42f1856d157357e9010f8cc6a12f5b1f", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 4), repeat("e805183c72f0426fb073728c01901c2fd2db1da6", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 6), repeat("023d4fb17b76e0fe0764971df8b8538b735a1d67", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 36), repeat("1e14f94bcf82694fdc7e2dcbbfdbbed58db0f4d9", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 3), repeat("1e14f94bcf82694fdc7e2dcbbfdbbed58db0f4d9", 3), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 4), repeat("c89dab0d42f1856d157357e9010f8cc6a12f5b1f", 13), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 2), repeat("c89dab0d42f1856d157357e9010f8cc6a12f5b1f", 18), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 2), repeat("1e14f94bcf82694fdc7e2dcbbfdbbed58db0f4d9", 1), repeat("023d4fb17b76e0fe0764971df8b8538b735a1d67", 17), repeat("c89dab0d42f1856d157357e9010f8cc6a12f5b1f", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 43), )}, */ {"https://github.com/spinnaker/spinnaker.git", "f39d86f59a0781f130e8de6b2115329c1fbe9545", "pylib/spinnaker/__init__.py", []string{}}, {"https://github.com/spinnaker/spinnaker.git", "f39d86f59a0781f130e8de6b2115329c1fbe9545", "gradle/wrapper/gradle-wrapper.jar", concat( repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 1), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 7), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 2), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 2), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 3), 
repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 1), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 1), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 10), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 11), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 29), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 7), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 58), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 1), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 1), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 1), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 1), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 2), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 1), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 2), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 1), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 13), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 1), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 4), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 1), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 3), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 1), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 13), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 1), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 2), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 9), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 3), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 1), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 1), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 17), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 3), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 6), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 6), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 1), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 3), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 5), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 4), 
repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 1), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 3), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 1), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 2), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 1), repeat("11d6c1020b1765e236ca65b2709d37b5bfdba0f4", 6), repeat("bc02440df2ff95a014a7b3cb11b98c3a2bded777", 55), )}, {"https://github.com/spinnaker/spinnaker.git", "f39d86f59a0781f130e8de6b2115329c1fbe9545", "config/settings.js", concat( repeat("ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", 17), repeat("99534ecc895fe17a1d562bb3049d4168a04d0865", 1), repeat("ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", 43), repeat("d2838db9f6ef9628645e7d04cd9658a83e8708ea", 1), repeat("637ba49300f701cfbd859c1ccf13c4f39a9ba1c8", 1), repeat("ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", 13), )}, /* // fail a few lines {"https://github.com/spinnaker/spinnaker.git", "f39d86f59a0781f130e8de6b2115329c1fbe9545", "config/default-spinnaker-local.yml", concat( repeat("ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", 9), repeat("5e09821cbd7d710405b61cab0a795c2982a71b9c", 2), repeat("99534ecc895fe17a1d562bb3049d4168a04d0865", 1), repeat("ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", 2), repeat("a596972a661d9a7deca8abd18b52ce1a39516e89", 1), repeat("ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", 5), repeat("5e09821cbd7d710405b61cab0a795c2982a71b9c", 2), repeat("a596972a661d9a7deca8abd18b52ce1a39516e89", 1), repeat("ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", 5), repeat("5e09821cbd7d710405b61cab0a795c2982a71b9c", 1), repeat("8980daf661408a3faa1f22c225702a5c1d11d5c9", 1), repeat("ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", 25), repeat("caf6d62e8285d4681514dd8027356fb019bc97ff", 1), repeat("eaf7614cad81e8ab5c813dd4821129d0c04ea449", 1), repeat("caf6d62e8285d4681514dd8027356fb019bc97ff", 1), repeat("ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", 24), repeat("974b775a8978b120ff710cac93a21c7387b914c9", 2), repeat("3ce7b902a51bac2f10994f7d1f251b616c975e54", 1), 
repeat("5a2a845bc08974a36d599a4a4b7e25be833823b0", 6), repeat("41e96c54a478e5d09dd07ed7feb2d8d08d8c7e3c", 14), repeat("7c8d9a6081d9cb7a56c479bfe64d70540ea32795", 5), repeat("5a2a845bc08974a36d599a4a4b7e25be833823b0", 2), )}, */ /* // fail one line {"https://github.com/spinnaker/spinnaker.git", "f39d86f59a0781f130e8de6b2115329c1fbe9545", "config/spinnaker.yml", concat( repeat("ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", 32), repeat("41e96c54a478e5d09dd07ed7feb2d8d08d8c7e3c", 2), repeat("5a2a845bc08974a36d599a4a4b7e25be833823b0", 1), repeat("41e96c54a478e5d09dd07ed7feb2d8d08d8c7e3c", 6), repeat("5a2a845bc08974a36d599a4a4b7e25be833823b0", 2), repeat("41e96c54a478e5d09dd07ed7feb2d8d08d8c7e3c", 2), repeat("5a2a845bc08974a36d599a4a4b7e25be833823b0", 2), repeat("41e96c54a478e5d09dd07ed7feb2d8d08d8c7e3c", 3), repeat("7c8d9a6081d9cb7a56c479bfe64d70540ea32795", 3), repeat("ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", 50), repeat("974b775a8978b120ff710cac93a21c7387b914c9", 2), repeat("d4553dac205023fa77652308af1a2d1cf52138fb", 1), repeat("ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", 9), repeat("caf6d62e8285d4681514dd8027356fb019bc97ff", 1), repeat("eaf7614cad81e8ab5c813dd4821129d0c04ea449", 1), repeat("caf6d62e8285d4681514dd8027356fb019bc97ff", 1), repeat("ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", 39), repeat("079e42e7c979541b6fab7343838f7b9fd4a360cd", 6), repeat("ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", 15), )}, */ /* {"https://github.com/spinnaker/spinnaker.git", "f39d86f59a0781f130e8de6b2115329c1fbe9545", "dev/install_development.sh", concat( repeat("99534ecc895fe17a1d562bb3049d4168a04d0865", 1), repeat("d1ff4e13e9e0b500821aa558373878f93487e34b", 71), )}, */ /* // FAIL two lines interchanged {"https://github.com/spinnaker/spinnaker.git", "f39d86f59a0781f130e8de6b2115329c1fbe9545", "dev/bootstrap_dev.sh", concat( repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 95), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 
10), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 7), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 2), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 1), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 3), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 4), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 12), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 2), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 2), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 2), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 3), repeat("d1ff4e13e9e0b500821aa558373878f93487e34b", 6), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 1), repeat("d1ff4e13e9e0b500821aa558373878f93487e34b", 4), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 1), repeat("376599177551c3f04ccc94d71bbb4d037dec0c3f", 2), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 17), repeat("d1ff4e13e9e0b500821aa558373878f93487e34b", 2), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 2), repeat("d1ff4e13e9e0b500821aa558373878f93487e34b", 2), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 3), repeat("d1ff4e13e9e0b500821aa558373878f93487e34b", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 3), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 5), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 5), repeat("d1ff4e13e9e0b500821aa558373878f93487e34b", 8), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 4), repeat("d1ff4e13e9e0b500821aa558373878f93487e34b", 1), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 6), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 1), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 4), 
repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 10), repeat("d1ff4e13e9e0b500821aa558373878f93487e34b", 2), repeat("fc28a378558cdb5bbc08b6dcb96ee77c5b716760", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 1), repeat("d1ff4e13e9e0b500821aa558373878f93487e34b", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 8), repeat("d1ff4e13e9e0b500821aa558373878f93487e34b", 1), repeat("fc28a378558cdb5bbc08b6dcb96ee77c5b716760", 1), repeat("d1ff4e13e9e0b500821aa558373878f93487e34b", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 4), repeat("24551a5d486969a2972ee05e87f16444890f9555", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 2), repeat("24551a5d486969a2972ee05e87f16444890f9555", 1), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 8), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 13), repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 5), repeat("24551a5d486969a2972ee05e87f16444890f9555", 1), repeat("838aed816872c52ed435e4876a7b64dba0bed500", 8), )}, */ /* // FAIL move? {"https://github.com/spinnaker/spinnaker.git", "f39d86f59a0781f130e8de6b2115329c1fbe9545", "dev/create_google_dev_vm.sh", concat( repeat("a24001f6938d425d0e7504bdf5d27fc866a85c3d", 20), )}, */ } golang-gopkg-src-d-go-git.v4-4.11.0/cli/000077500000000000000000000000001345605224300174465ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/cli/go-git/000077500000000000000000000000001345605224300206345ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/cli/go-git/main.go000066400000000000000000000016601345605224300221120ustar00rootroot00000000000000package main import ( "os" "path/filepath" "github.com/jessevdk/go-flags" ) const ( bin = "go-git" receivePackBin = "git-receive-pack" uploadPackBin = "git-upload-pack" ) func main() { switch filepath.Base(os.Args[0]) { case receivePackBin: os.Args = append([]string{"git", "receive-pack"}, os.Args[1:]...) case uploadPackBin: os.Args = append([]string{"git", "upload-pack"}, os.Args[1:]...) 
} parser := flags.NewNamedParser(bin, flags.Default) parser.AddCommand("receive-pack", "", "", &CmdReceivePack{}) parser.AddCommand("upload-pack", "", "", &CmdUploadPack{}) parser.AddCommand("version", "Show the version information.", "", &CmdVersion{}) _, err := parser.Parse() if err != nil { if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrCommandRequired { parser.WriteHelp(os.Stdout) } os.Exit(1) } } type cmd struct { Verbose bool `short:"v" description:"Activates the verbose mode"` } golang-gopkg-src-d-go-git.v4-4.11.0/cli/go-git/receive_pack.go000066400000000000000000000012561345605224300236070ustar00rootroot00000000000000package main import ( "fmt" "os" "path/filepath" "gopkg.in/src-d/go-git.v4/plumbing/transport/file" ) type CmdReceivePack struct { cmd Args struct { GitDir string `positional-arg-name:"git-dir" required:"true"` } `positional-args:"yes"` } func (CmdReceivePack) Usage() string { //TODO: git-receive-pack returns error code 129 if arguments are invalid. return fmt.Sprintf("usage: %s ", os.Args[0]) } func (c *CmdReceivePack) Execute(args []string) error { gitDir, err := filepath.Abs(c.Args.GitDir) if err != nil { return err } if err := file.ServeReceivePack(gitDir); err != nil { fmt.Fprintln(os.Stderr, "ERR:", err) os.Exit(128) } return nil } golang-gopkg-src-d-go-git.v4-4.11.0/cli/go-git/upload_pack.go000066400000000000000000000013521345605224300234460ustar00rootroot00000000000000package main import ( "fmt" "os" "path/filepath" "gopkg.in/src-d/go-git.v4/plumbing/transport/file" ) type CmdUploadPack struct { cmd Args struct { GitDir string `positional-arg-name:"git-dir" required:"true"` } `positional-args:"yes"` } func (CmdUploadPack) Usage() string { //TODO: usage: git upload-pack [--strict] [--timeout=] //TODO: git-upload-pack returns error code 129 if arguments are invalid. 
return fmt.Sprintf("usage: %s ", os.Args[0]) } func (c *CmdUploadPack) Execute(args []string) error { gitDir, err := filepath.Abs(c.Args.GitDir) if err != nil { return err } if err := file.ServeUploadPack(gitDir); err != nil { fmt.Fprintln(os.Stderr, "ERR:", err) os.Exit(128) } return nil } golang-gopkg-src-d-go-git.v4-4.11.0/cli/go-git/version.go000066400000000000000000000003271345605224300226520ustar00rootroot00000000000000package main import "fmt" var version string var build string type CmdVersion struct{} func (c *CmdVersion) Execute(args []string) error { fmt.Printf("%s (%s) - build %s\n", bin, version, build) return nil } golang-gopkg-src-d-go-git.v4-4.11.0/common.go000066400000000000000000000007471345605224300205260ustar00rootroot00000000000000package git import "strings" const defaultDotGitPath = ".git" // countLines returns the number of lines in a string à la git, this is // The newline character is assumed to be '\n'. The empty string // contains 0 lines. If the last line of the string doesn't end with a // newline, it will still be considered a line. func countLines(s string) int { if s == "" { return 0 } nEOL := strings.Count(s, "\n") if strings.HasSuffix(s, "\n") { return nEOL } return nEOL + 1 } golang-gopkg-src-d-go-git.v4-4.11.0/common_test.go000066400000000000000000000072721345605224300215650ustar00rootroot00000000000000package git import ( "testing" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/storage/filesystem" "gopkg.in/src-d/go-git.v4/storage/memory" . 
	"gopkg.in/check.v1"

	"gopkg.in/src-d/go-billy.v4"
	"gopkg.in/src-d/go-billy.v4/memfs"
	"gopkg.in/src-d/go-billy.v4/util"
	"gopkg.in/src-d/go-git-fixtures.v3"
)

// Test hooks the gocheck suites in this package into the standard "go test"
// runner.
func Test(t *testing.T) { TestingT(t) }

// BaseSuite provides fixture-backed repositories shared by the test suites
// in this package.
type BaseSuite struct {
	fixtures.Suite
	// Repository is the basic fixture repository, built once per suite.
	Repository *Repository

	// backupProtocol is not referenced in this chunk — presumably used by
	// tests that swap the default transport and need to restore it; confirm
	// against the suites that embed BaseSuite.
	backupProtocol transport.Transport
	// cache memoizes repositories built from packfiles, keyed by the
	// packfile hash string.
	cache map[string]*Repository
}

// SetUpSuite builds the basic fixture repository and resets the packfile
// repository cache.
func (s *BaseSuite) SetUpSuite(c *C) {
	s.Suite.SetUpSuite(c)
	s.buildBasicRepository(c)

	s.cache = make(map[string]*Repository)
}

func (s *BaseSuite) TearDownSuite(c *C) {
	s.Suite.TearDownSuite(c)
}

// buildBasicRepository loads the first "basic" fixture into s.Repository.
func (s *BaseSuite) buildBasicRepository(c *C) {
	f := fixtures.Basic().One()

	s.Repository = s.NewRepository(f)
}

// NewRepository returns a new repository using the .git folder, if the fixture
// is tagged as worktree the filesystem from fixture is used, otherwise a new
// memfs filesystem is used as worktree.
func (s *BaseSuite) NewRepository(f *fixtures.Fixture) *Repository {
	var worktree, dotgit billy.Filesystem
	if f.Is("worktree") {
		r, err := PlainOpen(f.Worktree().Root())
		if err != nil {
			panic(err)
		}

		return r
	}

	dotgit = f.DotGit()
	worktree = memfs.New()

	st := filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault())

	r, err := Open(st, worktree)
	if err != nil {
		panic(err)
	}

	return r
}

// NewRepositoryWithEmptyWorktree returns a new repository using the .git
// folder from the fixture, paired with an empty memfs worktree; the index and
// the modules are deleted from the .git folder.
func (s *BaseSuite) NewRepositoryWithEmptyWorktree(f *fixtures.Fixture) *Repository { dotgit := f.DotGit() err := dotgit.Remove("index") if err != nil { panic(err) } err = util.RemoveAll(dotgit, "modules") if err != nil { panic(err) } worktree := memfs.New() st := filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault()) r, err := Open(st, worktree) if err != nil { panic(err) } return r } func (s *BaseSuite) NewRepositoryFromPackfile(f *fixtures.Fixture) *Repository { h := f.PackfileHash.String() if r, ok := s.cache[h]; ok { return r } storer := memory.NewStorage() p := f.Packfile() defer p.Close() if err := packfile.UpdateObjectStorage(storer, p); err != nil { panic(err) } storer.SetReference(plumbing.NewHashReference(plumbing.HEAD, f.Head)) r, err := Open(storer, memfs.New()) if err != nil { panic(err) } s.cache[h] = r return r } func (s *BaseSuite) GetBasicLocalRepositoryURL() string { fixture := fixtures.Basic().One() return s.GetLocalRepositoryURL(fixture) } func (s *BaseSuite) GetLocalRepositoryURL(f *fixtures.Fixture) string { return f.DotGit().Root() } type SuiteCommon struct{} var _ = Suite(&SuiteCommon{}) var countLinesTests = [...]struct { i string // the string we want to count lines from e int // the expected number of lines in i }{ {"", 0}, {"a", 1}, {"a\n", 1}, {"a\nb", 2}, {"a\nb\n", 2}, {"a\nb\nc", 3}, {"a\nb\nc\n", 3}, {"a\n\n\nb\n", 4}, {"first line\n\tsecond line\nthird line\n", 3}, } func (s *SuiteCommon) TestCountLines(c *C) { for i, t := range countLinesTests { o := countLines(t.i) c.Assert(o, Equals, t.e, Commentf("subtest %d, input=%q", i, t.i)) } } func AssertReferences(c *C, r *Repository, expected map[string]string) { for name, target := range expected { expected := plumbing.NewReferenceFromStrings(name, target) obtained, err := r.Reference(expected.Name(), true) c.Assert(err, IsNil) c.Assert(obtained, DeepEquals, expected) } } 
golang-gopkg-src-d-go-git.v4-4.11.0/config/000077500000000000000000000000001345605224300201445ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/config/branch.go000066400000000000000000000025331345605224300217330ustar00rootroot00000000000000package config import ( "errors" "gopkg.in/src-d/go-git.v4/plumbing" format "gopkg.in/src-d/go-git.v4/plumbing/format/config" ) var ( errBranchEmptyName = errors.New("branch config: empty name") errBranchInvalidMerge = errors.New("branch config: invalid merge") ) // Branch contains information on the // local branches and which remote to track type Branch struct { // Name of branch Name string // Remote name of remote to track Remote string // Merge is the local refspec for the branch Merge plumbing.ReferenceName raw *format.Subsection } // Validate validates fields of branch func (b *Branch) Validate() error { if b.Name == "" { return errBranchEmptyName } if b.Merge != "" && !b.Merge.IsBranch() { return errBranchInvalidMerge } return nil } func (b *Branch) marshal() *format.Subsection { if b.raw == nil { b.raw = &format.Subsection{} } b.raw.Name = b.Name if b.Remote == "" { b.raw.RemoveOption(remoteSection) } else { b.raw.SetOption(remoteSection, b.Remote) } if b.Merge == "" { b.raw.RemoveOption(mergeKey) } else { b.raw.SetOption(mergeKey, string(b.Merge)) } return b.raw } func (b *Branch) unmarshal(s *format.Subsection) error { b.raw = s b.Name = b.raw.Name b.Remote = b.raw.Options.Get(remoteSection) b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey)) return b.Validate() } golang-gopkg-src-d-go-git.v4-4.11.0/config/branch_test.go000066400000000000000000000033661345605224300227770ustar00rootroot00000000000000package config import ( . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing" ) type BranchSuite struct{} var _ = Suite(&BranchSuite{}) func (b *BranchSuite) TestValidateName(c *C) { goodBranch := Branch{ Name: "master", Remote: "some_remote", Merge: "refs/heads/master", } badBranch := Branch{ Remote: "some_remote", Merge: "refs/heads/master", } c.Assert(goodBranch.Validate(), IsNil) c.Assert(badBranch.Validate(), NotNil) } func (b *BranchSuite) TestValidateMerge(c *C) { goodBranch := Branch{ Name: "master", Remote: "some_remote", Merge: "refs/heads/master", } badBranch := Branch{ Name: "master", Remote: "some_remote", Merge: "blah", } c.Assert(goodBranch.Validate(), IsNil) c.Assert(badBranch.Validate(), NotNil) } func (b *BranchSuite) TestMarshall(c *C) { expected := []byte(`[core] bare = false [branch "branch-tracking-on-clone"] remote = fork merge = refs/heads/branch-tracking-on-clone `) cfg := NewConfig() cfg.Branches["branch-tracking-on-clone"] = &Branch{ Name: "branch-tracking-on-clone", Remote: "fork", Merge: plumbing.ReferenceName("refs/heads/branch-tracking-on-clone"), } actual, err := cfg.Marshal() c.Assert(err, IsNil) c.Assert(string(actual), Equals, string(expected)) } func (b *BranchSuite) TestUnmarshall(c *C) { input := []byte(`[core] bare = false [branch "branch-tracking-on-clone"] remote = fork merge = refs/heads/branch-tracking-on-clone `) cfg := NewConfig() err := cfg.Unmarshal(input) c.Assert(err, IsNil) branch := cfg.Branches["branch-tracking-on-clone"] c.Assert(branch.Name, Equals, "branch-tracking-on-clone") c.Assert(branch.Remote, Equals, "fork") c.Assert(branch.Merge, Equals, plumbing.ReferenceName("refs/heads/branch-tracking-on-clone")) } golang-gopkg-src-d-go-git.v4-4.11.0/config/config.go000066400000000000000000000225431345605224300217460ustar00rootroot00000000000000// Package config contains the abstraction of multiple config files package config import ( "bytes" "errors" "fmt" "sort" "strconv" "gopkg.in/src-d/go-git.v4/internal/url" format 
"gopkg.in/src-d/go-git.v4/plumbing/format/config" ) const ( // DefaultFetchRefSpec is the default refspec used for fetch. DefaultFetchRefSpec = "+refs/heads/*:refs/remotes/%s/*" // DefaultPushRefSpec is the default refspec used for push. DefaultPushRefSpec = "refs/heads/*:refs/heads/*" ) // ConfigStorer generic storage of Config object type ConfigStorer interface { Config() (*Config, error) SetConfig(*Config) error } var ( ErrInvalid = errors.New("config invalid key in remote or branch") ErrRemoteConfigNotFound = errors.New("remote config not found") ErrRemoteConfigEmptyURL = errors.New("remote config: empty URL") ErrRemoteConfigEmptyName = errors.New("remote config: empty name") ) // Config contains the repository configuration // ftp://www.kernel.org/pub/software/scm/git/docs/git-config.html#FILES type Config struct { Core struct { // IsBare if true this repository is assumed to be bare and has no // working directory associated with it. IsBare bool // Worktree is the path to the root of the working tree. Worktree string // CommentChar is the character indicating the start of a // comment for commands like commit and tag CommentChar string } Pack struct { // Window controls the size of the sliding window for delta // compression. The default is 10. A value of 0 turns off // delta compression entirely. Window uint } // Remotes list of repository remotes, the key of the map is the name // of the remote, should equal to RemoteConfig.Name. Remotes map[string]*RemoteConfig // Submodules list of repository submodules, the key of the map is the name // of the submodule, should equal to Submodule.Name. Submodules map[string]*Submodule // Branches list of branches, the key is the branch name and should // equal Branch.Name Branches map[string]*Branch // Raw contains the raw information of a config file. The main goal is // preserve the parsed information from the original format, to avoid // dropping unsupported fields. 
Raw *format.Config } // NewConfig returns a new empty Config. func NewConfig() *Config { config := &Config{ Remotes: make(map[string]*RemoteConfig), Submodules: make(map[string]*Submodule), Branches: make(map[string]*Branch), Raw: format.New(), } config.Pack.Window = DefaultPackWindow return config } // Validate validates the fields and sets the default values. func (c *Config) Validate() error { for name, r := range c.Remotes { if r.Name != name { return ErrInvalid } if err := r.Validate(); err != nil { return err } } for name, b := range c.Branches { if b.Name != name { return ErrInvalid } if err := b.Validate(); err != nil { return err } } return nil } const ( remoteSection = "remote" submoduleSection = "submodule" branchSection = "branch" coreSection = "core" packSection = "pack" fetchKey = "fetch" urlKey = "url" bareKey = "bare" worktreeKey = "worktree" commentCharKey = "commentChar" windowKey = "window" mergeKey = "merge" // DefaultPackWindow holds the number of previous objects used to // generate deltas. The value 10 is the same used by git command. DefaultPackWindow = uint(10) ) // Unmarshal parses a git-config file and stores it. 
func (c *Config) Unmarshal(b []byte) error { r := bytes.NewBuffer(b) d := format.NewDecoder(r) c.Raw = format.New() if err := d.Decode(c.Raw); err != nil { return err } c.unmarshalCore() if err := c.unmarshalPack(); err != nil { return err } unmarshalSubmodules(c.Raw, c.Submodules) if err := c.unmarshalBranches(); err != nil { return err } return c.unmarshalRemotes() } func (c *Config) unmarshalCore() { s := c.Raw.Section(coreSection) if s.Options.Get(bareKey) == "true" { c.Core.IsBare = true } c.Core.Worktree = s.Options.Get(worktreeKey) c.Core.CommentChar = s.Options.Get(commentCharKey) } func (c *Config) unmarshalPack() error { s := c.Raw.Section(packSection) window := s.Options.Get(windowKey) if window == "" { c.Pack.Window = DefaultPackWindow } else { winUint, err := strconv.ParseUint(window, 10, 32) if err != nil { return err } c.Pack.Window = uint(winUint) } return nil } func (c *Config) unmarshalRemotes() error { s := c.Raw.Section(remoteSection) for _, sub := range s.Subsections { r := &RemoteConfig{} if err := r.unmarshal(sub); err != nil { return err } c.Remotes[r.Name] = r } return nil } func unmarshalSubmodules(fc *format.Config, submodules map[string]*Submodule) { s := fc.Section(submoduleSection) for _, sub := range s.Subsections { m := &Submodule{} m.unmarshal(sub) if m.Validate() == ErrModuleBadPath { continue } submodules[m.Name] = m } } func (c *Config) unmarshalBranches() error { bs := c.Raw.Section(branchSection) for _, sub := range bs.Subsections { b := &Branch{} if err := b.unmarshal(sub); err != nil { return err } c.Branches[b.Name] = b } return nil } // Marshal returns Config encoded as a git-config file. 
func (c *Config) Marshal() ([]byte, error) { c.marshalCore() c.marshalPack() c.marshalRemotes() c.marshalSubmodules() c.marshalBranches() buf := bytes.NewBuffer(nil) if err := format.NewEncoder(buf).Encode(c.Raw); err != nil { return nil, err } return buf.Bytes(), nil } func (c *Config) marshalCore() { s := c.Raw.Section(coreSection) s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare)) if c.Core.Worktree != "" { s.SetOption(worktreeKey, c.Core.Worktree) } } func (c *Config) marshalPack() { s := c.Raw.Section(packSection) if c.Pack.Window != DefaultPackWindow { s.SetOption(windowKey, fmt.Sprintf("%d", c.Pack.Window)) } } func (c *Config) marshalRemotes() { s := c.Raw.Section(remoteSection) newSubsections := make(format.Subsections, 0, len(c.Remotes)) added := make(map[string]bool) for _, subsection := range s.Subsections { if remote, ok := c.Remotes[subsection.Name]; ok { newSubsections = append(newSubsections, remote.marshal()) added[subsection.Name] = true } } remoteNames := make([]string, 0, len(c.Remotes)) for name := range c.Remotes { remoteNames = append(remoteNames, name) } sort.Strings(remoteNames) for _, name := range remoteNames { if !added[name] { newSubsections = append(newSubsections, c.Remotes[name].marshal()) } } s.Subsections = newSubsections } func (c *Config) marshalSubmodules() { s := c.Raw.Section(submoduleSection) s.Subsections = make(format.Subsections, len(c.Submodules)) var i int for _, r := range c.Submodules { section := r.marshal() // the submodule section at config is a subset of the .gitmodule file // we should remove the non-valid options for the config file. 
section.RemoveOption(pathKey) s.Subsections[i] = section i++ } } func (c *Config) marshalBranches() { s := c.Raw.Section(branchSection) newSubsections := make(format.Subsections, 0, len(c.Branches)) added := make(map[string]bool) for _, subsection := range s.Subsections { if branch, ok := c.Branches[subsection.Name]; ok { newSubsections = append(newSubsections, branch.marshal()) added[subsection.Name] = true } } branchNames := make([]string, 0, len(c.Branches)) for name := range c.Branches { branchNames = append(branchNames, name) } sort.Strings(branchNames) for _, name := range branchNames { if !added[name] { newSubsections = append(newSubsections, c.Branches[name].marshal()) } } s.Subsections = newSubsections } // RemoteConfig contains the configuration for a given remote repository. type RemoteConfig struct { // Name of the remote Name string // URLs the URLs of a remote repository. It must be non-empty. Fetch will // always use the first URL, while push will use all of them. URLs []string // Fetch the default set of "refspec" for fetch operation Fetch []RefSpec // raw representation of the subsection, filled by marshal or unmarshal are // called raw *format.Subsection } // Validate validates the fields and sets the default values. func (c *RemoteConfig) Validate() error { if c.Name == "" { return ErrRemoteConfigEmptyName } if len(c.URLs) == 0 { return ErrRemoteConfigEmptyURL } for _, r := range c.Fetch { if err := r.Validate(); err != nil { return err } } if len(c.Fetch) == 0 { c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))} } return nil } func (c *RemoteConfig) unmarshal(s *format.Subsection) error { c.raw = s fetch := []RefSpec{} for _, f := range c.raw.Options.GetAll(fetchKey) { rs := RefSpec(f) if err := rs.Validate(); err != nil { return err } fetch = append(fetch, rs) } c.Name = c.raw.Name c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...) 
c.Fetch = fetch return nil } func (c *RemoteConfig) marshal() *format.Subsection { if c.raw == nil { c.raw = &format.Subsection{} } c.raw.Name = c.Name if len(c.URLs) == 0 { c.raw.RemoveOption(urlKey) } else { c.raw.SetOption(urlKey, c.URLs...) } if len(c.Fetch) == 0 { c.raw.RemoveOption(fetchKey) } else { var values []string for _, rs := range c.Fetch { values = append(values, rs.String()) } c.raw.SetOption(fetchKey, values...) } return c.raw } func (c *RemoteConfig) IsFirstURLLocal() bool { return url.IsLocalEndpoint(c.URLs[0]) } golang-gopkg-src-d-go-git.v4-4.11.0/config/config_test.go000066400000000000000000000146441345605224300230100ustar00rootroot00000000000000package config import ( . "gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing" ) type ConfigSuite struct{} var _ = Suite(&ConfigSuite{}) func (s *ConfigSuite) TestUnmarshall(c *C) { input := []byte(`[core] bare = true worktree = foo commentchar = bar [pack] window = 20 [remote "origin"] url = git@github.com:mcuadros/go-git.git fetch = +refs/heads/*:refs/remotes/origin/* [remote "alt"] url = git@github.com:mcuadros/go-git.git url = git@github.com:src-d/go-git.git fetch = +refs/heads/*:refs/remotes/origin/* fetch = +refs/pull/*:refs/remotes/origin/pull/* [remote "win-local"] url = X:\\Git\\ [submodule "qux"] path = qux url = https://github.com/foo/qux.git branch = bar [branch "master"] remote = origin merge = refs/heads/master `) cfg := NewConfig() err := cfg.Unmarshal(input) c.Assert(err, IsNil) c.Assert(cfg.Core.IsBare, Equals, true) c.Assert(cfg.Core.Worktree, Equals, "foo") c.Assert(cfg.Core.CommentChar, Equals, "bar") c.Assert(cfg.Pack.Window, Equals, uint(20)) c.Assert(cfg.Remotes, HasLen, 3) c.Assert(cfg.Remotes["origin"].Name, Equals, "origin") c.Assert(cfg.Remotes["origin"].URLs, DeepEquals, []string{"git@github.com:mcuadros/go-git.git"}) c.Assert(cfg.Remotes["origin"].Fetch, DeepEquals, []RefSpec{"+refs/heads/*:refs/remotes/origin/*"}) c.Assert(cfg.Remotes["alt"].Name, Equals, "alt") 
c.Assert(cfg.Remotes["alt"].URLs, DeepEquals, []string{"git@github.com:mcuadros/go-git.git", "git@github.com:src-d/go-git.git"}) c.Assert(cfg.Remotes["alt"].Fetch, DeepEquals, []RefSpec{"+refs/heads/*:refs/remotes/origin/*", "+refs/pull/*:refs/remotes/origin/pull/*"}) c.Assert(cfg.Remotes["win-local"].Name, Equals, "win-local") c.Assert(cfg.Remotes["win-local"].URLs, DeepEquals, []string{"X:\\Git\\"}) c.Assert(cfg.Submodules, HasLen, 1) c.Assert(cfg.Submodules["qux"].Name, Equals, "qux") c.Assert(cfg.Submodules["qux"].URL, Equals, "https://github.com/foo/qux.git") c.Assert(cfg.Submodules["qux"].Branch, Equals, "bar") c.Assert(cfg.Branches["master"].Remote, Equals, "origin") c.Assert(cfg.Branches["master"].Merge, Equals, plumbing.ReferenceName("refs/heads/master")) } func (s *ConfigSuite) TestMarshall(c *C) { output := []byte(`[core] bare = true worktree = bar [pack] window = 20 [remote "alt"] url = git@github.com:mcuadros/go-git.git url = git@github.com:src-d/go-git.git fetch = +refs/heads/*:refs/remotes/origin/* fetch = +refs/pull/*:refs/remotes/origin/pull/* [remote "origin"] url = git@github.com:mcuadros/go-git.git [remote "win-local"] url = "X:\\Git\\" [submodule "qux"] url = https://github.com/foo/qux.git [branch "master"] remote = origin merge = refs/heads/master `) cfg := NewConfig() cfg.Core.IsBare = true cfg.Core.Worktree = "bar" cfg.Pack.Window = 20 cfg.Remotes["origin"] = &RemoteConfig{ Name: "origin", URLs: []string{"git@github.com:mcuadros/go-git.git"}, } cfg.Remotes["alt"] = &RemoteConfig{ Name: "alt", URLs: []string{"git@github.com:mcuadros/go-git.git", "git@github.com:src-d/go-git.git"}, Fetch: []RefSpec{"+refs/heads/*:refs/remotes/origin/*", "+refs/pull/*:refs/remotes/origin/pull/*"}, } cfg.Remotes["win-local"] = &RemoteConfig{ Name: "win-local", URLs: []string{"X:\\Git\\"}, } cfg.Submodules["qux"] = &Submodule{ Name: "qux", URL: "https://github.com/foo/qux.git", } cfg.Branches["master"] = &Branch{ Name: "master", Remote: "origin", Merge: 
"refs/heads/master", } b, err := cfg.Marshal() c.Assert(err, IsNil) c.Assert(string(b), Equals, string(output)) } func (s *ConfigSuite) TestUnmarshallMarshall(c *C) { input := []byte(`[core] bare = true worktree = foo custom = ignored [pack] window = 20 [remote "origin"] url = git@github.com:mcuadros/go-git.git fetch = +refs/heads/*:refs/remotes/origin/* mirror = true [remote "win-local"] url = "X:\\Git\\" [branch "master"] remote = origin merge = refs/heads/master `) cfg := NewConfig() err := cfg.Unmarshal(input) c.Assert(err, IsNil) output, err := cfg.Marshal() c.Assert(err, IsNil) c.Assert(string(output), DeepEquals, string(input)) } func (s *ConfigSuite) TestValidateConfig(c *C) { config := &Config{ Remotes: map[string]*RemoteConfig{ "bar": { Name: "bar", URLs: []string{"http://foo/bar"}, }, }, Branches: map[string]*Branch{ "bar": { Name: "bar", }, "foo": { Name: "foo", Remote: "origin", Merge: plumbing.ReferenceName("refs/heads/foo"), }, }, } c.Assert(config.Validate(), IsNil) } func (s *ConfigSuite) TestValidateInvalidRemote(c *C) { config := &Config{ Remotes: map[string]*RemoteConfig{ "foo": {Name: "foo"}, }, } c.Assert(config.Validate(), Equals, ErrRemoteConfigEmptyURL) } func (s *ConfigSuite) TestValidateInvalidRemoteKey(c *C) { config := &Config{ Remotes: map[string]*RemoteConfig{ "bar": {Name: "foo"}, }, } c.Assert(config.Validate(), Equals, ErrInvalid) } func (s *ConfigSuite) TestRemoteConfigValidateMissingURL(c *C) { config := &RemoteConfig{Name: "foo"} c.Assert(config.Validate(), Equals, ErrRemoteConfigEmptyURL) } func (s *ConfigSuite) TestRemoteConfigValidateMissingName(c *C) { config := &RemoteConfig{} c.Assert(config.Validate(), Equals, ErrRemoteConfigEmptyName) } func (s *ConfigSuite) TestRemoteConfigValidateDefault(c *C) { config := &RemoteConfig{Name: "foo", URLs: []string{"http://foo/bar"}} c.Assert(config.Validate(), IsNil) fetch := config.Fetch c.Assert(fetch, HasLen, 1) c.Assert(fetch[0].String(), Equals, "+refs/heads/*:refs/remotes/foo/*") 
} func (s *ConfigSuite) TestValidateInvalidBranchKey(c *C) { config := &Config{ Branches: map[string]*Branch{ "foo": { Name: "bar", Remote: "origin", Merge: plumbing.ReferenceName("refs/heads/bar"), }, }, } c.Assert(config.Validate(), Equals, ErrInvalid) } func (s *ConfigSuite) TestValidateInvalidBranch(c *C) { config := &Config{ Branches: map[string]*Branch{ "bar": { Name: "bar", Remote: "origin", Merge: plumbing.ReferenceName("refs/heads/bar"), }, "foo": { Name: "foo", Remote: "origin", Merge: plumbing.ReferenceName("baz"), }, }, } c.Assert(config.Validate(), Equals, errBranchInvalidMerge) } func (s *ConfigSuite) TestRemoteConfigDefaultValues(c *C) { config := NewConfig() c.Assert(config.Remotes, HasLen, 0) c.Assert(config.Branches, HasLen, 0) c.Assert(config.Submodules, HasLen, 0) c.Assert(config.Raw, NotNil) c.Assert(config.Pack.Window, Equals, DefaultPackWindow) } golang-gopkg-src-d-go-git.v4-4.11.0/config/modules.go000066400000000000000000000056551345605224300221560ustar00rootroot00000000000000package config import ( "bytes" "errors" "regexp" format "gopkg.in/src-d/go-git.v4/plumbing/format/config" ) var ( ErrModuleEmptyURL = errors.New("module config: empty URL") ErrModuleEmptyPath = errors.New("module config: empty path") ErrModuleBadPath = errors.New("submodule has an invalid path") ) var ( // Matches module paths with dotdot ".." components. dotdotPath = regexp.MustCompile(`(^|[/\\])\.\.([/\\]|$)`) ) // Modules defines the submodules properties, represents a .gitmodules file // https://www.kernel.org/pub/software/scm/git/docs/gitmodules.html type Modules struct { // Submodules is a map of submodules being the key the name of the submodule. Submodules map[string]*Submodule raw *format.Config } // NewModules returns a new empty Modules func NewModules() *Modules { return &Modules{ Submodules: make(map[string]*Submodule), raw: format.New(), } } const ( pathKey = "path" branchKey = "branch" ) // Unmarshal parses a git-config file and stores it. 
func (m *Modules) Unmarshal(b []byte) error { r := bytes.NewBuffer(b) d := format.NewDecoder(r) m.raw = format.New() if err := d.Decode(m.raw); err != nil { return err } unmarshalSubmodules(m.raw, m.Submodules) return nil } // Marshal returns Modules encoded as a git-config file. func (m *Modules) Marshal() ([]byte, error) { s := m.raw.Section(submoduleSection) s.Subsections = make(format.Subsections, len(m.Submodules)) var i int for _, r := range m.Submodules { s.Subsections[i] = r.marshal() i++ } buf := bytes.NewBuffer(nil) if err := format.NewEncoder(buf).Encode(m.raw); err != nil { return nil, err } return buf.Bytes(), nil } // Submodule defines a submodule. type Submodule struct { // Name module name Name string // Path defines the path, relative to the top-level directory of the Git // working tree. Path string // URL defines a URL from which the submodule repository can be cloned. URL string // Branch is a remote branch name for tracking updates in the upstream // submodule. Optional value. Branch string // raw representation of the subsection, filled by marshal or unmarshal are // called. raw *format.Subsection } // Validate validates the fields and sets the default values. 
func (m *Submodule) Validate() error { if m.Path == "" { return ErrModuleEmptyPath } if m.URL == "" { return ErrModuleEmptyURL } if dotdotPath.MatchString(m.Path) { return ErrModuleBadPath } return nil } func (m *Submodule) unmarshal(s *format.Subsection) { m.raw = s m.Name = m.raw.Name m.Path = m.raw.Option(pathKey) m.URL = m.raw.Option(urlKey) m.Branch = m.raw.Option(branchKey) } func (m *Submodule) marshal() *format.Subsection { if m.raw == nil { m.raw = &format.Subsection{} } m.raw.Name = m.Name if m.raw.Name == "" { m.raw.Name = m.Path } m.raw.SetOption(pathKey, m.Path) m.raw.SetOption(urlKey, m.URL) if m.Branch != "" { m.raw.SetOption(branchKey, m.Branch) } return m.raw } golang-gopkg-src-d-go-git.v4-4.11.0/config/modules_test.go000066400000000000000000000041631345605224300232060ustar00rootroot00000000000000package config import . "gopkg.in/check.v1" type ModulesSuite struct{} var _ = Suite(&ModulesSuite{}) func (s *ModulesSuite) TestValidateMissingURL(c *C) { m := &Submodule{Path: "foo"} c.Assert(m.Validate(), Equals, ErrModuleEmptyURL) } func (s *ModulesSuite) TestValidateBadPath(c *C) { input := []string{ `..`, `../`, `../bar`, `/..`, `/../bar`, `foo/..`, `foo/../`, `foo/../bar`, } for _, p := range input { m := &Submodule{ Path: p, URL: "https://example.com/", } c.Assert(m.Validate(), Equals, ErrModuleBadPath) } } func (s *ModulesSuite) TestValidateMissingName(c *C) { m := &Submodule{URL: "bar"} c.Assert(m.Validate(), Equals, ErrModuleEmptyPath) } func (s *ModulesSuite) TestMarshall(c *C) { input := []byte(`[submodule "qux"] path = qux url = baz branch = bar `) cfg := NewModules() cfg.Submodules["qux"] = &Submodule{Path: "qux", URL: "baz", Branch: "bar"} output, err := cfg.Marshal() c.Assert(err, IsNil) c.Assert(output, DeepEquals, input) } func (s *ModulesSuite) TestUnmarshall(c *C) { input := []byte(`[submodule "qux"] path = qux url = https://github.com/foo/qux.git [submodule "foo/bar"] path = foo/bar url = https://github.com/foo/bar.git branch = dev 
[submodule "suspicious"] path = ../../foo/bar url = https://github.com/foo/bar.git `) cfg := NewModules() err := cfg.Unmarshal(input) c.Assert(err, IsNil) c.Assert(cfg.Submodules, HasLen, 2) c.Assert(cfg.Submodules["qux"].Name, Equals, "qux") c.Assert(cfg.Submodules["qux"].URL, Equals, "https://github.com/foo/qux.git") c.Assert(cfg.Submodules["foo/bar"].Name, Equals, "foo/bar") c.Assert(cfg.Submodules["foo/bar"].URL, Equals, "https://github.com/foo/bar.git") c.Assert(cfg.Submodules["foo/bar"].Branch, Equals, "dev") } func (s *ModulesSuite) TestUnmarshallMarshall(c *C) { input := []byte(`[submodule "foo/bar"] path = foo/bar url = https://github.com/foo/bar.git ignore = all `) cfg := NewModules() err := cfg.Unmarshal(input) c.Assert(err, IsNil) output, err := cfg.Marshal() c.Assert(err, IsNil) c.Assert(string(output), DeepEquals, string(input)) } golang-gopkg-src-d-go-git.v4-4.11.0/config/refspec.go000066400000000000000000000066501345605224300221310ustar00rootroot00000000000000package config import ( "errors" "strings" "gopkg.in/src-d/go-git.v4/plumbing" ) const ( refSpecWildcard = "*" refSpecForce = "+" refSpecSeparator = ":" ) var ( ErrRefSpecMalformedSeparator = errors.New("malformed refspec, separators are wrong") ErrRefSpecMalformedWildcard = errors.New("malformed refspec, mismatched number of wildcards") ) // RefSpec is a mapping from local branches to remote references // The format of the refspec is an optional +, followed by :, where // is the pattern for references on the remote side and is where // those references will be written locally. The + tells Git to update the // reference even if it isn’t a fast-forward. 
// eg.: "+refs/heads/*:refs/remotes/origin/*" // // https://git-scm.com/book/es/v2/Git-Internals-The-Refspec type RefSpec string // Validate validates the RefSpec func (s RefSpec) Validate() error { spec := string(s) if strings.Count(spec, refSpecSeparator) != 1 { return ErrRefSpecMalformedSeparator } sep := strings.Index(spec, refSpecSeparator) if sep == len(spec)-1 { return ErrRefSpecMalformedSeparator } ws := strings.Count(spec[0:sep], refSpecWildcard) wd := strings.Count(spec[sep+1:], refSpecWildcard) if ws == wd && ws < 2 && wd < 2 { return nil } return ErrRefSpecMalformedWildcard } // IsForceUpdate returns if update is allowed in non fast-forward merges. func (s RefSpec) IsForceUpdate() bool { return s[0] == refSpecForce[0] } // IsDelete returns true if the refspec indicates a delete (empty src). func (s RefSpec) IsDelete() bool { return s[0] == refSpecSeparator[0] } // Src return the src side. func (s RefSpec) Src() string { spec := string(s) var start int if s.IsForceUpdate() { start = 1 } else { start = 0 } end := strings.Index(spec, refSpecSeparator) return spec[start:end] } // Match match the given plumbing.ReferenceName against the source. func (s RefSpec) Match(n plumbing.ReferenceName) bool { if !s.IsWildcard() { return s.matchExact(n) } return s.matchGlob(n) } // IsWildcard returns true if the RefSpec contains a wildcard. func (s RefSpec) IsWildcard() bool { return strings.Contains(string(s), refSpecWildcard) } func (s RefSpec) matchExact(n plumbing.ReferenceName) bool { return s.Src() == n.String() } func (s RefSpec) matchGlob(n plumbing.ReferenceName) bool { src := s.Src() name := n.String() wildcard := strings.Index(src, refSpecWildcard) var prefix, suffix string prefix = src[0:wildcard] if len(src) < wildcard { suffix = src[wildcard+1 : len(suffix)] } return len(name) > len(prefix)+len(suffix) && strings.HasPrefix(name, prefix) && strings.HasSuffix(name, suffix) } // Dst returns the destination for the given remote reference. 
func (s RefSpec) Dst(n plumbing.ReferenceName) plumbing.ReferenceName { spec := string(s) start := strings.Index(spec, refSpecSeparator) + 1 dst := spec[start:] src := s.Src() if !s.IsWildcard() { return plumbing.ReferenceName(dst) } name := n.String() ws := strings.Index(src, refSpecWildcard) wd := strings.Index(dst, refSpecWildcard) match := name[ws : len(name)-(len(src)-(ws+1))] return plumbing.ReferenceName(dst[0:wd] + match + dst[wd+1:]) } func (s RefSpec) String() string { return string(s) } // MatchAny returns true if any of the RefSpec match with the given ReferenceName. func MatchAny(l []RefSpec, n plumbing.ReferenceName) bool { for _, r := range l { if r.Match(n) { return true } } return false } golang-gopkg-src-d-go-git.v4-4.11.0/config/refspec_test.go000066400000000000000000000102741345605224300231650ustar00rootroot00000000000000package config import ( "testing" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing" ) type RefSpecSuite struct{} var _ = Suite(&RefSpecSuite{}) func Test(t *testing.T) { TestingT(t) } func (s *RefSpecSuite) TestRefSpecIsValid(c *C) { spec := RefSpec("+refs/heads/*:refs/remotes/origin/*") c.Assert(spec.Validate(), Equals, nil) spec = RefSpec("refs/heads/*:refs/remotes/origin/") c.Assert(spec.Validate(), Equals, ErrRefSpecMalformedWildcard) spec = RefSpec("refs/heads/master:refs/remotes/origin/master") c.Assert(spec.Validate(), Equals, nil) spec = RefSpec(":refs/heads/master") c.Assert(spec.Validate(), Equals, nil) spec = RefSpec(":refs/heads/*") c.Assert(spec.Validate(), Equals, ErrRefSpecMalformedWildcard) spec = RefSpec(":*") c.Assert(spec.Validate(), Equals, ErrRefSpecMalformedWildcard) spec = RefSpec("refs/heads/*") c.Assert(spec.Validate(), Equals, ErrRefSpecMalformedSeparator) spec = RefSpec("refs/heads:") c.Assert(spec.Validate(), Equals, ErrRefSpecMalformedSeparator) } func (s *RefSpecSuite) TestRefSpecIsForceUpdate(c *C) { spec := RefSpec("+refs/heads/*:refs/remotes/origin/*") c.Assert(spec.IsForceUpdate(), 
Equals, true) spec = RefSpec("refs/heads/*:refs/remotes/origin/*") c.Assert(spec.IsForceUpdate(), Equals, false) } func (s *RefSpecSuite) TestRefSpecIsDelete(c *C) { spec := RefSpec(":refs/heads/master") c.Assert(spec.IsDelete(), Equals, true) spec = RefSpec("+refs/heads/*:refs/remotes/origin/*") c.Assert(spec.IsDelete(), Equals, false) spec = RefSpec("refs/heads/*:refs/remotes/origin/*") c.Assert(spec.IsDelete(), Equals, false) } func (s *RefSpecSuite) TestRefSpecSrc(c *C) { spec := RefSpec("refs/heads/*:refs/remotes/origin/*") c.Assert(spec.Src(), Equals, "refs/heads/*") spec = RefSpec("+refs/heads/*:refs/remotes/origin/*") c.Assert(spec.Src(), Equals, "refs/heads/*") spec = RefSpec(":refs/heads/master") c.Assert(spec.Src(), Equals, "") spec = RefSpec("refs/heads/love+hate:refs/heads/love+hate") c.Assert(spec.Src(), Equals, "refs/heads/love+hate") spec = RefSpec("+refs/heads/love+hate:refs/heads/love+hate") c.Assert(spec.Src(), Equals, "refs/heads/love+hate") } func (s *RefSpecSuite) TestRefSpecMatch(c *C) { spec := RefSpec("refs/heads/master:refs/remotes/origin/master") c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/foo")), Equals, false) c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/master")), Equals, true) spec = RefSpec("+refs/heads/master:refs/remotes/origin/master") c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/foo")), Equals, false) c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/master")), Equals, true) spec = RefSpec(":refs/heads/master") c.Assert(spec.Match(plumbing.ReferenceName("")), Equals, true) c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/master")), Equals, false) spec = RefSpec("refs/heads/love+hate:heads/love+hate") c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/love+hate")), Equals, true) spec = RefSpec("+refs/heads/love+hate:heads/love+hate") c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/love+hate")), Equals, true) } func (s *RefSpecSuite) TestRefSpecMatchGlob(c *C) { spec := 
RefSpec("refs/heads/*:refs/remotes/origin/*") c.Assert(spec.Match(plumbing.ReferenceName("refs/tag/foo")), Equals, false) c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/foo")), Equals, true) } func (s *RefSpecSuite) TestRefSpecDst(c *C) { spec := RefSpec("refs/heads/master:refs/remotes/origin/master") c.Assert( spec.Dst(plumbing.ReferenceName("refs/heads/master")).String(), Equals, "refs/remotes/origin/master", ) } func (s *RefSpecSuite) TestRefSpecDstBlob(c *C) { spec := RefSpec("refs/heads/*:refs/remotes/origin/*") c.Assert( spec.Dst(plumbing.ReferenceName("refs/heads/foo")).String(), Equals, "refs/remotes/origin/foo", ) } func (s *RefSpecSuite) TestMatchAny(c *C) { specs := []RefSpec{ "refs/heads/bar:refs/remotes/origin/foo", "refs/heads/foo:refs/remotes/origin/bar", } c.Assert(MatchAny(specs, plumbing.ReferenceName("refs/heads/foo")), Equals, true) c.Assert(MatchAny(specs, plumbing.ReferenceName("refs/heads/bar")), Equals, true) c.Assert(MatchAny(specs, plumbing.ReferenceName("refs/heads/master")), Equals, false) } golang-gopkg-src-d-go-git.v4-4.11.0/doc.go000066400000000000000000000010051345605224300177670ustar00rootroot00000000000000// A highly extensible git implementation in pure Go. // // go-git aims to reach the completeness of libgit2 or jgit, nowadays covers the // majority of the plumbing read operations and some of the main write // operations, but lacks the main porcelain operations such as merges. // // It is highly extensible, we have been following the open/close principle in // its design to facilitate extensions, mainly focusing the efforts on the // persistence of the objects. 
package git // import "gopkg.in/src-d/go-git.v4" golang-gopkg-src-d-go-git.v4-4.11.0/example_test.go000066400000000000000000000073421345605224300217260ustar00rootroot00000000000000package git_test import ( "fmt" "io" "io/ioutil" "log" "os" "path/filepath" "gopkg.in/src-d/go-git.v4" "gopkg.in/src-d/go-git.v4/config" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/transport/http" "gopkg.in/src-d/go-git.v4/storage/memory" "gopkg.in/src-d/go-billy.v4/memfs" ) func ExampleClone() { // Filesystem abstraction based on memory fs := memfs.New() // Git objects storer based on memory storer := memory.NewStorage() // Clones the repository into the worktree (fs) and storer all the .git // content into the storer _, err := git.Clone(storer, fs, &git.CloneOptions{ URL: "https://github.com/git-fixtures/basic.git", }) if err != nil { log.Fatal(err) } // Prints the content of the CHANGELOG file from the cloned repository changelog, err := fs.Open("CHANGELOG") if err != nil { log.Fatal(err) } io.Copy(os.Stdout, changelog) // Output: Initial changelog } func ExamplePlainClone() { // Tempdir to clone the repository dir, err := ioutil.TempDir("", "clone-example") if err != nil { log.Fatal(err) } defer os.RemoveAll(dir) // clean up // Clones the repository into the given dir, just as a normal git clone does _, err = git.PlainClone(dir, false, &git.CloneOptions{ URL: "https://github.com/git-fixtures/basic.git", }) if err != nil { log.Fatal(err) } // Prints the content of the CHANGELOG file from the cloned repository changelog, err := os.Open(filepath.Join(dir, "CHANGELOG")) if err != nil { log.Fatal(err) } io.Copy(os.Stdout, changelog) // Output: Initial changelog } func ExamplePlainClone_usernamePassword() { // Tempdir to clone the repository dir, err := ioutil.TempDir("", "clone-example") if err != nil { log.Fatal(err) } defer os.RemoveAll(dir) // clean up // Clones the repository into the given dir, just as a normal git clone does _, err = git.PlainClone(dir, 
false, &git.CloneOptions{ URL: "https://github.com/git-fixtures/basic.git", Auth: &http.BasicAuth{ Username: "username", Password: "password", }, }) if err != nil { log.Fatal(err) } } func ExamplePlainClone_accessToken() { // Tempdir to clone the repository dir, err := ioutil.TempDir("", "clone-example") if err != nil { log.Fatal(err) } defer os.RemoveAll(dir) // clean up // Clones the repository into the given dir, just as a normal git clone does _, err = git.PlainClone(dir, false, &git.CloneOptions{ URL: "https://github.com/git-fixtures/basic.git", Auth: &http.BasicAuth{ Username: "abc123", // anything except an empty string Password: "github_access_token", }, }) if err != nil { log.Fatal(err) } } func ExampleRepository_References() { r, _ := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{ URL: "https://github.com/git-fixtures/basic.git", }) // simulating a git show-ref refs, _ := r.References() refs.ForEach(func(ref *plumbing.Reference) error { if ref.Type() == plumbing.HashReference { fmt.Println(ref) } return nil }) // Example Output: // 6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/remotes/origin/master // e8d3ffab552895c19b9fcf7aa264d277cde33881 refs/remotes/origin/branch // 6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master } func ExampleRepository_CreateRemote() { r, _ := git.Init(memory.NewStorage(), nil) // Add a new remote, with the default fetch refspec _, err := r.CreateRemote(&config.RemoteConfig{ Name: "example", URLs: []string{"https://github.com/git-fixtures/basic.git"}, }) if err != nil { log.Fatal(err) } list, err := r.Remotes() if err != nil { log.Fatal(err) } for _, r := range list { fmt.Println(r) } // Example Output: // example https://github.com/git-fixtures/basic.git (fetch) // example https://github.com/git-fixtures/basic.git (push) } golang-gopkg-src-d-go-git.v4-4.11.0/go.mod000066400000000000000000000023271345605224300200110ustar00rootroot00000000000000module gopkg.in/src-d/go-git.v4 require ( github.com/alcortesm/tgz 
v0.0.0-20161220082320-9c5fe88206d7 // indirect github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emirpasic/gods v1.9.0 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect github.com/gliderlabs/ssh v0.1.1 github.com/google/go-cmp v0.2.0 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 github.com/jessevdk/go-flags v1.4.0 github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e github.com/mitchellh/go-homedir v1.0.0 github.com/pelletier/go-buffruneio v0.2.0 // indirect github.com/pkg/errors v0.8.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/sergi/go-diff v1.0.0 github.com/src-d/gcfg v1.4.0 github.com/stretchr/testify v1.2.2 // indirect github.com/xanzy/ssh-agent v0.2.0 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd // indirect golang.org/x/text v0.3.0 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 gopkg.in/src-d/go-billy.v4 v4.2.1 gopkg.in/src-d/go-git-fixtures.v3 v3.1.1 gopkg.in/warnings.v0 v0.1.2 // indirect ) golang-gopkg-src-d-go-git.v4-4.11.0/go.sum000066400000000000000000000124731345605224300200410ustar00rootroot00000000000000github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emirpasic/gods v1.9.0 h1:rUF4PuzEjMChMiNsVjdI+SyLu7rEqpQ5reNFnhC7oFo= github.com/emirpasic/gods 
v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/gliderlabs/ssh v0.1.1 h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e h1:RgQk53JHp/Cjunrr1WlsXSZpqXn+uREuHvUVcK82CV8= github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA= github.com/pelletier/go-buffruneio v0.2.0/go.mod 
h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg= github.com/src-d/gcfg v1.3.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro= github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9 h1:lkiLiLBHGoH3XnqSLUIaBsilGMUjI+Uy2Xu2JLUtTas= golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/src-d/go-billy.v4 v4.2.1 h1:omN5CrMrMcQ+4I8bJ0wEhOBPanIRWzFC953IiXKdYzo= gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk= gopkg.in/src-d/go-git-fixtures.v3 v3.1.1 h1:XWW/s5W18RaJpmo1l0IYGqXKuJITWRFuA45iOf1dKJs= gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= golang-gopkg-src-d-go-git.v4-4.11.0/internal/000077500000000000000000000000001345605224300205135ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/internal/revision/000077500000000000000000000000001345605224300223515ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/internal/revision/parser.go000066400000000000000000000313111345605224300241730ustar00rootroot00000000000000// Package revision extracts git revision from string // More informations about revision : https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html package revision import ( "bytes" "fmt" "io" "regexp" "strconv" "time" ) // ErrInvalidRevision is emitted if string doesn't match valid revision type ErrInvalidRevision struct { s string } func (e *ErrInvalidRevision) Error() string { return "Revision invalid : " + e.s } // Revisioner represents a revision component. 
// A revision is made of multiple revision components // obtained after parsing a revision string, // for instance revision "master~" will be converted in // two revision components Ref and TildePath type Revisioner interface { } // Ref represents a reference name : HEAD, master type Ref string // TildePath represents ~, ~{n} type TildePath struct { Depth int } // CaretPath represents ^, ^{n} type CaretPath struct { Depth int } // CaretReg represents ^{/foo bar} type CaretReg struct { Regexp *regexp.Regexp Negate bool } // CaretType represents ^{commit} type CaretType struct { ObjectType string } // AtReflog represents @{n} type AtReflog struct { Depth int } // AtCheckout represents @{-n} type AtCheckout struct { Depth int } // AtUpstream represents @{upstream}, @{u} type AtUpstream struct { BranchName string } // AtPush represents @{push} type AtPush struct { BranchName string } // AtDate represents @{"2006-01-02T15:04:05Z"} type AtDate struct { Date time.Time } // ColonReg represents :/foo bar type ColonReg struct { Regexp *regexp.Regexp Negate bool } // ColonPath represents :./ : type ColonPath struct { Path string } // ColonStagePath represents ::/ type ColonStagePath struct { Path string Stage int } // Parser represents a parser // use to tokenize and transform to revisioner chunks // a given string type Parser struct { s *scanner currentParsedChar struct { tok token lit string } unreadLastChar bool } // NewParserFromString returns a new instance of parser from a string. func NewParserFromString(s string) *Parser { return NewParser(bytes.NewBufferString(s)) } // NewParser returns a new instance of parser. 
func NewParser(r io.Reader) *Parser { return &Parser{s: newScanner(r)} } // scan returns the next token from the underlying scanner // or the last scanned token if an unscan was requested func (p *Parser) scan() (token, string, error) { if p.unreadLastChar { p.unreadLastChar = false return p.currentParsedChar.tok, p.currentParsedChar.lit, nil } tok, lit, err := p.s.scan() p.currentParsedChar.tok, p.currentParsedChar.lit = tok, lit return tok, lit, err } // unscan pushes the previously read token back onto the buffer. func (p *Parser) unscan() { p.unreadLastChar = true } // Parse explode a revision string into revisioner chunks func (p *Parser) Parse() ([]Revisioner, error) { var rev Revisioner var revs []Revisioner var tok token var err error for { tok, _, err = p.scan() if err != nil { return nil, err } switch tok { case at: rev, err = p.parseAt() case tilde: rev, err = p.parseTilde() case caret: rev, err = p.parseCaret() case colon: rev, err = p.parseColon() case eof: err = p.validateFullRevision(&revs) if err != nil { return []Revisioner{}, err } return revs, nil default: p.unscan() rev, err = p.parseRef() } if err != nil { return []Revisioner{}, err } revs = append(revs, rev) } } // validateFullRevision ensures all revisioner chunks make a valid revision func (p *Parser) validateFullRevision(chunks *[]Revisioner) error { var hasReference bool for i, chunk := range *chunks { switch chunk.(type) { case Ref: if i == 0 { hasReference = true } else { return &ErrInvalidRevision{`reference must be defined once at the beginning`} } case AtDate: if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { return nil } return &ErrInvalidRevision{`"@" statement is not valid, could be : @{}, @{}`} case AtReflog: if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { return nil } return &ErrInvalidRevision{`"@" statement is not valid, could be : @{}, @{}`} case AtCheckout: if len(*chunks) == 1 { return nil } return &ErrInvalidRevision{`"@" statement is not valid, could 
be : @{-}`} case AtUpstream: if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { return nil } return &ErrInvalidRevision{`"@" statement is not valid, could be : @{upstream}, @{upstream}, @{u}, @{u}`} case AtPush: if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { return nil } return &ErrInvalidRevision{`"@" statement is not valid, could be : @{push}, @{push}`} case TildePath, CaretPath, CaretReg: if !hasReference { return &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`} } case ColonReg: if len(*chunks) == 1 { return nil } return &ErrInvalidRevision{`":" statement is not valid, could be : :/`} case ColonPath: if i == len(*chunks)-1 && hasReference || len(*chunks) == 1 { return nil } return &ErrInvalidRevision{`":" statement is not valid, could be : :`} case ColonStagePath: if len(*chunks) == 1 { return nil } return &ErrInvalidRevision{`":" statement is not valid, could be : ::`} } } return nil } // parseAt extract @ statements func (p *Parser) parseAt() (Revisioner, error) { var tok, nextTok token var lit, nextLit string var err error tok, _, err = p.scan() if err != nil { return nil, err } if tok != obrace { p.unscan() return Ref("HEAD"), nil } tok, lit, err = p.scan() if err != nil { return nil, err } nextTok, nextLit, err = p.scan() if err != nil { return nil, err } switch { case tok == word && (lit == "u" || lit == "upstream") && nextTok == cbrace: return AtUpstream{}, nil case tok == word && lit == "push" && nextTok == cbrace: return AtPush{}, nil case tok == number && nextTok == cbrace: n, _ := strconv.Atoi(lit) return AtReflog{n}, nil case tok == minus && nextTok == number: n, _ := strconv.Atoi(nextLit) t, _, err := p.scan() if err != nil { return nil, err } if t != cbrace { return nil, &ErrInvalidRevision{fmt.Sprintf(`missing "}" in @{-n} structure`)} } return AtCheckout{n}, nil default: p.unscan() date := lit for { tok, lit, err = p.scan() if err != nil { return nil, err } switch { case tok == 
cbrace: t, err := time.Parse("2006-01-02T15:04:05Z", date) if err != nil { return nil, &ErrInvalidRevision{fmt.Sprintf(`wrong date "%s" must fit ISO-8601 format : 2006-01-02T15:04:05Z`, date)} } return AtDate{t}, nil default: date += lit } } } } // parseTilde extract ~ statements func (p *Parser) parseTilde() (Revisioner, error) { var tok token var lit string var err error tok, lit, err = p.scan() if err != nil { return nil, err } switch { case tok == number: n, _ := strconv.Atoi(lit) return TildePath{n}, nil default: p.unscan() return TildePath{1}, nil } } // parseCaret extract ^ statements func (p *Parser) parseCaret() (Revisioner, error) { var tok token var lit string var err error tok, lit, err = p.scan() if err != nil { return nil, err } switch { case tok == obrace: r, err := p.parseCaretBraces() if err != nil { return nil, err } return r, nil case tok == number: n, _ := strconv.Atoi(lit) if n > 2 { return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" found must be 0, 1 or 2 after "^"`, lit)} } return CaretPath{n}, nil default: p.unscan() return CaretPath{1}, nil } } // parseCaretBraces extract ^{} statements func (p *Parser) parseCaretBraces() (Revisioner, error) { var tok, nextTok token var lit, _ string start := true var re string var negate bool var err error for { tok, lit, err = p.scan() if err != nil { return nil, err } nextTok, _, err = p.scan() if err != nil { return nil, err } switch { case tok == word && nextTok == cbrace && (lit == "commit" || lit == "tree" || lit == "blob" || lit == "tag" || lit == "object"): return CaretType{lit}, nil case re == "" && tok == cbrace: return CaretType{"tag"}, nil case re == "" && tok == emark && nextTok == emark: re += lit case re == "" && tok == emark && nextTok == minus: negate = true case re == "" && tok == emark: return nil, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component sequences starting with "/!" 
others than those defined are reserved`)} case re == "" && tok == slash: p.unscan() case tok != slash && start: return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)} case tok != cbrace: p.unscan() re += lit case tok == cbrace: p.unscan() reg, err := regexp.Compile(re) if err != nil { return CaretReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())} } return CaretReg{reg, negate}, nil } start = false } } // parseColon extract : statements func (p *Parser) parseColon() (Revisioner, error) { var tok token var err error tok, _, err = p.scan() if err != nil { return nil, err } switch tok { case slash: return p.parseColonSlash() default: p.unscan() return p.parseColonDefault() } } // parseColonSlash extract :/ statements func (p *Parser) parseColonSlash() (Revisioner, error) { var tok, nextTok token var lit string var re string var negate bool var err error for { tok, lit, err = p.scan() if err != nil { return nil, err } nextTok, _, err = p.scan() if err != nil { return nil, err } switch { case tok == emark && nextTok == emark: re += lit case re == "" && tok == emark && nextTok == minus: negate = true case re == "" && tok == emark: return nil, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component sequences starting with "/!" 
others than those defined are reserved`)} case tok == eof: p.unscan() reg, err := regexp.Compile(re) if err != nil { return ColonReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())} } return ColonReg{reg, negate}, nil default: p.unscan() re += lit } } } // parseColonDefault extract : statements func (p *Parser) parseColonDefault() (Revisioner, error) { var tok token var lit string var path string var stage int var err error var n = -1 tok, lit, err = p.scan() if err != nil { return nil, err } nextTok, _, err := p.scan() if err != nil { return nil, err } if tok == number && nextTok == colon { n, _ = strconv.Atoi(lit) } switch n { case 0, 1, 2, 3: stage = n default: path += lit p.unscan() } for { tok, lit, err = p.scan() if err != nil { return nil, err } switch { case tok == eof && n == -1: return ColonPath{path}, nil case tok == eof: return ColonStagePath{path, stage}, nil default: path += lit } } } // parseRef extract reference name func (p *Parser) parseRef() (Revisioner, error) { var tok, prevTok token var lit, buf string var endOfRef bool var err error for { tok, lit, err = p.scan() if err != nil { return nil, err } switch tok { case eof, at, colon, tilde, caret: endOfRef = true } err := p.checkRefFormat(tok, lit, prevTok, buf, endOfRef) if err != nil { return "", err } if endOfRef { p.unscan() return Ref(buf), nil } buf += lit prevTok = tok } } // checkRefFormat ensure reference name follow rules defined here : // https://git-scm.com/docs/git-check-ref-format func (p *Parser) checkRefFormat(token token, literal string, previousToken token, buffer string, endOfRef bool) error { switch token { case aslash, space, control, qmark, asterisk, obracket: return &ErrInvalidRevision{fmt.Sprintf(`must not contains "%s"`, literal)} } switch { case (token == dot || token == slash) && buffer == "": return &ErrInvalidRevision{fmt.Sprintf(`must not start with "%s"`, literal)} case previousToken == slash && endOfRef: return 
&ErrInvalidRevision{`must not end with "/"`} case previousToken == dot && endOfRef: return &ErrInvalidRevision{`must not end with "."`} case token == dot && previousToken == slash: return &ErrInvalidRevision{`must not contains "/."`} case previousToken == dot && token == dot: return &ErrInvalidRevision{`must not contains ".."`} case previousToken == slash && token == slash: return &ErrInvalidRevision{`must not contains consecutively "/"`} case (token == slash || endOfRef) && len(buffer) > 4 && buffer[len(buffer)-5:] == ".lock": return &ErrInvalidRevision{"cannot end with .lock"} } return nil } golang-gopkg-src-d-go-git.v4-4.11.0/internal/revision/parser_test.go000066400000000000000000000262561345605224300252460ustar00rootroot00000000000000package revision import ( "bytes" "regexp" "time" . "gopkg.in/check.v1" ) type ParserSuite struct{} var _ = Suite(&ParserSuite{}) func (s *ParserSuite) TestErrInvalidRevision(c *C) { e := ErrInvalidRevision{"test"} c.Assert(e.Error(), Equals, "Revision invalid : test") } func (s *ParserSuite) TestNewParserFromString(c *C) { p := NewParserFromString("test") c.Assert(p, FitsTypeOf, &Parser{}) } func (s *ParserSuite) TestScan(c *C) { parser := NewParser(bytes.NewBufferString("Hello world !")) expected := []struct { t token s string }{ { word, "Hello", }, { space, " ", }, { word, "world", }, { space, " ", }, { emark, "!", }, } for i := 0; ; { tok, str, err := parser.scan() if tok == eof { return } c.Assert(err, Equals, nil) c.Assert(str, Equals, expected[i].s) c.Assert(tok, Equals, expected[i].t) i++ } } func (s *ParserSuite) TestUnscan(c *C) { parser := NewParser(bytes.NewBufferString("Hello world !")) tok, str, err := parser.scan() c.Assert(err, Equals, nil) c.Assert(str, Equals, "Hello") c.Assert(tok, Equals, word) parser.unscan() tok, str, err = parser.scan() c.Assert(err, Equals, nil) c.Assert(str, Equals, "Hello") c.Assert(tok, Equals, word) } func (s *ParserSuite) TestParseWithValidExpression(c *C) { tim, _ := 
time.Parse("2006-01-02T15:04:05Z", "2016-12-16T21:42:47Z") datas := map[string]Revisioner{ "@": []Revisioner{Ref("HEAD")}, "@~3": []Revisioner{ Ref("HEAD"), TildePath{3}, }, "@{2016-12-16T21:42:47Z}": []Revisioner{AtDate{tim}}, "@{1}": []Revisioner{AtReflog{1}}, "@{-1}": []Revisioner{AtCheckout{1}}, "master@{upstream}": []Revisioner{ Ref("master"), AtUpstream{}, }, "@{upstream}": []Revisioner{ AtUpstream{}, }, "@{u}": []Revisioner{ AtUpstream{}, }, "master@{push}": []Revisioner{ Ref("master"), AtPush{}, }, "master@{2016-12-16T21:42:47Z}": []Revisioner{ Ref("master"), AtDate{tim}, }, "HEAD^": []Revisioner{ Ref("HEAD"), CaretPath{1}, }, "master~3": []Revisioner{ Ref("master"), TildePath{3}, }, "v0.99.8^{commit}": []Revisioner{ Ref("v0.99.8"), CaretType{"commit"}, }, "v0.99.8^{}": []Revisioner{ Ref("v0.99.8"), CaretType{"tag"}, }, "HEAD^{/fix nasty bug}": []Revisioner{ Ref("HEAD"), CaretReg{regexp.MustCompile("fix nasty bug"), false}, }, ":/fix nasty bug": []Revisioner{ ColonReg{regexp.MustCompile("fix nasty bug"), false}, }, "HEAD:README": []Revisioner{ Ref("HEAD"), ColonPath{"README"}, }, ":README": []Revisioner{ ColonPath{"README"}, }, "master:./README": []Revisioner{ Ref("master"), ColonPath{"./README"}, }, "master^1~:./README": []Revisioner{ Ref("master"), CaretPath{1}, TildePath{1}, ColonPath{"./README"}, }, ":0:README": []Revisioner{ ColonStagePath{"README", 0}, }, ":3:README": []Revisioner{ ColonStagePath{"README", 3}, }, "master~1^{/update}~5~^^1": []Revisioner{ Ref("master"), TildePath{1}, CaretReg{regexp.MustCompile("update"), false}, TildePath{5}, TildePath{1}, CaretPath{1}, CaretPath{1}, }, } for d, expected := range datas { parser := NewParser(bytes.NewBufferString(d)) result, err := parser.Parse() c.Assert(err, Equals, nil) c.Assert(result, DeepEquals, expected) } } func (s *ParserSuite) TestParseWithUnValidExpression(c *C) { datas := map[string]error{ "..": &ErrInvalidRevision{`must not start with "."`}, "master^1master": &ErrInvalidRevision{`reference 
must be defined once at the beginning`}, "master^1@{2016-12-16T21:42:47Z}": &ErrInvalidRevision{`"@" statement is not valid, could be : @{}, @{}`}, "master^1@{1}": &ErrInvalidRevision{`"@" statement is not valid, could be : @{}, @{}`}, "master@{-1}": &ErrInvalidRevision{`"@" statement is not valid, could be : @{-}`}, "master^1@{upstream}": &ErrInvalidRevision{`"@" statement is not valid, could be : @{upstream}, @{upstream}, @{u}, @{u}`}, "master^1@{u}": &ErrInvalidRevision{`"@" statement is not valid, could be : @{upstream}, @{upstream}, @{u}, @{u}`}, "master^1@{push}": &ErrInvalidRevision{`"@" statement is not valid, could be : @{push}, @{push}`}, "^1": &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`}, "^{/test}": &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`}, "~1": &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`}, "master:/test": &ErrInvalidRevision{`":" statement is not valid, could be : :/`}, "master:0:README": &ErrInvalidRevision{`":" statement is not valid, could be : ::`}, } for s, e := range datas { parser := NewParser(bytes.NewBufferString(s)) _, err := parser.Parse() c.Assert(err, DeepEquals, e) } } func (s *ParserSuite) TestParseAtWithValidExpression(c *C) { tim, _ := time.Parse("2006-01-02T15:04:05Z", "2016-12-16T21:42:47Z") datas := map[string]Revisioner{ "": Ref("HEAD"), "{1}": AtReflog{1}, "{-1}": AtCheckout{1}, "{push}": AtPush{}, "{upstream}": AtUpstream{}, "{u}": AtUpstream{}, "{2016-12-16T21:42:47Z}": AtDate{tim}, } for d, expected := range datas { parser := NewParser(bytes.NewBufferString(d)) result, err := parser.parseAt() c.Assert(err, Equals, nil) c.Assert(result, DeepEquals, expected) } } func (s *ParserSuite) TestParseAtWithUnValidExpression(c *C) { datas := map[string]error{ "{test}": &ErrInvalidRevision{`wrong date "test" must fit ISO-8601 format : 2006-01-02T15:04:05Z`}, "{-1": &ErrInvalidRevision{`missing "}" 
in @{-n} structure`}, } for s, e := range datas { parser := NewParser(bytes.NewBufferString(s)) _, err := parser.parseAt() c.Assert(err, DeepEquals, e) } } func (s *ParserSuite) TestParseCaretWithValidExpression(c *C) { datas := map[string]Revisioner{ "": CaretPath{1}, "2": CaretPath{2}, "{}": CaretType{"tag"}, "{commit}": CaretType{"commit"}, "{tree}": CaretType{"tree"}, "{blob}": CaretType{"blob"}, "{tag}": CaretType{"tag"}, "{object}": CaretType{"object"}, "{/hello world !}": CaretReg{regexp.MustCompile("hello world !"), false}, "{/!-hello world !}": CaretReg{regexp.MustCompile("hello world !"), true}, "{/!! hello world !}": CaretReg{regexp.MustCompile("! hello world !"), false}, } for d, expected := range datas { parser := NewParser(bytes.NewBufferString(d)) result, err := parser.parseCaret() c.Assert(err, Equals, nil) c.Assert(result, DeepEquals, expected) } } func (s *ParserSuite) TestParseCaretWithUnValidExpression(c *C) { datas := map[string]error{ "3": &ErrInvalidRevision{`"3" found must be 0, 1 or 2 after "^"`}, "{test}": &ErrInvalidRevision{`"test" is not a valid revision suffix brace component`}, "{/!test}": &ErrInvalidRevision{`revision suffix brace component sequences starting with "/!" 
others than those defined are reserved`}, "{/test**}": &ErrInvalidRevision{"revision suffix brace component, error parsing regexp: invalid nested repetition operator: `**`"}, } for s, e := range datas { parser := NewParser(bytes.NewBufferString(s)) _, err := parser.parseCaret() c.Assert(err, DeepEquals, e) } } func (s *ParserSuite) TestParseTildeWithValidExpression(c *C) { datas := map[string]Revisioner{ "3": TildePath{3}, "1": TildePath{1}, "": TildePath{1}, } for d, expected := range datas { parser := NewParser(bytes.NewBufferString(d)) result, err := parser.parseTilde() c.Assert(err, Equals, nil) c.Assert(result, DeepEquals, expected) } } func (s *ParserSuite) TestParseColonWithValidExpression(c *C) { datas := map[string]Revisioner{ "/hello world !": ColonReg{regexp.MustCompile("hello world !"), false}, "/!-hello world !": ColonReg{regexp.MustCompile("hello world !"), true}, "/!! hello world !": ColonReg{regexp.MustCompile("! hello world !"), false}, "../parser.go": ColonPath{"../parser.go"}, "./parser.go": ColonPath{"./parser.go"}, "parser.go": ColonPath{"parser.go"}, "0:parser.go": ColonStagePath{"parser.go", 0}, "1:parser.go": ColonStagePath{"parser.go", 1}, "2:parser.go": ColonStagePath{"parser.go", 2}, "3:parser.go": ColonStagePath{"parser.go", 3}, } for d, expected := range datas { parser := NewParser(bytes.NewBufferString(d)) result, err := parser.parseColon() c.Assert(err, Equals, nil) c.Assert(result, DeepEquals, expected) } } func (s *ParserSuite) TestParseColonWithUnValidExpression(c *C) { datas := map[string]error{ "/!test": &ErrInvalidRevision{`revision suffix brace component sequences starting with "/!" 
others than those defined are reserved`}, "/*": &ErrInvalidRevision{"revision suffix brace component, error parsing regexp: missing argument to repetition operator: `*`"}, } for s, e := range datas { parser := NewParser(bytes.NewBufferString(s)) _, err := parser.parseColon() c.Assert(err, DeepEquals, e) } } func (s *ParserSuite) TestParseRefWithValidName(c *C) { datas := []string{ "lock", "master", "v1.0.0", "refs/stash", "refs/tags/v1.0.0", "refs/heads/master", "refs/remotes/test", "refs/remotes/origin/HEAD", "refs/remotes/origin/master", } for _, d := range datas { parser := NewParser(bytes.NewBufferString(d)) result, err := parser.parseRef() c.Assert(err, Equals, nil) c.Assert(result, Equals, Ref(d)) } } func (s *ParserSuite) TestParseRefWithUnvalidName(c *C) { datas := map[string]error{ ".master": &ErrInvalidRevision{`must not start with "."`}, "/master": &ErrInvalidRevision{`must not start with "/"`}, "master/": &ErrInvalidRevision{`must not end with "/"`}, "master.": &ErrInvalidRevision{`must not end with "."`}, "refs/remotes/.origin/HEAD": &ErrInvalidRevision{`must not contains "/."`}, "test..test": &ErrInvalidRevision{`must not contains ".."`}, "test..": &ErrInvalidRevision{`must not contains ".."`}, "test test": &ErrInvalidRevision{`must not contains " "`}, "test*test": &ErrInvalidRevision{`must not contains "*"`}, "test?test": &ErrInvalidRevision{`must not contains "?"`}, "test\\test": &ErrInvalidRevision{`must not contains "\"`}, "test[test": &ErrInvalidRevision{`must not contains "["`}, "te//st": &ErrInvalidRevision{`must not contains consecutively "/"`}, "refs/remotes/test.lock/HEAD": &ErrInvalidRevision{`cannot end with .lock`}, "test.lock": &ErrInvalidRevision{`cannot end with .lock`}, } for s, e := range datas { parser := NewParser(bytes.NewBufferString(s)) _, err := parser.parseRef() c.Assert(err, DeepEquals, e) } } 
golang-gopkg-src-d-go-git.v4-4.11.0/internal/revision/scanner.go000066400000000000000000000044221345605224300243330ustar00rootroot00000000000000package revision import ( "bufio" "io" "unicode" ) // runeCategoryValidator takes a rune as input and // validates it belongs to a rune category type runeCategoryValidator func(r rune) bool // tokenizeExpression aggegates a series of runes matching check predicate into a single // string and provides given tokenType as token type func tokenizeExpression(ch rune, tokenType token, check runeCategoryValidator, r *bufio.Reader) (token, string, error) { var data []rune data = append(data, ch) for { c, _, err := r.ReadRune() if c == zeroRune { break } if err != nil { return tokenError, "", err } if check(c) { data = append(data, c) } else { err := r.UnreadRune() if err != nil { return tokenError, "", err } return tokenType, string(data), nil } } return tokenType, string(data), nil } var zeroRune = rune(0) // scanner represents a lexical scanner. type scanner struct { r *bufio.Reader } // newScanner returns a new instance of scanner. 
// newScanner returns a scanner reading tokens from r.
func newScanner(r io.Reader) *scanner {
	return &scanner{r: bufio.NewReader(r)}
}

// scan reads the next token from the underlying reader and returns its
// type together with its literal string representation.
func (s *scanner) scan() (token, string, error) {
	ch, _, err := s.r.ReadRune()
	if err != nil && err != io.EOF {
		return tokenError, "", err
	}

	// A zero rune (also produced at EOF) terminates the scan.
	if ch == zeroRune {
		return eof, "", nil
	}

	lit := string(ch)

	// Single-rune punctuation tokens.
	switch ch {
	case ':':
		return colon, lit, nil
	case '~':
		return tilde, lit, nil
	case '^':
		return caret, lit, nil
	case '.':
		return dot, lit, nil
	case '/':
		return slash, lit, nil
	case '{':
		return obrace, lit, nil
	case '}':
		return cbrace, lit, nil
	case '-':
		return minus, lit, nil
	case '@':
		return at, lit, nil
	case '\\':
		return aslash, lit, nil
	case '?':
		return qmark, lit, nil
	case '*':
		return asterisk, lit, nil
	case '[':
		return obracket, lit, nil
	case '!':
		return emark, lit, nil
	}

	// Rune-category tokens; letters and numbers greedily consume the
	// whole run of their category.
	switch {
	case unicode.IsSpace(ch):
		return space, lit, nil
	case unicode.IsControl(ch):
		return control, lit, nil
	case unicode.IsLetter(ch):
		return tokenizeExpression(ch, word, unicode.IsLetter, s.r)
	case unicode.IsNumber(ch):
		return tokenizeExpression(ch, number, unicode.IsNumber, s.r)
	}

	return tokenError, lit, nil
}
"gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type ScannerSuite struct{} var _ = Suite(&ScannerSuite{}) func (s *ScannerSuite) TestReadColon(c *C) { scanner := newScanner(bytes.NewBufferString(":")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, ":") c.Assert(tok, Equals, colon) } func (s *ScannerSuite) TestReadTilde(c *C) { scanner := newScanner(bytes.NewBufferString("~")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, "~") c.Assert(tok, Equals, tilde) } func (s *ScannerSuite) TestReadCaret(c *C) { scanner := newScanner(bytes.NewBufferString("^")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, "^") c.Assert(tok, Equals, caret) } func (s *ScannerSuite) TestReadDot(c *C) { scanner := newScanner(bytes.NewBufferString(".")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, ".") c.Assert(tok, Equals, dot) } func (s *ScannerSuite) TestReadSlash(c *C) { scanner := newScanner(bytes.NewBufferString("/")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, "/") c.Assert(tok, Equals, slash) } func (s *ScannerSuite) TestReadEOF(c *C) { scanner := newScanner(bytes.NewBufferString(string(rune(0)))) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, "") c.Assert(tok, Equals, eof) } func (s *ScannerSuite) TestReadNumber(c *C) { scanner := newScanner(bytes.NewBufferString("1234")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, "1234") c.Assert(tok, Equals, number) } func (s *ScannerSuite) TestReadSpace(c *C) { scanner := newScanner(bytes.NewBufferString(" ")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, " ") c.Assert(tok, Equals, space) } func (s *ScannerSuite) TestReadControl(c *C) { scanner := newScanner(bytes.NewBufferString("")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) 
c.Assert(data, Equals, "\x01") c.Assert(tok, Equals, control) } func (s *ScannerSuite) TestReadOpenBrace(c *C) { scanner := newScanner(bytes.NewBufferString("{")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, "{") c.Assert(tok, Equals, obrace) } func (s *ScannerSuite) TestReadCloseBrace(c *C) { scanner := newScanner(bytes.NewBufferString("}")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, "}") c.Assert(tok, Equals, cbrace) } func (s *ScannerSuite) TestReadMinus(c *C) { scanner := newScanner(bytes.NewBufferString("-")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, "-") c.Assert(tok, Equals, minus) } func (s *ScannerSuite) TestReadAt(c *C) { scanner := newScanner(bytes.NewBufferString("@")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, "@") c.Assert(tok, Equals, at) } func (s *ScannerSuite) TestReadAntislash(c *C) { scanner := newScanner(bytes.NewBufferString("\\")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, "\\") c.Assert(tok, Equals, aslash) } func (s *ScannerSuite) TestReadQuestionMark(c *C) { scanner := newScanner(bytes.NewBufferString("?")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, "?") c.Assert(tok, Equals, qmark) } func (s *ScannerSuite) TestReadAsterisk(c *C) { scanner := newScanner(bytes.NewBufferString("*")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, "*") c.Assert(tok, Equals, asterisk) } func (s *ScannerSuite) TestReadOpenBracket(c *C) { scanner := newScanner(bytes.NewBufferString("[")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, "[") c.Assert(tok, Equals, obracket) } func (s *ScannerSuite) TestReadExclamationMark(c *C) { scanner := newScanner(bytes.NewBufferString("!")) tok, data, err := scanner.scan() c.Assert(err, Equals, nil) c.Assert(data, Equals, 
var (
	// isSchemeRegExp matches URLs carrying an explicit scheme, e.g.
	// "https://host/path" or "ssh://host/path".
	isSchemeRegExp = regexp.MustCompile(`^[^:]+://`)

	// scpLikeUrlRegExp matches SCP-like syntax, [user@]host:[port/]path,
	// e.g. "git@github.com:src-d/go-git.git". The named capture groups
	// (user, host, port, path) are required for the pattern to compile;
	// the port is optional and, when present, is separated from the path
	// by a slash.
	scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})/)?(?P<path>[^\\].*)$`)
)

// MatchesScheme returns true if the given string matches a URL-like
// format scheme.
func MatchesScheme(url string) bool {
	return isSchemeRegExp.MatchString(url)
}

// MatchesScpLike returns true if the given string matches an SCP-like
// format scheme.
func MatchesScpLike(url string) bool {
	return scpLikeUrlRegExp.MatchString(url)
}

// FindScpLikeComponents returns the user, host, port and path of the
// given SCP-like URL. Components that are absent (e.g. user or port)
// come back as empty strings. The url must match MatchesScpLike;
// otherwise this panics on the nil submatch slice.
func FindScpLikeComponents(url string) (user, host, port, path string) {
	m := scpLikeUrlRegExp.FindStringSubmatch(url)
	return m[1], m[2], m[3], m[4]
}

// IsLocalEndpoint returns true if the given URL string specifies a
// local file endpoint. For example, on a Linux machine,
// `/home/user/src/go-git` would match as a local endpoint, but
// `https://github.com/src-d/go-git` would not.
func IsLocalEndpoint(url string) bool {
	return !MatchesScheme(url) && !MatchesScpLike(url)
}
obj, err := object.GetObject(p.Storer, hash) if err != nil { return fmt.Errorf("Getting object %s failed: %v", hash, err) } // Walk all children depending on object type. switch obj := obj.(type) { case *object.Commit: err = p.walkObjectTree(obj.TreeHash) if err != nil { return err } for _, h := range obj.ParentHashes { err = p.walkObjectTree(h) if err != nil { return err } } case *object.Tree: for i := range obj.Entries { // Shortcut for blob objects: // 'or' the lower bits of a mode and check that it // it matches a filemode.Executable. The type information // is in the higher bits, but this is the cleanest way // to handle plain files with different modes. // Other non-tree objects are somewhat rare, so they // are not special-cased. if obj.Entries[i].Mode|0755 == filemode.Executable { p.add(obj.Entries[i].Hash) continue } // Normal walk for sub-trees (and symlinks etc). err = p.walkObjectTree(obj.Entries[i].Hash) if err != nil { return err } } case *object.Tag: return p.walkObjectTree(obj.Target) default: // Error out on unhandled object types. return fmt.Errorf("Unknown object %X %s %T\n", obj.ID(), obj.Type(), obj) } return nil } golang-gopkg-src-d-go-git.v4-4.11.0/options.go000066400000000000000000000353201345605224300207240ustar00rootroot00000000000000package git import ( "errors" "regexp" "strings" "golang.org/x/crypto/openpgp" "gopkg.in/src-d/go-git.v4/config" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/object" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband" "gopkg.in/src-d/go-git.v4/plumbing/transport" ) // SubmoduleRescursivity defines how depth will affect any submodule recursive // operation. type SubmoduleRescursivity uint const ( // DefaultRemoteName name of the default Remote, just like git command. DefaultRemoteName = "origin" // NoRecurseSubmodules disables the recursion for a submodule operation. 
NoRecurseSubmodules SubmoduleRescursivity = 0 // DefaultSubmoduleRecursionDepth allow recursion in a submodule operation. DefaultSubmoduleRecursionDepth SubmoduleRescursivity = 10 ) var ( ErrMissingURL = errors.New("URL field is required") ) // CloneOptions describes how a clone should be performed. type CloneOptions struct { // The (possibly remote) repository URL to clone from. URL string // Auth credentials, if required, to use with the remote repository. Auth transport.AuthMethod // Name of the remote to be added, by default `origin`. RemoteName string // Remote branch to clone. ReferenceName plumbing.ReferenceName // Fetch only ReferenceName if true. SingleBranch bool // No checkout of HEAD after clone if true. NoCheckout bool // Limit fetching to the specified number of commits. Depth int // RecurseSubmodules after the clone is created, initialize all submodules // within, using their default settings. This option is ignored if the // cloned repository does not have a worktree. RecurseSubmodules SubmoduleRescursivity // Progress is where the human readable information sent by the server is // stored, if nil nothing is stored and the capability (if supported) // no-progress, is sent to the server to avoid send this information. Progress sideband.Progress // Tags describe how the tags will be fetched from the remote repository, // by default is AllTags. Tags TagMode } // Validate validates the fields and sets the default values. func (o *CloneOptions) Validate() error { if o.URL == "" { return ErrMissingURL } if o.RemoteName == "" { o.RemoteName = DefaultRemoteName } if o.ReferenceName == "" { o.ReferenceName = plumbing.HEAD } if o.Tags == InvalidTagMode { o.Tags = AllTags } return nil } // PullOptions describes how a pull should be performed. type PullOptions struct { // Name of the remote to be pulled. If empty, uses the default. RemoteName string // Remote branch to clone. If empty, uses HEAD. 
ReferenceName plumbing.ReferenceName // Fetch only ReferenceName if true. SingleBranch bool // Limit fetching to the specified number of commits. Depth int // Auth credentials, if required, to use with the remote repository. Auth transport.AuthMethod // RecurseSubmodules controls if new commits of all populated submodules // should be fetched too. RecurseSubmodules SubmoduleRescursivity // Progress is where the human readable information sent by the server is // stored, if nil nothing is stored and the capability (if supported) // no-progress, is sent to the server to avoid send this information. Progress sideband.Progress // Force allows the pull to update a local branch even when the remote // branch does not descend from it. Force bool } // Validate validates the fields and sets the default values. func (o *PullOptions) Validate() error { if o.RemoteName == "" { o.RemoteName = DefaultRemoteName } if o.ReferenceName == "" { o.ReferenceName = plumbing.HEAD } return nil } type TagMode int const ( InvalidTagMode TagMode = iota // TagFollowing any tag that points into the histories being fetched is also // fetched. TagFollowing requires a server with `include-tag` capability // in order to fetch the annotated tags objects. TagFollowing // AllTags fetch all tags from the remote (i.e., fetch remote tags // refs/tags/* into local tags with the same name) AllTags //NoTags fetch no tags from the remote at all NoTags ) // FetchOptions describes how a fetch should be performed type FetchOptions struct { // Name of the remote to fetch from. Defaults to origin. RemoteName string RefSpecs []config.RefSpec // Depth limit fetching to the specified number of commits from the tip of // each remote branch history. Depth int // Auth credentials, if required, to use with the remote repository. 
Auth transport.AuthMethod // Progress is where the human readable information sent by the server is // stored, if nil nothing is stored and the capability (if supported) // no-progress, is sent to the server to avoid send this information. Progress sideband.Progress // Tags describe how the tags will be fetched from the remote repository, // by default is TagFollowing. Tags TagMode // Force allows the fetch to update a local branch even when the remote // branch does not descend from it. Force bool } // Validate validates the fields and sets the default values. func (o *FetchOptions) Validate() error { if o.RemoteName == "" { o.RemoteName = DefaultRemoteName } if o.Tags == InvalidTagMode { o.Tags = TagFollowing } for _, r := range o.RefSpecs { if err := r.Validate(); err != nil { return err } } return nil } // PushOptions describes how a push should be performed. type PushOptions struct { // RemoteName is the name of the remote to be pushed to. RemoteName string // RefSpecs specify what destination ref to update with what source // object. A refspec with empty src can be used to delete a reference. RefSpecs []config.RefSpec // Auth credentials, if required, to use with the remote repository. Auth transport.AuthMethod // Progress is where the human readable information sent by the server is // stored, if nil nothing is stored. Progress sideband.Progress } // Validate validates the fields and sets the default values. func (o *PushOptions) Validate() error { if o.RemoteName == "" { o.RemoteName = DefaultRemoteName } if len(o.RefSpecs) == 0 { o.RefSpecs = []config.RefSpec{ config.RefSpec(config.DefaultPushRefSpec), } } for _, r := range o.RefSpecs { if err := r.Validate(); err != nil { return err } } return nil } // SubmoduleUpdateOptions describes how a submodule update should be performed. type SubmoduleUpdateOptions struct { // Init, if true initializes the submodules recorded in the index. 
Init bool // NoFetch tell to the update command to not fetch new objects from the // remote site. NoFetch bool // RecurseSubmodules the update is performed not only in the submodules of // the current repository but also in any nested submodules inside those // submodules (and so on). Until the SubmoduleRescursivity is reached. RecurseSubmodules SubmoduleRescursivity // Auth credentials, if required, to use with the remote repository. Auth transport.AuthMethod } var ( ErrBranchHashExclusive = errors.New("Branch and Hash are mutually exclusive") ErrCreateRequiresBranch = errors.New("Branch is mandatory when Create is used") ) // CheckoutOptions describes how a checkout operation should be performed. type CheckoutOptions struct { // Hash is the hash of the commit to be checked out. If used, HEAD will be // in detached mode. If Create is not used, Branch and Hash are mutually // exclusive. Hash plumbing.Hash // Branch to be checked out, if Branch and Hash are empty is set to `master`. Branch plumbing.ReferenceName // Create a new branch named Branch and start it at Hash. Create bool // Force, if true when switching branches, proceed even if the index or the // working tree differs from HEAD. This is used to throw away local changes Force bool } // Validate validates the fields and sets the default values. func (o *CheckoutOptions) Validate() error { if !o.Create && !o.Hash.IsZero() && o.Branch != "" { return ErrBranchHashExclusive } if o.Create && o.Branch == "" { return ErrCreateRequiresBranch } if o.Branch == "" { o.Branch = plumbing.Master } return nil } // ResetMode defines the mode of a reset operation. type ResetMode int8 const ( // MixedReset resets the index but not the working tree (i.e., the changed // files are preserved but not marked for commit) and reports what has not // been updated. This is the default action. MixedReset ResetMode = iota // HardReset resets the index and working tree. Any changes to tracked files // in the working tree are discarded. 
HardReset // MergeReset resets the index and updates the files in the working tree // that are different between Commit and HEAD, but keeps those which are // different between the index and working tree (i.e. which have changes // which have not been added). // // If a file that is different between Commit and the index has unstaged // changes, reset is aborted. MergeReset // SoftReset does not touch the index file or the working tree at all (but // resets the head to , just like all modes do). This leaves all // your changed files "Changes to be committed", as git status would put it. SoftReset ) // ResetOptions describes how a reset operation should be performed. type ResetOptions struct { // Commit, if commit is present set the current branch head (HEAD) to it. Commit plumbing.Hash // Mode, form resets the current branch head to Commit and possibly updates // the index (resetting it to the tree of Commit) and the working tree // depending on Mode. If empty MixedReset is used. Mode ResetMode } // Validate validates the fields and sets the default values. func (o *ResetOptions) Validate(r *Repository) error { if o.Commit == plumbing.ZeroHash { ref, err := r.Head() if err != nil { return err } o.Commit = ref.Hash() } return nil } type LogOrder int8 const ( LogOrderDefault LogOrder = iota LogOrderDFS LogOrderDFSPost LogOrderBSF LogOrderCommitterTime ) // LogOptions describes how a log action should be performed. type LogOptions struct { // When the From option is set the log will only contain commits // reachable from it. If this option is not set, HEAD will be used as // the default From. From plumbing.Hash // The default traversal algorithm is Depth-first search // set Order=LogOrderCommitterTime for ordering by committer time (more compatible with `git log`) // set Order=LogOrderBSF for Breadth-first search Order LogOrder // Show only those commits in which the specified file was inserted/updated. // It is equivalent to running `git log -- `. 
FileName *string // Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as . // It is equivalent to running `git log --all`. // If set on true, the From option will be ignored. All bool } var ( ErrMissingAuthor = errors.New("author field is required") ) // CommitOptions describes how a commit operation should be performed. type CommitOptions struct { // All automatically stage files that have been modified and deleted, but // new files you have not told Git about are not affected. All bool // Author is the author's signature of the commit. Author *object.Signature // Committer is the committer's signature of the commit. If Committer is // nil the Author signature is used. Committer *object.Signature // Parents are the parents commits for the new commit, by default when // len(Parents) is zero, the hash of HEAD reference is used. Parents []plumbing.Hash // SignKey denotes a key to sign the commit with. A nil value here means the // commit will not be signed. The private key must be present and already // decrypted. SignKey *openpgp.Entity } // Validate validates the fields and sets the default values. func (o *CommitOptions) Validate(r *Repository) error { if o.Author == nil { return ErrMissingAuthor } if o.Committer == nil { o.Committer = o.Author } if len(o.Parents) == 0 { head, err := r.Head() if err != nil && err != plumbing.ErrReferenceNotFound { return err } if head != nil { o.Parents = []plumbing.Hash{head.Hash()} } } return nil } var ( ErrMissingName = errors.New("name field is required") ErrMissingTagger = errors.New("tagger field is required") ErrMissingMessage = errors.New("message field is required") ) // CreateTagOptions describes how a tag object should be created. type CreateTagOptions struct { // Tagger defines the signature of the tag creator. Tagger *object.Signature // Message defines the annotation of the tag. 
It is canonicalized during // validation into the format expected by git - no leading whitespace and // ending in a newline. Message string // SignKey denotes a key to sign the tag with. A nil value here means the tag // will not be signed. The private key must be present and already decrypted. SignKey *openpgp.Entity } // Validate validates the fields and sets the default values. func (o *CreateTagOptions) Validate(r *Repository, hash plumbing.Hash) error { if o.Tagger == nil { return ErrMissingTagger } if o.Message == "" { return ErrMissingMessage } // Canonicalize the message into the expected message format. o.Message = strings.TrimSpace(o.Message) + "\n" return nil } // ListOptions describes how a remote list should be performed. type ListOptions struct { // Auth credentials, if required, to use with the remote repository. Auth transport.AuthMethod } // CleanOptions describes how a clean should be performed. type CleanOptions struct { Dir bool } // GrepOptions describes how a grep should be performed. type GrepOptions struct { // Patterns are compiled Regexp objects to be matched. Patterns []*regexp.Regexp // InvertMatch selects non-matching lines. InvertMatch bool // CommitHash is the hash of the commit from which worktree should be derived. CommitHash plumbing.Hash // ReferenceName is the branch or tag name from which worktree should be derived. ReferenceName plumbing.ReferenceName // PathSpecs are compiled Regexp objects of pathspec to use in the matching. PathSpecs []*regexp.Regexp } var ( ErrHashOrReference = errors.New("ambiguous options, only one of CommitHash or ReferenceName can be passed") ) // Validate validates the fields and sets the default values. func (o *GrepOptions) Validate(w *Worktree) error { if !o.CommitHash.IsZero() && o.ReferenceName != "" { return ErrHashOrReference } // If none of CommitHash and ReferenceName are provided, set commit hash of // the repository's head. 
if o.CommitHash.IsZero() && o.ReferenceName == "" { ref, err := w.r.Head() if err != nil { return err } o.CommitHash = ref.Hash() } return nil } // PlainOpenOptions describes how opening a plain repository should be // performed. type PlainOpenOptions struct { // DetectDotGit defines whether parent directories should be // walked until a .git directory or file is found. DetectDotGit bool } // Validate validates the fields and sets the default values. func (o *PlainOpenOptions) Validate() error { return nil } golang-gopkg-src-d-go-git.v4-4.11.0/options_test.go000066400000000000000000000013541345605224300217630ustar00rootroot00000000000000package git import ( . "gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing/object" ) type OptionsSuite struct { BaseSuite } var _ = Suite(&OptionsSuite{}) func (s *OptionsSuite) TestCommitOptionsParentsFromHEAD(c *C) { o := CommitOptions{Author: &object.Signature{}} err := o.Validate(s.Repository) c.Assert(err, IsNil) c.Assert(o.Parents, HasLen, 1) } func (s *OptionsSuite) TestCommitOptionsMissingAuthor(c *C) { o := CommitOptions{} err := o.Validate(s.Repository) c.Assert(err, Equals, ErrMissingAuthor) } func (s *OptionsSuite) TestCommitOptionsCommitter(c *C) { sig := &object.Signature{} o := CommitOptions{Author: sig} err := o.Validate(s.Repository) c.Assert(err, IsNil) c.Assert(o.Committer, Equals, o.Author) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/000077500000000000000000000000001345605224300205145ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/cache/000077500000000000000000000000001345605224300215575ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/cache/buffer_lru.go000066400000000000000000000043021345605224300242400ustar00rootroot00000000000000package cache import ( "container/list" "sync" ) // BufferLRU implements an object cache with an LRU eviction policy and a // maximum size (measured in object size). 
type BufferLRU struct { MaxSize FileSize actualSize FileSize ll *list.List cache map[int64]*list.Element mut sync.Mutex } // NewBufferLRU creates a new BufferLRU with the given maximum size. The maximum // size will never be exceeded. func NewBufferLRU(maxSize FileSize) *BufferLRU { return &BufferLRU{MaxSize: maxSize} } // NewBufferLRUDefault creates a new BufferLRU with the default cache size. func NewBufferLRUDefault() *BufferLRU { return &BufferLRU{MaxSize: DefaultMaxSize} } type buffer struct { Key int64 Slice []byte } // Put puts a buffer into the cache. If the buffer is already in the cache, it // will be marked as used. Otherwise, it will be inserted. A buffers might // be evicted to make room for the new one. func (c *BufferLRU) Put(key int64, slice []byte) { c.mut.Lock() defer c.mut.Unlock() if c.cache == nil { c.actualSize = 0 c.cache = make(map[int64]*list.Element, 1000) c.ll = list.New() } bufSize := FileSize(len(slice)) if ee, ok := c.cache[key]; ok { oldBuf := ee.Value.(buffer) // in this case bufSize is a delta: new size - old size bufSize -= FileSize(len(oldBuf.Slice)) c.ll.MoveToFront(ee) ee.Value = buffer{key, slice} } else { if bufSize > c.MaxSize { return } ee := c.ll.PushFront(buffer{key, slice}) c.cache[key] = ee } c.actualSize += bufSize for c.actualSize > c.MaxSize { last := c.ll.Back() lastObj := last.Value.(buffer) lastSize := FileSize(len(lastObj.Slice)) c.ll.Remove(last) delete(c.cache, lastObj.Key) c.actualSize -= lastSize } } // Get returns a buffer by its key. It marks the buffer as used. If the buffer // is not in the cache, (nil, false) will be returned. func (c *BufferLRU) Get(key int64) ([]byte, bool) { c.mut.Lock() defer c.mut.Unlock() ee, ok := c.cache[key] if !ok { return nil, false } c.ll.MoveToFront(ee) return ee.Value.(buffer).Slice, true } // Clear the content of this buffer cache. 
func (c *BufferLRU) Clear() { c.mut.Lock() defer c.mut.Unlock() c.ll = nil c.cache = nil c.actualSize = 0 } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/cache/buffer_test.go000066400000000000000000000056521345605224300244260ustar00rootroot00000000000000package cache import ( "bytes" "sync" . "gopkg.in/check.v1" ) type BufferSuite struct { c map[string]Buffer aBuffer []byte bBuffer []byte cBuffer []byte dBuffer []byte eBuffer []byte } var _ = Suite(&BufferSuite{}) func (s *BufferSuite) SetUpTest(c *C) { s.aBuffer = []byte("a") s.bBuffer = []byte("bbb") s.cBuffer = []byte("c") s.dBuffer = []byte("d") s.eBuffer = []byte("ee") s.c = make(map[string]Buffer) s.c["two_bytes"] = NewBufferLRU(2 * Byte) s.c["default_lru"] = NewBufferLRUDefault() } func (s *BufferSuite) TestPutSameBuffer(c *C) { for _, o := range s.c { o.Put(1, s.aBuffer) o.Put(1, s.aBuffer) _, ok := o.Get(1) c.Assert(ok, Equals, true) } } func (s *ObjectSuite) TestPutSameBufferWithDifferentSize(c *C) { aBuffer := []byte("a") bBuffer := []byte("bbb") cBuffer := []byte("ccccc") dBuffer := []byte("ddddddd") cache := NewBufferLRU(7 * Byte) cache.Put(1, aBuffer) cache.Put(1, bBuffer) cache.Put(1, cBuffer) cache.Put(1, dBuffer) c.Assert(cache.MaxSize, Equals, 7*Byte) c.Assert(cache.actualSize, Equals, 7*Byte) c.Assert(cache.ll.Len(), Equals, 1) buf, ok := cache.Get(1) c.Assert(bytes.Equal(buf, dBuffer), Equals, true) c.Assert(FileSize(len(buf)), Equals, 7*Byte) c.Assert(ok, Equals, true) } func (s *BufferSuite) TestPutBigBuffer(c *C) { for _, o := range s.c { o.Put(1, s.bBuffer) _, ok := o.Get(2) c.Assert(ok, Equals, false) } } func (s *BufferSuite) TestPutCacheOverflow(c *C) { // this test only works with an specific size o := s.c["two_bytes"] o.Put(1, s.aBuffer) o.Put(2, s.cBuffer) o.Put(3, s.dBuffer) obj, ok := o.Get(1) c.Assert(ok, Equals, false) c.Assert(obj, IsNil) obj, ok = o.Get(2) c.Assert(ok, Equals, true) c.Assert(obj, NotNil) obj, ok = o.Get(3) c.Assert(ok, Equals, true) c.Assert(obj, NotNil) } func (s 
*BufferSuite) TestEvictMultipleBuffers(c *C) { o := s.c["two_bytes"] o.Put(1, s.cBuffer) o.Put(2, s.dBuffer) // now cache is full with two objects o.Put(3, s.eBuffer) // this put should evict all previous objects obj, ok := o.Get(1) c.Assert(ok, Equals, false) c.Assert(obj, IsNil) obj, ok = o.Get(2) c.Assert(ok, Equals, false) c.Assert(obj, IsNil) obj, ok = o.Get(3) c.Assert(ok, Equals, true) c.Assert(obj, NotNil) } func (s *BufferSuite) TestClear(c *C) { for _, o := range s.c { o.Put(1, s.aBuffer) o.Clear() obj, ok := o.Get(1) c.Assert(ok, Equals, false) c.Assert(obj, IsNil) } } func (s *BufferSuite) TestConcurrentAccess(c *C) { for _, o := range s.c { var wg sync.WaitGroup for i := 0; i < 1000; i++ { wg.Add(3) go func(i int) { o.Put(int64(i), []byte{00}) wg.Done() }(i) go func(i int) { if i%30 == 0 { o.Clear() } wg.Done() }(i) go func(i int) { o.Get(int64(i)) wg.Done() }(i) } wg.Wait() } } func (s *BufferSuite) TestDefaultLRU(c *C) { defaultLRU := s.c["default_lru"].(*BufferLRU) c.Assert(defaultLRU.MaxSize, Equals, DefaultMaxSize) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/cache/common.go000066400000000000000000000022661345605224300234040ustar00rootroot00000000000000package cache import "gopkg.in/src-d/go-git.v4/plumbing" const ( Byte FileSize = 1 << (iota * 10) KiByte MiByte GiByte ) type FileSize int64 const DefaultMaxSize FileSize = 96 * MiByte // Object is an interface to a object cache. type Object interface { // Put puts the given object into the cache. Whether this object will // actually be put into the cache or not is implementation specific. Put(o plumbing.EncodedObject) // Get gets an object from the cache given its hash. The second return value // is true if the object was returned, and false otherwise. Get(k plumbing.Hash) (plumbing.EncodedObject, bool) // Clear clears every object from the cache. Clear() } // Buffer is an interface to a buffer cache. type Buffer interface { // Put puts a buffer into the cache. 
If the buffer is already in the cache, // it will be marked as used. Otherwise, it will be inserted. Buffer might // be evicted to make room for the new one. Put(key int64, slice []byte) // Get returns a buffer by its key. It marks the buffer as used. If the // buffer is not in the cache, (nil, false) will be returned. Get(key int64) ([]byte, bool) // Clear clears every object from the cache. Clear() } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/cache/object_lru.go000066400000000000000000000044631345605224300242450ustar00rootroot00000000000000package cache import ( "container/list" "sync" "gopkg.in/src-d/go-git.v4/plumbing" ) // ObjectLRU implements an object cache with an LRU eviction policy and a // maximum size (measured in object size). type ObjectLRU struct { MaxSize FileSize actualSize FileSize ll *list.List cache map[interface{}]*list.Element mut sync.Mutex } // NewObjectLRU creates a new ObjectLRU with the given maximum size. The maximum // size will never be exceeded. func NewObjectLRU(maxSize FileSize) *ObjectLRU { return &ObjectLRU{MaxSize: maxSize} } // NewObjectLRUDefault creates a new ObjectLRU with the default cache size. func NewObjectLRUDefault() *ObjectLRU { return &ObjectLRU{MaxSize: DefaultMaxSize} } // Put puts an object into the cache. If the object is already in the cache, it // will be marked as used. Otherwise, it will be inserted. A single object might // be evicted to make room for the new object. 
func (c *ObjectLRU) Put(obj plumbing.EncodedObject) { c.mut.Lock() defer c.mut.Unlock() if c.cache == nil { c.actualSize = 0 c.cache = make(map[interface{}]*list.Element, 1000) c.ll = list.New() } objSize := FileSize(obj.Size()) key := obj.Hash() if ee, ok := c.cache[key]; ok { oldObj := ee.Value.(plumbing.EncodedObject) // in this case objSize is a delta: new size - old size objSize -= FileSize(oldObj.Size()) c.ll.MoveToFront(ee) ee.Value = obj } else { if objSize > c.MaxSize { return } ee := c.ll.PushFront(obj) c.cache[key] = ee } c.actualSize += objSize for c.actualSize > c.MaxSize { last := c.ll.Back() if last == nil { c.actualSize = 0 break } lastObj := last.Value.(plumbing.EncodedObject) lastSize := FileSize(lastObj.Size()) c.ll.Remove(last) delete(c.cache, lastObj.Hash()) c.actualSize -= lastSize } } // Get returns an object by its hash. It marks the object as used. If the object // is not in the cache, (nil, false) will be returned. func (c *ObjectLRU) Get(k plumbing.Hash) (plumbing.EncodedObject, bool) { c.mut.Lock() defer c.mut.Unlock() ee, ok := c.cache[k] if !ok { return nil, false } c.ll.MoveToFront(ee) return ee.Value.(plumbing.EncodedObject), true } // Clear the content of this object cache. func (c *ObjectLRU) Clear() { c.mut.Lock() defer c.mut.Unlock() c.ll = nil c.cache = nil c.actualSize = 0 } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/cache/object_test.go000066400000000000000000000107621345605224300244210ustar00rootroot00000000000000package cache import ( "fmt" "io" "sync" "testing" "gopkg.in/src-d/go-git.v4/plumbing" . 
"gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type ObjectSuite struct { c map[string]Object aObject plumbing.EncodedObject bObject plumbing.EncodedObject cObject plumbing.EncodedObject dObject plumbing.EncodedObject eObject plumbing.EncodedObject } var _ = Suite(&ObjectSuite{}) func (s *ObjectSuite) SetUpTest(c *C) { s.aObject = newObject("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 1*Byte) s.bObject = newObject("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", 3*Byte) s.cObject = newObject("cccccccccccccccccccccccccccccccccccccccc", 1*Byte) s.dObject = newObject("dddddddddddddddddddddddddddddddddddddddd", 1*Byte) s.eObject = newObject("eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", 2*Byte) s.c = make(map[string]Object) s.c["two_bytes"] = NewObjectLRU(2 * Byte) s.c["default_lru"] = NewObjectLRUDefault() } func (s *ObjectSuite) TestPutSameObject(c *C) { for _, o := range s.c { o.Put(s.aObject) o.Put(s.aObject) _, ok := o.Get(s.aObject.Hash()) c.Assert(ok, Equals, true) } } func (s *ObjectSuite) TestPutSameObjectWithDifferentSize(c *C) { const hash = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" cache := NewObjectLRU(7 * Byte) cache.Put(newObject(hash, 1*Byte)) cache.Put(newObject(hash, 3*Byte)) cache.Put(newObject(hash, 5*Byte)) cache.Put(newObject(hash, 7*Byte)) c.Assert(cache.MaxSize, Equals, 7*Byte) c.Assert(cache.actualSize, Equals, 7*Byte) c.Assert(cache.ll.Len(), Equals, 1) obj, ok := cache.Get(plumbing.NewHash(hash)) c.Assert(obj.Hash(), Equals, plumbing.NewHash(hash)) c.Assert(FileSize(obj.Size()), Equals, 7*Byte) c.Assert(ok, Equals, true) } func (s *ObjectSuite) TestPutBigObject(c *C) { for _, o := range s.c { o.Put(s.bObject) _, ok := o.Get(s.aObject.Hash()) c.Assert(ok, Equals, false) } } func (s *ObjectSuite) TestPutCacheOverflow(c *C) { // this test only works with an specific size o := s.c["two_bytes"] o.Put(s.aObject) o.Put(s.cObject) o.Put(s.dObject) obj, ok := o.Get(s.aObject.Hash()) c.Assert(ok, Equals, false) c.Assert(obj, IsNil) obj, ok = 
o.Get(s.cObject.Hash()) c.Assert(ok, Equals, true) c.Assert(obj, NotNil) obj, ok = o.Get(s.dObject.Hash()) c.Assert(ok, Equals, true) c.Assert(obj, NotNil) } func (s *ObjectSuite) TestEvictMultipleObjects(c *C) { o := s.c["two_bytes"] o.Put(s.cObject) o.Put(s.dObject) // now cache is full with two objects o.Put(s.eObject) // this put should evict all previous objects obj, ok := o.Get(s.cObject.Hash()) c.Assert(ok, Equals, false) c.Assert(obj, IsNil) obj, ok = o.Get(s.dObject.Hash()) c.Assert(ok, Equals, false) c.Assert(obj, IsNil) obj, ok = o.Get(s.eObject.Hash()) c.Assert(ok, Equals, true) c.Assert(obj, NotNil) } func (s *ObjectSuite) TestClear(c *C) { for _, o := range s.c { o.Put(s.aObject) o.Clear() obj, ok := o.Get(s.aObject.Hash()) c.Assert(ok, Equals, false) c.Assert(obj, IsNil) } } func (s *ObjectSuite) TestConcurrentAccess(c *C) { for _, o := range s.c { var wg sync.WaitGroup for i := 0; i < 1000; i++ { wg.Add(3) go func(i int) { o.Put(newObject(fmt.Sprint(i), FileSize(i))) wg.Done() }(i) go func(i int) { if i%30 == 0 { o.Clear() } wg.Done() }(i) go func(i int) { o.Get(plumbing.NewHash(fmt.Sprint(i))) wg.Done() }(i) } wg.Wait() } } func (s *ObjectSuite) TestDefaultLRU(c *C) { defaultLRU := s.c["default_lru"].(*ObjectLRU) c.Assert(defaultLRU.MaxSize, Equals, DefaultMaxSize) } func (s *ObjectSuite) TestObjectUpdateOverflow(c *C) { o := NewObjectLRU(9 * Byte) a1 := newObject(s.aObject.Hash().String(), 9*Byte) a2 := newObject(s.aObject.Hash().String(), 1*Byte) b := newObject(s.bObject.Hash().String(), 1*Byte) o.Put(a1) a1.SetSize(-5) o.Put(a2) o.Put(b) } type dummyObject struct { hash plumbing.Hash size FileSize } func newObject(hash string, size FileSize) plumbing.EncodedObject { return &dummyObject{ hash: plumbing.NewHash(hash), size: size, } } func (d *dummyObject) Hash() plumbing.Hash { return d.hash } func (*dummyObject) Type() plumbing.ObjectType { return plumbing.InvalidObject } func (*dummyObject) SetType(plumbing.ObjectType) {} func (d *dummyObject) 
Size() int64 { return int64(d.size) } func (d *dummyObject) SetSize(s int64) { d.size = FileSize(s) } func (*dummyObject) Reader() (io.ReadCloser, error) { return nil, nil } func (*dummyObject) Writer() (io.WriteCloser, error) { return nil, nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/error.go000066400000000000000000000011151345605224300221720ustar00rootroot00000000000000package plumbing import "fmt" type PermanentError struct { Err error } func NewPermanentError(err error) *PermanentError { if err == nil { return nil } return &PermanentError{Err: err} } func (e *PermanentError) Error() string { return fmt.Sprintf("permanent client error: %s", e.Err.Error()) } type UnexpectedError struct { Err error } func NewUnexpectedError(err error) *UnexpectedError { if err == nil { return nil } return &UnexpectedError{Err: err} } func (e *UnexpectedError) Error() string { return fmt.Sprintf("unexpected client error: %s", e.Err.Error()) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/filemode/000077500000000000000000000000001345605224300223005ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/filemode/filemode.go000066400000000000000000000133631345605224300244210ustar00rootroot00000000000000package filemode import ( "encoding/binary" "fmt" "os" "strconv" ) // A FileMode represents the kind of tree entries used by git. It // resembles regular file systems modes, although FileModes are // considerably simpler (there are not so many), and there are some, // like Submodule that has no file system equivalent. type FileMode uint32 const ( // Empty is used as the FileMode of tree elements when comparing // trees in the following situations: // // - the mode of tree elements before their creation. - the mode of // tree elements after their deletion. - the mode of unmerged // elements when checking the index. // // Empty has no file system equivalent. 
As Empty is the zero value // of FileMode, it is also returned by New and // NewFromOsNewFromOSFileMode along with an error, when they fail. Empty FileMode = 0 // Dir represent a Directory. Dir FileMode = 0040000 // Regular represent non-executable files. Please note this is not // the same as golang regular files, which include executable files. Regular FileMode = 0100644 // Deprecated represent non-executable files with the group writable // bit set. This mode was supported by the first versions of git, // but it has been deprecatred nowadays. This library uses them // internally, so you can read old packfiles, but will treat them as // Regulars when interfacing with the outside world. This is the // standard git behaviuor. Deprecated FileMode = 0100664 // Executable represents executable files. Executable FileMode = 0100755 // Symlink represents symbolic links to files. Symlink FileMode = 0120000 // Submodule represents git submodules. This mode has no file system // equivalent. Submodule FileMode = 0160000 ) // New takes the octal string representation of a FileMode and returns // the FileMode and a nil error. If the string can not be parsed to a // 32 bit unsigned octal number, it returns Empty and the parsing error. // // Example: "40000" means Dir, "100644" means Regular. // // Please note this function does not check if the returned FileMode // is valid in git or if it is malformed. For instance, "1" will // return the malformed FileMode(1) and a nil error. func New(s string) (FileMode, error) { n, err := strconv.ParseUint(s, 8, 32) if err != nil { return Empty, err } return FileMode(n), nil } // NewFromOSFileMode returns the FileMode used by git to represent // the provided file system modes and a nil error on success. If the // file system mode cannot be mapped to any valid git mode (as with // sockets or named pipes), it will return Empty and an error. 
// // Note that some git modes cannot be generated from os.FileModes, like // Deprecated and Submodule; while Empty will be returned, along with an // error, only when the method fails. func NewFromOSFileMode(m os.FileMode) (FileMode, error) { if m.IsRegular() { if isSetTemporary(m) { return Empty, fmt.Errorf("no equivalent git mode for %s", m) } if isSetCharDevice(m) { return Empty, fmt.Errorf("no equivalent git mode for %s", m) } if isSetUserExecutable(m) { return Executable, nil } return Regular, nil } if m.IsDir() { return Dir, nil } if isSetSymLink(m) { return Symlink, nil } return Empty, fmt.Errorf("no equivalent git mode for %s", m) } func isSetCharDevice(m os.FileMode) bool { return m&os.ModeCharDevice != 0 } func isSetTemporary(m os.FileMode) bool { return m&os.ModeTemporary != 0 } func isSetUserExecutable(m os.FileMode) bool { return m&0100 != 0 } func isSetSymLink(m os.FileMode) bool { return m&os.ModeSymlink != 0 } // Bytes return a slice of 4 bytes with the mode in little endian // encoding. func (m FileMode) Bytes() []byte { ret := make([]byte, 4) binary.LittleEndian.PutUint32(ret, uint32(m)) return ret[:] } // IsMalformed returns if the FileMode should not appear in a git packfile, // this is: Empty and any other mode not mentioned as a constant in this // package. func (m FileMode) IsMalformed() bool { return m != Dir && m != Regular && m != Deprecated && m != Executable && m != Symlink && m != Submodule } // String returns the FileMode as a string in the standatd git format, // this is, an octal number padded with ceros to 7 digits. Malformed // modes are printed in that same format, for easier debugging. // // Example: Regular is "0100644", Empty is "0000000". func (m FileMode) String() string { return fmt.Sprintf("%07o", uint32(m)) } // IsRegular returns if the FileMode represents that of a regular file, // this is, either Regular or Deprecated. 
Please note that Executable // are not regular even though in the UNIX tradition, they usually are: // See the IsFile method. func (m FileMode) IsRegular() bool { return m == Regular || m == Deprecated } // IsFile returns if the FileMode represents that of a file, this is, // Regular, Deprecated, Excutable or Link. func (m FileMode) IsFile() bool { return m == Regular || m == Deprecated || m == Executable || m == Symlink } // ToOSFileMode returns the os.FileMode to be used when creating file // system elements with the given git mode and a nil error on success. // // When the provided mode cannot be mapped to a valid file system mode // (e.g. Submodule) it returns os.FileMode(0) and an error. // // The returned file mode does not take into account the umask. func (m FileMode) ToOSFileMode() (os.FileMode, error) { switch m { case Dir: return os.ModePerm | os.ModeDir, nil case Submodule: return os.ModePerm | os.ModeDir, nil case Regular: return os.FileMode(0644), nil // Deprecated is no longer allowed: treated as a Regular instead case Deprecated: return os.FileMode(0644), nil case Executable: return os.FileMode(0755), nil case Symlink: return os.ModePerm | os.ModeSymlink, nil } return os.FileMode(0), fmt.Errorf("malformed mode (%s)", m) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/filemode/filemode_test.go000066400000000000000000000233051345605224300254550ustar00rootroot00000000000000package filemode import ( "os" "testing" . 
"gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type ModeSuite struct{} var _ = Suite(&ModeSuite{}) func (s *ModeSuite) TestNew(c *C) { for _, test := range [...]struct { input string expected FileMode }{ // these are the ones used in the packfile codification // of the tree entries {input: "40000", expected: Dir}, {input: "100644", expected: Regular}, {input: "100664", expected: Deprecated}, {input: "100755", expected: Executable}, {input: "120000", expected: Symlink}, {input: "160000", expected: Submodule}, // these are are not used by standard git to codify modes in // packfiles, but they often appear when parsing some git // outputs ("git diff-tree", for instance). {input: "000000", expected: Empty}, {input: "040000", expected: Dir}, // these are valid inputs, but probably means there is a bug // somewhere. {input: "0", expected: Empty}, {input: "42", expected: FileMode(042)}, {input: "00000000000100644", expected: Regular}, } { comment := Commentf("input = %q", test.input) obtained, err := New(test.input) c.Assert(obtained, Equals, test.expected, comment) c.Assert(err, IsNil, comment) } } func (s *ModeSuite) TestNewErrors(c *C) { for _, input := range [...]string{ "0x81a4", // Regular in hex "-rw-r--r--", // Regular in default UNIX representation "", "-42", "9", // this is no octal "09", // looks like octal, but it is not "mode", "-100644", "+100644", } { comment := Commentf("input = %q", input) obtained, err := New(input) c.Assert(obtained, Equals, Empty, comment) c.Assert(err, Not(IsNil), comment) } } // fixtures for testing NewModeFromOSFileMode type fixture struct { input os.FileMode expected FileMode err string // error regexp, empty string for nil error } func (f fixture) test(c *C) { obtained, err := NewFromOSFileMode(f.input) comment := Commentf("input = %s (%07o)", f.input, uint32(f.input)) c.Assert(obtained, Equals, f.expected, comment) if f.err != "" { c.Assert(err, ErrorMatches, f.err, comment) } else { c.Assert(err, IsNil, comment) } 
} func (s *ModeSuite) TestNewFromOsFileModeSimplePerms(c *C) { for _, f := range [...]fixture{ {os.FileMode(0755) | os.ModeDir, Dir, ""}, // drwxr-xr-x {os.FileMode(0700) | os.ModeDir, Dir, ""}, // drwx------ {os.FileMode(0500) | os.ModeDir, Dir, ""}, // dr-x------ {os.FileMode(0644), Regular, ""}, // -rw-r--r-- {os.FileMode(0660), Regular, ""}, // -rw-rw---- {os.FileMode(0640), Regular, ""}, // -rw-r----- {os.FileMode(0600), Regular, ""}, // -rw------- {os.FileMode(0400), Regular, ""}, // -r-------- {os.FileMode(0000), Regular, ""}, // ---------- {os.FileMode(0755), Executable, ""}, // -rwxr-xr-x {os.FileMode(0700), Executable, ""}, // -rwx------ {os.FileMode(0500), Executable, ""}, // -r-x------ {os.FileMode(0744), Executable, ""}, // -rwxr--r-- {os.FileMode(0540), Executable, ""}, // -r-xr----- {os.FileMode(0550), Executable, ""}, // -r-xr-x--- {os.FileMode(0777) | os.ModeSymlink, Symlink, ""}, // Lrwxrwxrwx } { f.test(c) } } func (s *ModeSuite) TestNewFromOsFileModeAppend(c *C) { // append files are just regular files fixture{ input: os.FileMode(0644) | os.ModeAppend, // arw-r--r-- expected: Regular, err: "", }.test(c) } func (s *ModeSuite) TestNewFromOsFileModeExclusive(c *C) { // exclusive files are just regular or executable files fixture{ input: os.FileMode(0644) | os.ModeExclusive, // lrw-r--r-- expected: Regular, err: "", }.test(c) fixture{ input: os.FileMode(0755) | os.ModeExclusive, // lrwxr-xr-x expected: Executable, err: "", }.test(c) } func (s *ModeSuite) TestNewFromOsFileModeTemporary(c *C) { // temporaty files are ignored fixture{ input: os.FileMode(0644) | os.ModeTemporary, // Trw-r--r-- expected: Empty, err: "no equivalent.*", }.test(c) fixture{ input: os.FileMode(0755) | os.ModeTemporary, // Trwxr-xr-x expected: Empty, err: "no equivalent.*", }.test(c) } func (s *ModeSuite) TestNewFromOsFileModeDevice(c *C) { // device files has no git equivalent fixture{ input: os.FileMode(0644) | os.ModeDevice, // Drw-r--r-- expected: Empty, err: "no 
equivalent.*", }.test(c) } func (s *ModeSuite) TestNewFromOsFileNamedPipe(c *C) { // named pipes files has not git equivalent fixture{ input: os.FileMode(0644) | os.ModeNamedPipe, // prw-r--r-- expected: Empty, err: "no equivalent.*", }.test(c) } func (s *ModeSuite) TestNewFromOsFileModeSocket(c *C) { // sockets has no git equivalent fixture{ input: os.FileMode(0644) | os.ModeSocket, // Srw-r--r-- expected: Empty, err: "no equivalent.*", }.test(c) } func (s *ModeSuite) TestNewFromOsFileModeSetuid(c *C) { // Setuid are just executables fixture{ input: os.FileMode(0755) | os.ModeSetuid, // urwxr-xr-x expected: Executable, err: "", }.test(c) } func (s *ModeSuite) TestNewFromOsFileModeSetgid(c *C) { // Setguid are regular or executables, depending on the owner perms fixture{ input: os.FileMode(0644) | os.ModeSetgid, // grw-r--r-- expected: Regular, err: "", }.test(c) fixture{ input: os.FileMode(0755) | os.ModeSetgid, // grwxr-xr-x expected: Executable, err: "", }.test(c) } func (s *ModeSuite) TestNewFromOsFileModeCharDevice(c *C) { // char devices has no git equivalent fixture{ input: os.FileMode(0644) | os.ModeCharDevice, // crw-r--r-- expected: Empty, err: "no equivalent.*", }.test(c) } func (s *ModeSuite) TestNewFromOsFileModeSticky(c *C) { // dirs with the sticky bit are just dirs fixture{ input: os.FileMode(0755) | os.ModeDir | os.ModeSticky, // dtrwxr-xr-x expected: Dir, err: "", }.test(c) } func (s *ModeSuite) TestByte(c *C) { for _, test := range [...]struct { input FileMode expected []byte }{ {FileMode(0), []byte{0x00, 0x00, 0x00, 0x00}}, {FileMode(1), []byte{0x01, 0x00, 0x00, 0x00}}, {FileMode(15), []byte{0x0f, 0x00, 0x00, 0x00}}, {FileMode(16), []byte{0x10, 0x00, 0x00, 0x00}}, {FileMode(255), []byte{0xff, 0x00, 0x00, 0x00}}, {FileMode(256), []byte{0x00, 0x01, 0x00, 0x00}}, {Empty, []byte{0x00, 0x00, 0x00, 0x00}}, {Dir, []byte{0x00, 0x40, 0x00, 0x00}}, {Regular, []byte{0xa4, 0x81, 0x00, 0x00}}, {Deprecated, []byte{0xb4, 0x81, 0x00, 0x00}}, {Executable, 
[]byte{0xed, 0x81, 0x00, 0x00}}, {Symlink, []byte{0x00, 0xa0, 0x00, 0x00}}, {Submodule, []byte{0x00, 0xe0, 0x00, 0x00}}, } { c.Assert(test.input.Bytes(), DeepEquals, test.expected, Commentf("input = %s", test.input)) } } func (s *ModeSuite) TestIsMalformed(c *C) { for _, test := range [...]struct { mode FileMode expected bool }{ {Empty, true}, {Dir, false}, {Regular, false}, {Deprecated, false}, {Executable, false}, {Symlink, false}, {Submodule, false}, {FileMode(01), true}, {FileMode(010), true}, {FileMode(0100), true}, {FileMode(01000), true}, {FileMode(010000), true}, {FileMode(0100000), true}, } { c.Assert(test.mode.IsMalformed(), Equals, test.expected) } } func (s *ModeSuite) TestString(c *C) { for _, test := range [...]struct { mode FileMode expected string }{ {Empty, "0000000"}, {Dir, "0040000"}, {Regular, "0100644"}, {Deprecated, "0100664"}, {Executable, "0100755"}, {Symlink, "0120000"}, {Submodule, "0160000"}, {FileMode(01), "0000001"}, {FileMode(010), "0000010"}, {FileMode(0100), "0000100"}, {FileMode(01000), "0001000"}, {FileMode(010000), "0010000"}, {FileMode(0100000), "0100000"}, } { c.Assert(test.mode.String(), Equals, test.expected) } } func (s *ModeSuite) TestIsRegular(c *C) { for _, test := range [...]struct { mode FileMode expected bool }{ {Empty, false}, {Dir, false}, {Regular, true}, {Deprecated, true}, {Executable, false}, {Symlink, false}, {Submodule, false}, {FileMode(01), false}, {FileMode(010), false}, {FileMode(0100), false}, {FileMode(01000), false}, {FileMode(010000), false}, {FileMode(0100000), false}, } { c.Assert(test.mode.IsRegular(), Equals, test.expected) } } func (s *ModeSuite) TestIsFile(c *C) { for _, test := range [...]struct { mode FileMode expected bool }{ {Empty, false}, {Dir, false}, {Regular, true}, {Deprecated, true}, {Executable, true}, {Symlink, true}, {Submodule, false}, {FileMode(01), false}, {FileMode(010), false}, {FileMode(0100), false}, {FileMode(01000), false}, {FileMode(010000), false}, {FileMode(0100000), 
false}, } { c.Assert(test.mode.IsFile(), Equals, test.expected) } } func (s *ModeSuite) TestToOSFileMode(c *C) { for _, test := range [...]struct { input FileMode expected os.FileMode errRegExp string // empty string for nil error }{ {Empty, os.FileMode(0), "malformed.*"}, {Dir, os.ModePerm | os.ModeDir, ""}, {Regular, os.FileMode(0644), ""}, {Deprecated, os.FileMode(0644), ""}, {Executable, os.FileMode(0755), ""}, {Symlink, os.ModePerm | os.ModeSymlink, ""}, {Submodule, os.ModePerm | os.ModeDir, ""}, {FileMode(01), os.FileMode(0), "malformed.*"}, {FileMode(010), os.FileMode(0), "malformed.*"}, {FileMode(0100), os.FileMode(0), "malformed.*"}, {FileMode(01000), os.FileMode(0), "malformed.*"}, {FileMode(010000), os.FileMode(0), "malformed.*"}, {FileMode(0100000), os.FileMode(0), "malformed.*"}, } { obtained, err := test.input.ToOSFileMode() comment := Commentf("input = %s", test.input) if test.errRegExp != "" { c.Assert(obtained, Equals, os.FileMode(0), comment) c.Assert(err, ErrorMatches, test.errRegExp, comment) } else { c.Assert(obtained, Equals, test.expected, comment) c.Assert(err, IsNil, comment) } } } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/000077500000000000000000000000001345605224300220045ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/config/000077500000000000000000000000001345605224300232515ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/config/common.go000066400000000000000000000046021345605224300250720ustar00rootroot00000000000000package config // New creates a new config instance. func New() *Config { return &Config{} } // Config contains all the sections, comments and includes from a config file. type Config struct { Comment *Comment Sections Sections Includes Includes } // Includes is a list of Includes in a config file. type Includes []*Include // Include is a reference to an included config file. 
type Include struct { Path string Config *Config } // Comment string without the prefix '#' or ';'. type Comment string const ( // NoSubsection token is passed to Config.Section and Config.SetSection to // represent the absence of a section. NoSubsection = "" ) // Section returns a existing section with the given name or creates a new one. func (c *Config) Section(name string) *Section { for i := len(c.Sections) - 1; i >= 0; i-- { s := c.Sections[i] if s.IsName(name) { return s } } s := &Section{Name: name} c.Sections = append(c.Sections, s) return s } // AddOption adds an option to a given section and subsection. Use the // NoSubsection constant for the subsection argument if no subsection is wanted. func (c *Config) AddOption(section string, subsection string, key string, value string) *Config { if subsection == "" { c.Section(section).AddOption(key, value) } else { c.Section(section).Subsection(subsection).AddOption(key, value) } return c } // SetOption sets an option to a given section and subsection. Use the // NoSubsection constant for the subsection argument if no subsection is wanted. func (c *Config) SetOption(section string, subsection string, key string, value string) *Config { if subsection == "" { c.Section(section).SetOption(key, value) } else { c.Section(section).Subsection(subsection).SetOption(key, value) } return c } // RemoveSection removes a section from a config file. func (c *Config) RemoveSection(name string) *Config { result := Sections{} for _, s := range c.Sections { if !s.IsName(name) { result = append(result, s) } } c.Sections = result return c } // RemoveSubsection remove s a subsection from a config file. 
func (c *Config) RemoveSubsection(section string, subsection string) *Config { for _, s := range c.Sections { if s.IsName(section) { result := Subsections{} for _, ss := range s.Subsections { if !ss.IsName(subsection) { result = append(result, ss) } } s.Subsections = result } } return c } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/config/common_test.go000066400000000000000000000042761345605224300261400ustar00rootroot00000000000000package config import ( "testing" . "gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type CommonSuite struct{} var _ = Suite(&CommonSuite{}) func (s *CommonSuite) TestConfig_SetOption(c *C) { obtained := New().SetOption("section", NoSubsection, "key1", "value1") expected := &Config{ Sections: []*Section{ { Name: "section", Options: []*Option{ {Key: "key1", Value: "value1"}, }, }, }, } c.Assert(obtained, DeepEquals, expected) obtained = obtained.SetOption("section", NoSubsection, "key1", "value1") c.Assert(obtained, DeepEquals, expected) obtained = New().SetOption("section", "subsection", "key1", "value1") expected = &Config{ Sections: []*Section{ { Name: "section", Subsections: []*Subsection{ { Name: "subsection", Options: []*Option{ {Key: "key1", Value: "value1"}, }, }, }, }, }, } c.Assert(obtained, DeepEquals, expected) obtained = obtained.SetOption("section", "subsection", "key1", "value1") c.Assert(obtained, DeepEquals, expected) } func (s *CommonSuite) TestConfig_AddOption(c *C) { obtained := New().AddOption("section", NoSubsection, "key1", "value1") expected := &Config{ Sections: []*Section{ { Name: "section", Options: []*Option{ {Key: "key1", Value: "value1"}, }, }, }, } c.Assert(obtained, DeepEquals, expected) } func (s *CommonSuite) TestConfig_RemoveSection(c *C) { sect := New(). AddOption("section1", NoSubsection, "key1", "value1"). AddOption("section2", NoSubsection, "key1", "value1") expected := New(). 
AddOption("section1", NoSubsection, "key1", "value1") c.Assert(sect.RemoveSection("other"), DeepEquals, sect) c.Assert(sect.RemoveSection("section2"), DeepEquals, expected) } func (s *CommonSuite) TestConfig_RemoveSubsection(c *C) { sect := New(). AddOption("section1", "sub1", "key1", "value1"). AddOption("section1", "sub2", "key1", "value1") expected := New(). AddOption("section1", "sub1", "key1", "value1") c.Assert(sect.RemoveSubsection("section1", "other"), DeepEquals, sect) c.Assert(sect.RemoveSubsection("other", "other"), DeepEquals, sect) c.Assert(sect.RemoveSubsection("section1", "sub2"), DeepEquals, expected) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/config/decoder.go000066400000000000000000000013371345605224300252110ustar00rootroot00000000000000package config import ( "io" "github.com/src-d/gcfg" ) // A Decoder reads and decodes config files from an input stream. type Decoder struct { io.Reader } // NewDecoder returns a new decoder that reads from r. func NewDecoder(r io.Reader) *Decoder { return &Decoder{r} } // Decode reads the whole config from its input and stores it in the // value pointed to by config. func (d *Decoder) Decode(config *Config) error { cb := func(s string, ss string, k string, v string, bv bool) error { if ss == "" && k == "" { config.Section(s) return nil } if ss != "" && k == "" { config.Section(s).Subsection(ss) return nil } config.AddOption(s, ss, k, v) return nil } return gcfg.ReadWithCallback(d, cb) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/config/decoder_test.go000066400000000000000000000033141345605224300262450ustar00rootroot00000000000000package config import ( "bytes" . 
"gopkg.in/check.v1" ) type DecoderSuite struct{} var _ = Suite(&DecoderSuite{}) func (s *DecoderSuite) TestDecode(c *C) { for idx, fixture := range fixtures { r := bytes.NewReader([]byte(fixture.Raw)) d := NewDecoder(r) cfg := &Config{} err := d.Decode(cfg) c.Assert(err, IsNil, Commentf("decoder error for fixture: %d", idx)) buf := bytes.NewBuffer(nil) e := NewEncoder(buf) _ = e.Encode(cfg) c.Assert(cfg, DeepEquals, fixture.Config, Commentf("bad result for fixture: %d, %s", idx, buf.String())) } } func (s *DecoderSuite) TestDecodeFailsWithIdentBeforeSection(c *C) { t := ` key=value [section] key=value ` decodeFails(c, t) } func (s *DecoderSuite) TestDecodeFailsWithEmptySectionName(c *C) { t := ` [] key=value ` decodeFails(c, t) } func (s *DecoderSuite) TestDecodeFailsWithEmptySubsectionName(c *C) { t := ` [remote ""] key=value ` decodeFails(c, t) } func (s *DecoderSuite) TestDecodeFailsWithBadSubsectionName(c *C) { t := ` [remote origin"] key=value ` decodeFails(c, t) t = ` [remote "origin] key=value ` decodeFails(c, t) } func (s *DecoderSuite) TestDecodeFailsWithTrailingGarbage(c *C) { t := ` [remote]garbage key=value ` decodeFails(c, t) t = ` [remote "origin"]garbage key=value ` decodeFails(c, t) } func (s *DecoderSuite) TestDecodeFailsWithGarbage(c *C) { decodeFails(c, "---") decodeFails(c, "????") decodeFails(c, "[sect\nkey=value") decodeFails(c, "sect]\nkey=value") decodeFails(c, `[section]key="value`) decodeFails(c, `[section]key=value"`) } func decodeFails(c *C, text string) { r := bytes.NewReader([]byte(text)) d := NewDecoder(r) cfg := &Config{} err := d.Decode(cfg) c.Assert(err, NotNil) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/config/doc.go000066400000000000000000000117341345605224300243530ustar00rootroot00000000000000// Package config implements encoding and decoding of git config files. // // Configuration File // ------------------ // // The Git configuration file contains a number of variables that affect // the Git commands' behavior. 
The `.git/config` file in each repository // is used to store the configuration for that repository, and // `$HOME/.gitconfig` is used to store a per-user configuration as // fallback values for the `.git/config` file. The file `/etc/gitconfig` // can be used to store a system-wide default configuration. // // The configuration variables are used by both the Git plumbing // and the porcelains. The variables are divided into sections, wherein // the fully qualified variable name of the variable itself is the last // dot-separated segment and the section name is everything before the last // dot. The variable names are case-insensitive, allow only alphanumeric // characters and `-`, and must start with an alphabetic character. Some // variables may appear multiple times; we say then that the variable is // multivalued. // // Syntax // ~~~~~~ // // The syntax is fairly flexible and permissive; whitespaces are mostly // ignored. The '#' and ';' characters begin comments to the end of line, // blank lines are ignored. // // The file consists of sections and variables. A section begins with // the name of the section in square brackets and continues until the next // section begins. Section names are case-insensitive. Only alphanumeric // characters, `-` and `.` are allowed in section names. Each variable // must belong to some section, which means that there must be a section // header before the first setting of a variable. // // Sections can be further divided into subsections. To begin a subsection // put its name in double quotes, separated by space from the section name, // in the section header, like in the example below: // // -------- // [section "subsection"] // // -------- // // Subsection names are case sensitive and can contain any characters except // newline (doublequote `"` and backslash can be included by escaping them // as `\"` and `\\`, respectively). Section headers cannot span multiple // lines. 
Variables may belong directly to a section or to a given subsection. // You can have `[section]` if you have `[section "subsection"]`, but you // don't need to. // // There is also a deprecated `[section.subsection]` syntax. With this // syntax, the subsection name is converted to lower-case and is also // compared case sensitively. These subsection names follow the same // restrictions as section names. // // All the other lines (and the remainder of the line after the section // header) are recognized as setting variables, in the form // 'name = value' (or just 'name', which is a short-hand to say that // the variable is the boolean "true"). // The variable names are case-insensitive, allow only alphanumeric characters // and `-`, and must start with an alphabetic character. // // A line that defines a value can be continued to the next line by // ending it with a `\`; the backquote and the end-of-line are // stripped. Leading whitespaces after 'name =', the remainder of the // line after the first comment character '#' or ';', and trailing // whitespaces of the line are discarded unless they are enclosed in // double quotes. Internal whitespaces within the value are retained // verbatim. // // Inside double quotes, double quote `"` and backslash `\` characters // must be escaped: use `\"` for `"` and `\\` for `\`. // // The following escape sequences (beside `\"` and `\\`) are recognized: // `\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB) // and `\b` for backspace (BS). Other char escape sequences (including octal // escape sequences) are invalid. // // Includes // ~~~~~~~~ // // You can include one config file from another by setting the special // `include.path` variable to the name of the file to be included. The // variable takes a pathname as its value, and is subject to tilde // expansion. // // The included file is expanded immediately, as if its contents had been // found at the location of the include directive. 
If the value of the // `include.path` variable is a relative path, the path is considered to be // relative to the configuration file in which the include directive was // found. See below for examples. // // // Example // ~~~~~~~ // // # Core variables // [core] // ; Don't trust file modes // filemode = false // // # Our diff algorithm // [diff] // external = /usr/local/bin/diff-wrapper // renames = true // // [branch "devel"] // remote = origin // merge = refs/heads/devel // // # Proxy settings // [core] // gitProxy="ssh" for "kernel.org" // gitProxy=default-proxy ; for the rest // // [include] // path = /path/to/foo.inc ; include by absolute path // path = foo ; expand "foo" relative to the current file // path = ~/foo ; expand "foo" in your `$HOME` directory // package config golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/config/encoder.go000066400000000000000000000027031345605224300252210ustar00rootroot00000000000000package config import ( "fmt" "io" "strings" ) // An Encoder writes config files to an output stream. type Encoder struct { w io.Writer } // NewEncoder returns a new encoder that writes to w. func NewEncoder(w io.Writer) *Encoder { return &Encoder{w} } // Encode writes the config in git config format to the stream of the encoder. 
func (e *Encoder) Encode(cfg *Config) error { for _, s := range cfg.Sections { if err := e.encodeSection(s); err != nil { return err } } return nil } func (e *Encoder) encodeSection(s *Section) error { if len(s.Options) > 0 { if err := e.printf("[%s]\n", s.Name); err != nil { return err } if err := e.encodeOptions(s.Options); err != nil { return err } } for _, ss := range s.Subsections { if err := e.encodeSubsection(s.Name, ss); err != nil { return err } } return nil } func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error { //TODO: escape if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil { return err } return e.encodeOptions(s.Options) } func (e *Encoder) encodeOptions(opts Options) error { for _, o := range opts { pattern := "\t%s = %s\n" if strings.Contains(o.Value, "\\") { pattern = "\t%s = %q\n" } if err := e.printf(pattern, o.Key, o.Value); err != nil { return err } } return nil } func (e *Encoder) printf(msg string, args ...interface{}) error { _, err := fmt.Fprintf(e.w, msg, args...) return err } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/config/encoder_test.go000066400000000000000000000007031345605224300262560ustar00rootroot00000000000000package config import ( "bytes" . 
"gopkg.in/check.v1" ) type EncoderSuite struct{} var _ = Suite(&EncoderSuite{}) func (s *EncoderSuite) TestEncode(c *C) { for idx, fixture := range fixtures { buf := &bytes.Buffer{} e := NewEncoder(buf) err := e.Encode(fixture.Config) c.Assert(err, IsNil, Commentf("encoder error for fixture: %d", idx)) c.Assert(buf.String(), Equals, fixture.Text, Commentf("bad result for fixture: %d", idx)) } } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/config/fixtures_test.go000066400000000000000000000041751345605224300265170ustar00rootroot00000000000000package config type Fixture struct { Text string Raw string Config *Config } var fixtures = []*Fixture{ { Raw: "", Text: "", Config: New(), }, { Raw: ";Comments only", Text: "", Config: New(), }, { Raw: "#Comments only", Text: "", Config: New(), }, { Raw: "[core]\nrepositoryformatversion=0", Text: "[core]\n\trepositoryformatversion = 0\n", Config: New().AddOption("core", "", "repositoryformatversion", "0"), }, { Raw: "[core]\n\trepositoryformatversion = 0\n", Text: "[core]\n\trepositoryformatversion = 0\n", Config: New().AddOption("core", "", "repositoryformatversion", "0"), }, { Raw: ";Commment\n[core]\n;Comment\nrepositoryformatversion = 0\n", Text: "[core]\n\trepositoryformatversion = 0\n", Config: New().AddOption("core", "", "repositoryformatversion", "0"), }, { Raw: "#Commment\n#Comment\n[core]\n#Comment\nrepositoryformatversion = 0\n", Text: "[core]\n\trepositoryformatversion = 0\n", Config: New().AddOption("core", "", "repositoryformatversion", "0"), }, { Raw: ` [sect1] opt1 = value1 [sect1 "subsect1"] opt2 = value2 `, Text: `[sect1] opt1 = value1 [sect1 "subsect1"] opt2 = value2 `, Config: New(). AddOption("sect1", "", "opt1", "value1"). 
AddOption("sect1", "subsect1", "opt2", "value2"), }, { Raw: ` [sect1] opt1 = value1 [sect1 "subsect1"] opt2 = value2 [sect1] opt1 = value1b [sect1 "subsect1"] opt2 = value2b [sect1 "subsect2"] opt2 = value2 `, Text: `[sect1] opt1 = value1 opt1 = value1b [sect1 "subsect1"] opt2 = value2 opt2 = value2b [sect1 "subsect2"] opt2 = value2 `, Config: New(). AddOption("sect1", "", "opt1", "value1"). AddOption("sect1", "", "opt1", "value1b"). AddOption("sect1", "subsect1", "opt2", "value2"). AddOption("sect1", "subsect1", "opt2", "value2b"). AddOption("sect1", "subsect2", "opt2", "value2"), }, { Raw: ` [sect1] opt1 = value1 opt1 = value2 `, Text: `[sect1] opt1 = value1 opt1 = value2 `, Config: New(). AddOption("sect1", "", "opt1", "value1"). AddOption("sect1", "", "opt1", "value2"), }, } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/config/option.go000066400000000000000000000045431345605224300251160ustar00rootroot00000000000000package config import ( "fmt" "strings" ) // Option defines a key/value entity in a config file. type Option struct { // Key preserving original caseness. // Use IsKey instead to compare key regardless of caseness. Key string // Original value as string, could be not normalized. Value string } type Options []*Option // IsKey returns true if the given key matches // this option's key in a case-insensitive comparison. func (o *Option) IsKey(key string) bool { return strings.ToLower(o.Key) == strings.ToLower(key) } func (opts Options) GoString() string { var strs []string for _, opt := range opts { strs = append(strs, fmt.Sprintf("%#v", opt)) } return strings.Join(strs, ", ") } // Get gets the value for the given key if set, // otherwise it returns the empty string. // // Note that there is no difference // // This matches git behaviour since git v1.8.1-rc1, // if there are multiple definitions of a key, the // last one wins. 
// // See: http://article.gmane.org/gmane.linux.kernel/1407184 // // In order to get all possible values for the same key, // use GetAll. func (opts Options) Get(key string) string { for i := len(opts) - 1; i >= 0; i-- { o := opts[i] if o.IsKey(key) { return o.Value } } return "" } // GetAll returns all possible values for the same key. func (opts Options) GetAll(key string) []string { result := []string{} for _, o := range opts { if o.IsKey(key) { result = append(result, o.Value) } } return result } func (opts Options) withoutOption(key string) Options { result := Options{} for _, o := range opts { if !o.IsKey(key) { result = append(result, o) } } return result } func (opts Options) withAddedOption(key string, value string) Options { return append(opts, &Option{key, value}) } func (opts Options) withSettedOption(key string, values ...string) Options { var result Options var added []string for _, o := range opts { if !o.IsKey(key) { result = append(result, o) continue } if contains(values, o.Value) { added = append(added, o.Value) result = append(result, o) continue } } for _, value := range values { if contains(added, value) { continue } result = result.withAddedOption(key, value) } return result } func contains(haystack []string, needle string) bool { for _, s := range haystack { if s == needle { return true } } return false } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/config/option_test.go000066400000000000000000000016621345605224300261540ustar00rootroot00000000000000package config import ( . 
"gopkg.in/check.v1" ) type OptionSuite struct{} var _ = Suite(&OptionSuite{}) func (s *OptionSuite) TestOptions_GetAll(c *C) { o := Options{ &Option{"k", "v"}, &Option{"ok", "v1"}, &Option{"K", "v2"}, } c.Assert(o.GetAll("k"), DeepEquals, []string{"v", "v2"}) c.Assert(o.GetAll("K"), DeepEquals, []string{"v", "v2"}) c.Assert(o.GetAll("ok"), DeepEquals, []string{"v1"}) c.Assert(o.GetAll("unexistant"), DeepEquals, []string{}) o = Options{} c.Assert(o.GetAll("k"), DeepEquals, []string{}) } func (s *OptionSuite) TestOption_IsKey(c *C) { c.Assert((&Option{Key: "key"}).IsKey("key"), Equals, true) c.Assert((&Option{Key: "key"}).IsKey("KEY"), Equals, true) c.Assert((&Option{Key: "KEY"}).IsKey("key"), Equals, true) c.Assert((&Option{Key: "key"}).IsKey("other"), Equals, false) c.Assert((&Option{Key: "key"}).IsKey(""), Equals, false) c.Assert((&Option{Key: ""}).IsKey("key"), Equals, false) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/config/section.go000066400000000000000000000102021345605224300252370ustar00rootroot00000000000000package config import ( "fmt" "strings" ) // Section is the representation of a section inside git configuration files. // Each Section contains Options that are used by both the Git plumbing // and the porcelains. // Sections can be further divided into subsections. To begin a subsection // put its name in double quotes, separated by space from the section name, // in the section header, like in the example below: // // [section "subsection"] // // All the other lines (and the remainder of the line after the section header) // are recognized as option variables, in the form "name = value" (or just name, // which is a short-hand to say that the variable is the boolean "true"). 
// The variable names are case-insensitive, allow only alphanumeric characters // and -, and must start with an alphabetic character: // // [section "subsection1"] // option1 = value1 // option2 // [section "subsection2"] // option3 = value2 // type Section struct { Name string Options Options Subsections Subsections } type Subsection struct { Name string Options Options } type Sections []*Section func (s Sections) GoString() string { var strs []string for _, ss := range s { strs = append(strs, fmt.Sprintf("%#v", ss)) } return strings.Join(strs, ", ") } type Subsections []*Subsection func (s Subsections) GoString() string { var strs []string for _, ss := range s { strs = append(strs, fmt.Sprintf("%#v", ss)) } return strings.Join(strs, ", ") } // IsName checks if the name provided is equals to the Section name, case insensitive. func (s *Section) IsName(name string) bool { return strings.ToLower(s.Name) == strings.ToLower(name) } // Option return the value for the specified key. Empty string is returned if // key does not exists. func (s *Section) Option(key string) string { return s.Options.Get(key) } // AddOption adds a new Option to the Section. The updated Section is returned. func (s *Section) AddOption(key string, value string) *Section { s.Options = s.Options.withAddedOption(key, value) return s } // SetOption adds a new Option to the Section. If the option already exists, is replaced. // The updated Section is returned. func (s *Section) SetOption(key string, value string) *Section { s.Options = s.Options.withSettedOption(key, value) return s } // Remove an option with the specified key. The updated Section is returned. func (s *Section) RemoveOption(key string) *Section { s.Options = s.Options.withoutOption(key) return s } // Subsection returns a Subsection from the specified Section. If the // Subsection does not exists, new one is created and added to Section. 
func (s *Section) Subsection(name string) *Subsection { for i := len(s.Subsections) - 1; i >= 0; i-- { ss := s.Subsections[i] if ss.IsName(name) { return ss } } ss := &Subsection{Name: name} s.Subsections = append(s.Subsections, ss) return ss } // HasSubsection checks if the Section has a Subsection with the specified name. func (s *Section) HasSubsection(name string) bool { for _, ss := range s.Subsections { if ss.IsName(name) { return true } } return false } // IsName checks if the name of the subsection is exactly the specified name. func (s *Subsection) IsName(name string) bool { return s.Name == name } // Option returns an option with the specified key. If the option does not exists, // empty spring will be returned. func (s *Subsection) Option(key string) string { return s.Options.Get(key) } // AddOption adds a new Option to the Subsection. The updated Subsection is returned. func (s *Subsection) AddOption(key string, value string) *Subsection { s.Options = s.Options.withAddedOption(key, value) return s } // SetOption adds a new Option to the Subsection. If the option already exists, is replaced. // The updated Subsection is returned. func (s *Subsection) SetOption(key string, value ...string) *Subsection { s.Options = s.Options.withSettedOption(key, value...) return s } // RemoveOption removes the option with the specified key. The updated Subsection is returned. func (s *Subsection) RemoveOption(key string) *Subsection { s.Options = s.Options.withoutOption(key) return s } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/config/section_test.go000066400000000000000000000041131345605224300263020ustar00rootroot00000000000000package config import ( . 
"gopkg.in/check.v1" ) type SectionSuite struct{} var _ = Suite(&SectionSuite{}) func (s *SectionSuite) TestSection_Option(c *C) { sect := &Section{ Options: []*Option{ {Key: "key1", Value: "value1"}, {Key: "key2", Value: "value2"}, {Key: "key1", Value: "value3"}, }, } c.Assert(sect.Option("otherkey"), Equals, "") c.Assert(sect.Option("key2"), Equals, "value2") c.Assert(sect.Option("key1"), Equals, "value3") } func (s *SectionSuite) TestSubsection_Option(c *C) { sect := &Subsection{ Options: []*Option{ {Key: "key1", Value: "value1"}, {Key: "key2", Value: "value2"}, {Key: "key1", Value: "value3"}, }, } c.Assert(sect.Option("otherkey"), Equals, "") c.Assert(sect.Option("key2"), Equals, "value2") c.Assert(sect.Option("key1"), Equals, "value3") } func (s *SectionSuite) TestSection_RemoveOption(c *C) { sect := &Section{ Options: []*Option{ {Key: "key1", Value: "value1"}, {Key: "key2", Value: "value2"}, {Key: "key1", Value: "value3"}, }, } c.Assert(sect.RemoveOption("otherkey"), DeepEquals, sect) expected := &Section{ Options: []*Option{ {Key: "key2", Value: "value2"}, }, } c.Assert(sect.RemoveOption("key1"), DeepEquals, expected) } func (s *SectionSuite) TestSubsection_RemoveOption(c *C) { sect := &Subsection{ Options: []*Option{ {Key: "key1", Value: "value1"}, {Key: "key2", Value: "value2"}, {Key: "key1", Value: "value3"}, }, } c.Assert(sect.RemoveOption("otherkey"), DeepEquals, sect) expected := &Subsection{ Options: []*Option{ {Key: "key2", Value: "value2"}, }, } c.Assert(sect.RemoveOption("key1"), DeepEquals, expected) } func (s *SectionSuite) TestSubsection_SetOption(c *C) { sect := &Subsection{ Options: []*Option{ {Key: "key1", Value: "value1"}, {Key: "key2", Value: "value2"}, {Key: "key1", Value: "value3"}, }, } expected := &Subsection{ Options: []*Option{ {Key: "key1", Value: "value1"}, {Key: "key2", Value: "value2"}, {Key: "key1", Value: "value4"}, }, } c.Assert(sect.SetOption("key1", "value1", "value4"), DeepEquals, expected) } 
golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/diff/000077500000000000000000000000001345605224300227145ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/diff/patch.go000066400000000000000000000033221345605224300243420ustar00rootroot00000000000000package diff import ( "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/filemode" ) // Operation defines the operation of a diff item. type Operation int const ( // Equal item represents a equals diff. Equal Operation = iota // Add item represents an insert diff. Add // Delete item represents a delete diff. Delete ) // Patch represents a collection of steps to transform several files. type Patch interface { // FilePatches returns a slice of patches per file. FilePatches() []FilePatch // Message returns an optional message that can be at the top of the // Patch representation. Message() string } // FilePatch represents the necessary steps to transform one file to another. type FilePatch interface { // IsBinary returns true if this patch is representing a binary file. IsBinary() bool // Files returns the from and to Files, with all the necessary metadata to // about them. If the patch creates a new file, "from" will be nil. // If the patch deletes a file, "to" will be nil. Files() (from, to File) // Chunks returns a slice of ordered changes to transform "from" File to // "to" File. If the file is a binary one, Chunks will be empty. Chunks() []Chunk } // File contains all the file metadata necessary to print some patch formats. type File interface { // Hash returns the File Hash. Hash() plumbing.Hash // Mode returns the FileMode. Mode() filemode.FileMode // Path returns the complete Path to the file, including the filename. Path() string } // Chunk represents a portion of a file transformation to another. type Chunk interface { // Content contains the portion of the file. Content() string // Type contains the Operation to do with this Chunk. 
Type() Operation } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/diff/unified_encoder.go000066400000000000000000000170631345605224300263740ustar00rootroot00000000000000package diff import ( "bytes" "fmt" "io" "strings" "gopkg.in/src-d/go-git.v4/plumbing" ) const ( diffInit = "diff --git a/%s b/%s\n" chunkStart = "@@ -" chunkMiddle = " +" chunkEnd = " @@%s\n" chunkCount = "%d,%d" noFilePath = "/dev/null" aDir = "a/" bDir = "b/" fPath = "--- %s\n" tPath = "+++ %s\n" binary = "Binary files %s and %s differ\n" addLine = "+%s\n" deleteLine = "-%s\n" equalLine = " %s\n" oldMode = "old mode %o\n" newMode = "new mode %o\n" deletedFileMode = "deleted file mode %o\n" newFileMode = "new file mode %o\n" renameFrom = "from" renameTo = "to" renameFileMode = "rename %s %s\n" indexAndMode = "index %s..%s %o\n" indexNoMode = "index %s..%s\n" DefaultContextLines = 3 ) // UnifiedEncoder encodes an unified diff into the provided Writer. // There are some unsupported features: // - Similarity index for renames // - Sort hash representation type UnifiedEncoder struct { io.Writer // ctxLines is the count of unchanged lines that will appear // surrounding a change. 
ctxLines int buf bytes.Buffer } func NewUnifiedEncoder(w io.Writer, ctxLines int) *UnifiedEncoder { return &UnifiedEncoder{ctxLines: ctxLines, Writer: w} } func (e *UnifiedEncoder) Encode(patch Patch) error { e.printMessage(patch.Message()) if err := e.encodeFilePatch(patch.FilePatches()); err != nil { return err } _, err := e.buf.WriteTo(e) return err } func (e *UnifiedEncoder) encodeFilePatch(filePatches []FilePatch) error { for _, p := range filePatches { f, t := p.Files() if err := e.header(f, t, p.IsBinary()); err != nil { return err } g := newHunksGenerator(p.Chunks(), e.ctxLines) for _, c := range g.Generate() { c.WriteTo(&e.buf) } } return nil } func (e *UnifiedEncoder) printMessage(message string) { isEmpty := message == "" hasSuffix := strings.HasSuffix(message, "\n") if !isEmpty && !hasSuffix { message = message + "\n" } e.buf.WriteString(message) } func (e *UnifiedEncoder) header(from, to File, isBinary bool) error { switch { case from == nil && to == nil: return nil case from != nil && to != nil: hashEquals := from.Hash() == to.Hash() fmt.Fprintf(&e.buf, diffInit, from.Path(), to.Path()) if from.Mode() != to.Mode() { fmt.Fprintf(&e.buf, oldMode+newMode, from.Mode(), to.Mode()) } if from.Path() != to.Path() { fmt.Fprintf(&e.buf, renameFileMode+renameFileMode, renameFrom, from.Path(), renameTo, to.Path()) } if from.Mode() != to.Mode() && !hashEquals { fmt.Fprintf(&e.buf, indexNoMode, from.Hash(), to.Hash()) } else if !hashEquals { fmt.Fprintf(&e.buf, indexAndMode, from.Hash(), to.Hash(), from.Mode()) } if !hashEquals { e.pathLines(isBinary, aDir+from.Path(), bDir+to.Path()) } case from == nil: fmt.Fprintf(&e.buf, diffInit, to.Path(), to.Path()) fmt.Fprintf(&e.buf, newFileMode, to.Mode()) fmt.Fprintf(&e.buf, indexNoMode, plumbing.ZeroHash, to.Hash()) e.pathLines(isBinary, noFilePath, bDir+to.Path()) case to == nil: fmt.Fprintf(&e.buf, diffInit, from.Path(), from.Path()) fmt.Fprintf(&e.buf, deletedFileMode, from.Mode()) fmt.Fprintf(&e.buf, indexNoMode, 
from.Hash(), plumbing.ZeroHash) e.pathLines(isBinary, aDir+from.Path(), noFilePath) } return nil } func (e *UnifiedEncoder) pathLines(isBinary bool, fromPath, toPath string) { format := fPath + tPath if isBinary { format = binary } fmt.Fprintf(&e.buf, format, fromPath, toPath) } type hunksGenerator struct { fromLine, toLine int ctxLines int chunks []Chunk current *hunk hunks []*hunk beforeContext, afterContext []string } func newHunksGenerator(chunks []Chunk, ctxLines int) *hunksGenerator { return &hunksGenerator{ chunks: chunks, ctxLines: ctxLines, } } func (c *hunksGenerator) Generate() []*hunk { for i, chunk := range c.chunks { ls := splitLines(chunk.Content()) lsLen := len(ls) switch chunk.Type() { case Equal: c.fromLine += lsLen c.toLine += lsLen c.processEqualsLines(ls, i) case Delete: if lsLen != 0 { c.fromLine++ } c.processHunk(i, chunk.Type()) c.fromLine += lsLen - 1 c.current.AddOp(chunk.Type(), ls...) case Add: if lsLen != 0 { c.toLine++ } c.processHunk(i, chunk.Type()) c.toLine += lsLen - 1 c.current.AddOp(chunk.Type(), ls...) } if i == len(c.chunks)-1 && c.current != nil { c.hunks = append(c.hunks, c.current) } } return c.hunks } func (c *hunksGenerator) processHunk(i int, op Operation) { if c.current != nil { return } var ctxPrefix string linesBefore := len(c.beforeContext) if linesBefore > c.ctxLines { ctxPrefix = " " + c.beforeContext[linesBefore-c.ctxLines-1] c.beforeContext = c.beforeContext[linesBefore-c.ctxLines:] linesBefore = c.ctxLines } c.current = &hunk{ctxPrefix: ctxPrefix} c.current.AddOp(Equal, c.beforeContext...) 
switch op { case Delete: c.current.fromLine, c.current.toLine = c.addLineNumbers(c.fromLine, c.toLine, linesBefore, i, Add) case Add: c.current.toLine, c.current.fromLine = c.addLineNumbers(c.toLine, c.fromLine, linesBefore, i, Delete) } c.beforeContext = nil } // addLineNumbers obtains the line numbers in a new chunk func (c *hunksGenerator) addLineNumbers(la, lb int, linesBefore int, i int, op Operation) (cla, clb int) { cla = la - linesBefore // we need to search for a reference for the next diff switch { case linesBefore != 0 && c.ctxLines != 0: if lb > c.ctxLines { clb = lb - c.ctxLines + 1 } else { clb = 1 } case c.ctxLines == 0: clb = lb case i != len(c.chunks)-1: next := c.chunks[i+1] if next.Type() == op || next.Type() == Equal { // this diff will be into this chunk clb = lb + 1 } } return } func (c *hunksGenerator) processEqualsLines(ls []string, i int) { if c.current == nil { c.beforeContext = append(c.beforeContext, ls...) return } c.afterContext = append(c.afterContext, ls...) if len(c.afterContext) <= c.ctxLines*2 && i != len(c.chunks)-1 { c.current.AddOp(Equal, c.afterContext...) c.afterContext = nil } else { ctxLines := c.ctxLines if ctxLines > len(c.afterContext) { ctxLines = len(c.afterContext) } c.current.AddOp(Equal, c.afterContext[:ctxLines]...) 
c.hunks = append(c.hunks, c.current) c.current = nil c.beforeContext = c.afterContext[ctxLines:] c.afterContext = nil } } func splitLines(s string) []string { out := strings.Split(s, "\n") if out[len(out)-1] == "" { out = out[:len(out)-1] } return out } type hunk struct { fromLine int toLine int fromCount int toCount int ctxPrefix string ops []*op } func (c *hunk) WriteTo(buf *bytes.Buffer) { buf.WriteString(chunkStart) if c.fromCount == 1 { fmt.Fprintf(buf, "%d", c.fromLine) } else { fmt.Fprintf(buf, chunkCount, c.fromLine, c.fromCount) } buf.WriteString(chunkMiddle) if c.toCount == 1 { fmt.Fprintf(buf, "%d", c.toLine) } else { fmt.Fprintf(buf, chunkCount, c.toLine, c.toCount) } fmt.Fprintf(buf, chunkEnd, c.ctxPrefix) for _, d := range c.ops { buf.WriteString(d.String()) } } func (c *hunk) AddOp(t Operation, s ...string) { ls := len(s) switch t { case Add: c.toCount += ls case Delete: c.fromCount += ls case Equal: c.toCount += ls c.fromCount += ls } for _, l := range s { c.ops = append(c.ops, &op{l, t}) } } type op struct { text string t Operation } func (o *op) String() string { var prefix string switch o.t { case Add: prefix = addLine case Delete: prefix = deleteLine case Equal: prefix = equalLine } return fmt.Sprintf(prefix, o.text) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/diff/unified_encoder_test.go000066400000000000000000000361141345605224300274310ustar00rootroot00000000000000package diff import ( "bytes" "testing" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/filemode" . 
"gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type UnifiedEncoderTestSuite struct{} var _ = Suite(&UnifiedEncoderTestSuite{}) func (s *UnifiedEncoderTestSuite) TestBothFilesEmpty(c *C) { buffer := bytes.NewBuffer(nil) e := NewUnifiedEncoder(buffer, 1) err := e.Encode(testPatch{filePatches: []testFilePatch{{}}}) c.Assert(err, IsNil) } func (s *UnifiedEncoderTestSuite) TestBinaryFile(c *C) { buffer := bytes.NewBuffer(nil) e := NewUnifiedEncoder(buffer, 1) p := testPatch{ message: "", filePatches: []testFilePatch{{ from: &testFile{ mode: filemode.Regular, path: "binary", seed: "something", }, to: &testFile{ mode: filemode.Regular, path: "binary", seed: "otherthing", }, }}, } err := e.Encode(p) c.Assert(err, IsNil) c.Assert(buffer.String(), Equals, `diff --git a/binary b/binary index a459bc245bdbc45e1bca99e7fe61731da5c48da4..6879395eacf3cc7e5634064ccb617ac7aa62be7d 100644 Binary files a/binary and b/binary differ `) } func (s *UnifiedEncoderTestSuite) TestEncode(c *C) { for _, f := range fixtures { c.Log("executing: ", f.desc) buffer := bytes.NewBuffer(nil) e := NewUnifiedEncoder(buffer, f.context) err := e.Encode(f.patch) c.Assert(err, IsNil) c.Assert(buffer.String(), Equals, f.diff) } } var oneChunkPatch Patch = testPatch{ message: "", filePatches: []testFilePatch{{ from: &testFile{ mode: filemode.Regular, path: "onechunk.txt", seed: "A\nB\nC\nD\nE\nF\nG\nH\nI\nJ\nK\nL\nM\nN\nÑ\nO\nP\nQ\nR\nS\nT\nU\nV\nW\nX\nY\nZ", }, to: &testFile{ mode: filemode.Regular, path: "onechunk.txt", seed: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\nY\nZ", }, chunks: []testChunk{{ content: "A\n", op: Delete, }, { content: "B\nC\nD\nE\nF\nG", op: Equal, }, { content: "H\n", op: Delete, }, { content: "I\nJ\nK\nL\nM\nN\n", op: Equal, }, { content: "Ñ\n", op: Delete, }, { content: "O\nP\nQ\nR\nS\nT\n", op: Equal, }, { content: "U\n", op: Delete, }, { content: "V\nW\nX\nY\nZ", op: Equal, }}, }}, } var oneChunkPatchInverted Patch = testPatch{ message: "", 
filePatches: []testFilePatch{{ to: &testFile{ mode: filemode.Regular, path: "onechunk.txt", seed: "A\nB\nC\nD\nE\nF\nG\nH\nI\nJ\nK\nL\nM\nN\nÑ\nO\nP\nQ\nR\nS\nT\nU\nV\nW\nX\nY\nZ", }, from: &testFile{ mode: filemode.Regular, path: "onechunk.txt", seed: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\nY\nZ", }, chunks: []testChunk{{ content: "A\n", op: Add, }, { content: "B\nC\nD\nE\nF\nG", op: Equal, }, { content: "H\n", op: Add, }, { content: "I\nJ\nK\nL\nM\nN\n", op: Equal, }, { content: "Ñ\n", op: Add, }, { content: "O\nP\nQ\nR\nS\nT\n", op: Equal, }, { content: "U\n", op: Add, }, { content: "V\nW\nX\nY\nZ", op: Equal, }}, }}, } var fixtures []*fixture = []*fixture{{ patch: testPatch{ message: "", filePatches: []testFilePatch{{ from: &testFile{ mode: filemode.Regular, path: "README.md", seed: "hello\nworld\n", }, to: &testFile{ mode: filemode.Regular, path: "README.md", seed: "hello\nbug\n", }, chunks: []testChunk{{ content: "hello", op: Equal, }, { content: "world", op: Delete, }, { content: "bug", op: Add, }}, }}, }, desc: "positive negative number", context: 2, diff: `diff --git a/README.md b/README.md index 94954abda49de8615a048f8d2e64b5de848e27a1..f3dad9514629b9ff9136283ae331ad1fc95748a8 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,2 @@ hello -world +bug `, }, { patch: testPatch{ message: "", filePatches: []testFilePatch{{ from: &testFile{ mode: filemode.Regular, path: "test.txt", seed: "test", }, to: &testFile{ mode: filemode.Executable, path: "test.txt", seed: "test", }, chunks: nil, }}, }, desc: "make executable", context: 1, diff: `diff --git a/test.txt b/test.txt old mode 100644 new mode 100755 `, }, { patch: testPatch{ message: "", filePatches: []testFilePatch{{ from: &testFile{ mode: filemode.Regular, path: "test.txt", seed: "test", }, to: &testFile{ mode: filemode.Regular, path: "test1.txt", seed: "test", }, chunks: nil, }}, }, desc: "rename file", context: 1, diff: `diff --git a/test.txt b/test1.txt rename from test.txt rename 
to test1.txt `, }, { patch: testPatch{ message: "", filePatches: []testFilePatch{{ from: &testFile{ mode: filemode.Regular, path: "test.txt", seed: "test", }, to: &testFile{ mode: filemode.Regular, path: "test1.txt", seed: "test1", }, chunks: []testChunk{{ content: "test", op: Delete, }, { content: "test1", op: Add, }}, }}, }, desc: "rename file with changes", context: 1, diff: `diff --git a/test.txt b/test1.txt rename from test.txt rename to test1.txt index 30d74d258442c7c65512eafab474568dd706c430..f079749c42ffdcc5f52ed2d3a6f15b09307e975e 100644 --- a/test.txt +++ b/test1.txt @@ -1 +1 @@ -test +test1 `, }, { patch: testPatch{ message: "", filePatches: []testFilePatch{{ from: &testFile{ mode: filemode.Regular, path: "test.txt", seed: "test", }, to: &testFile{ mode: filemode.Executable, path: "test1.txt", seed: "test", }, chunks: nil, }}, }, desc: "rename with file mode change", context: 1, diff: `diff --git a/test.txt b/test1.txt old mode 100644 new mode 100755 rename from test.txt rename to test1.txt `, }, { patch: testPatch{ message: "", filePatches: []testFilePatch{{ from: &testFile{ mode: filemode.Regular, path: "test.txt", seed: "test", }, to: &testFile{ mode: filemode.Regular, path: "test.txt", seed: "test2", }, chunks: []testChunk{{ content: "test", op: Delete, }, { content: "test2", op: Add, }}, }}, }, desc: "one line change", context: 1, diff: `diff --git a/test.txt b/test.txt index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d55b2ae9d 100644 --- a/test.txt +++ b/test.txt @@ -1 +1 @@ -test +test2 `, }, { patch: testPatch{ message: "this is the message\n", filePatches: []testFilePatch{{ from: &testFile{ mode: filemode.Regular, path: "test.txt", seed: "test", }, to: &testFile{ mode: filemode.Regular, path: "test.txt", seed: "test2", }, chunks: []testChunk{{ content: "test", op: Delete, }, { content: "test2", op: Add, }}, }}, }, desc: "one line change with message", context: 1, diff: `this is the message diff --git a/test.txt 
b/test.txt index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d55b2ae9d 100644 --- a/test.txt +++ b/test.txt @@ -1 +1 @@ -test +test2 `, }, { patch: testPatch{ message: "this is the message", filePatches: []testFilePatch{{ from: &testFile{ mode: filemode.Regular, path: "test.txt", seed: "test", }, to: &testFile{ mode: filemode.Regular, path: "test.txt", seed: "test2", }, chunks: []testChunk{{ content: "test", op: Delete, }, { content: "test2", op: Add, }}, }}, }, desc: "one line change with message and no end of line", context: 1, diff: `this is the message diff --git a/test.txt b/test.txt index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d55b2ae9d 100644 --- a/test.txt +++ b/test.txt @@ -1 +1 @@ -test +test2 `, }, { patch: testPatch{ message: "", filePatches: []testFilePatch{{ from: nil, to: &testFile{ mode: filemode.Regular, path: "new.txt", seed: "test\ntest2\test3", }, chunks: []testChunk{{ content: "test\ntest2\ntest3", op: Add, }}, }}, }, desc: "new file", context: 1, diff: `diff --git a/new.txt b/new.txt new file mode 100644 index 0000000000000000000000000000000000000000..65c8dd02a42273038658a22b1cb29c8d9457ca12 --- /dev/null +++ b/new.txt @@ -0,0 +1,3 @@ +test +test2 +test3 `, }, { patch: testPatch{ message: "", filePatches: []testFilePatch{{ from: &testFile{ mode: filemode.Regular, path: "old.txt", seed: "test", }, to: nil, chunks: []testChunk{{ content: "test", op: Delete, }}, }}, }, desc: "delete file", context: 1, diff: `diff --git a/old.txt b/old.txt deleted file mode 100644 index 30d74d258442c7c65512eafab474568dd706c430..0000000000000000000000000000000000000000 --- a/old.txt +++ /dev/null @@ -1 +0,0 @@ -test `, }, { patch: oneChunkPatch, desc: "modified deleting lines file with context to 1", context: 1, diff: `diff --git a/onechunk.txt b/onechunk.txt index ab5eed5d4a2c33aeef67e0188ee79bed666bde6f..0adddcde4fd38042c354518351820eb06c417c82 100644 --- a/onechunk.txt +++ b/onechunk.txt @@ -1,2 +1 
@@ -A B @@ -7,3 +6,2 @@ F G -H I @@ -14,3 +12,2 @@ M N -Ñ O @@ -21,3 +18,2 @@ S T -U V `, }, { patch: oneChunkPatch, desc: "modified deleting lines file with context to 2", context: 2, diff: `diff --git a/onechunk.txt b/onechunk.txt index ab5eed5d4a2c33aeef67e0188ee79bed666bde6f..0adddcde4fd38042c354518351820eb06c417c82 100644 --- a/onechunk.txt +++ b/onechunk.txt @@ -1,3 +1,2 @@ -A B C @@ -6,5 +5,4 @@ E F G -H I J @@ -13,5 +11,4 @@ L M N -Ñ O P @@ -20,5 +17,4 @@ R S T -U V W `, }, { patch: oneChunkPatch, desc: "modified deleting lines file with context to 6", context: 6, diff: `diff --git a/onechunk.txt b/onechunk.txt index ab5eed5d4a2c33aeef67e0188ee79bed666bde6f..0adddcde4fd38042c354518351820eb06c417c82 100644 --- a/onechunk.txt +++ b/onechunk.txt @@ -1,27 +1,23 @@ -A B C D E F G -H I J K L M N -Ñ O P Q R S T -U V W X Y Z `, }, { patch: oneChunkPatch, desc: "modified deleting lines file with context to 3", context: 3, diff: `diff --git a/onechunk.txt b/onechunk.txt index ab5eed5d4a2c33aeef67e0188ee79bed666bde6f..0adddcde4fd38042c354518351820eb06c417c82 100644 --- a/onechunk.txt +++ b/onechunk.txt @@ -1,25 +1,21 @@ -A B C D E F G -H I J K L M N -Ñ O P Q R S T -U V W X `, }, { patch: oneChunkPatch, desc: "modified deleting lines file with context to 4", context: 4, diff: `diff --git a/onechunk.txt b/onechunk.txt index ab5eed5d4a2c33aeef67e0188ee79bed666bde6f..0adddcde4fd38042c354518351820eb06c417c82 100644 --- a/onechunk.txt +++ b/onechunk.txt @@ -1,26 +1,22 @@ -A B C D E F G -H I J K L M N -Ñ O P Q R S T -U V W X Y `, }, { patch: oneChunkPatch, desc: "modified deleting lines file with context to 0", context: 0, diff: `diff --git a/onechunk.txt b/onechunk.txt index ab5eed5d4a2c33aeef67e0188ee79bed666bde6f..0adddcde4fd38042c354518351820eb06c417c82 100644 --- a/onechunk.txt +++ b/onechunk.txt @@ -1 +0,0 @@ -A @@ -8 +6,0 @@ G -H @@ -15 +12,0 @@ N -Ñ @@ -22 +18,0 @@ T -U `, }, { patch: oneChunkPatchInverted, desc: "modified adding lines file with context to 1", 
context: 1, diff: `diff --git a/onechunk.txt b/onechunk.txt index 0adddcde4fd38042c354518351820eb06c417c82..ab5eed5d4a2c33aeef67e0188ee79bed666bde6f 100644 --- a/onechunk.txt +++ b/onechunk.txt @@ -1 +1,2 @@ +A B @@ -6,2 +7,3 @@ F G +H I @@ -12,2 +14,3 @@ M N +Ñ O @@ -18,2 +21,3 @@ S T +U V `, }, { patch: oneChunkPatchInverted, desc: "modified adding lines file with context to 2", context: 2, diff: `diff --git a/onechunk.txt b/onechunk.txt index 0adddcde4fd38042c354518351820eb06c417c82..ab5eed5d4a2c33aeef67e0188ee79bed666bde6f 100644 --- a/onechunk.txt +++ b/onechunk.txt @@ -1,2 +1,3 @@ +A B C @@ -5,4 +6,5 @@ E F G +H I J @@ -11,4 +13,5 @@ L M N +Ñ O P @@ -17,4 +20,5 @@ R S T +U V W `, }, { patch: oneChunkPatchInverted, desc: "modified adding lines file with context to 3", context: 3, diff: `diff --git a/onechunk.txt b/onechunk.txt index 0adddcde4fd38042c354518351820eb06c417c82..ab5eed5d4a2c33aeef67e0188ee79bed666bde6f 100644 --- a/onechunk.txt +++ b/onechunk.txt @@ -1,21 +1,25 @@ +A B C D E F G +H I J K L M N +Ñ O P Q R S T +U V W X `, }, { patch: oneChunkPatchInverted, desc: "modified adding lines file with context to 4", context: 4, diff: `diff --git a/onechunk.txt b/onechunk.txt index 0adddcde4fd38042c354518351820eb06c417c82..ab5eed5d4a2c33aeef67e0188ee79bed666bde6f 100644 --- a/onechunk.txt +++ b/onechunk.txt @@ -1,22 +1,26 @@ +A B C D E F G +H I J K L M N +Ñ O P Q R S T +U V W X Y `, }, { patch: oneChunkPatchInverted, desc: "modified adding lines file with context to 0", context: 0, diff: `diff --git a/onechunk.txt b/onechunk.txt index 0adddcde4fd38042c354518351820eb06c417c82..ab5eed5d4a2c33aeef67e0188ee79bed666bde6f 100644 --- a/onechunk.txt +++ b/onechunk.txt @@ -0,0 +1 @@ +A @@ -6,0 +8 @@ G +H @@ -12,0 +15 @@ N +Ñ @@ -18,0 +22 @@ T +U `, }, { patch: testPatch{ message: "", filePatches: []testFilePatch{{ from: &testFile{ mode: filemode.Regular, path: "onechunk.txt", seed: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\nY\nZ", }, to: 
&testFile{ mode: filemode.Regular, path: "onechunk.txt", seed: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\nY\n", }, chunks: []testChunk{{ content: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\nY\n", op: Equal, }, { content: "Z", op: Delete, }}, }}, }, desc: "remove last letter", context: 0, diff: `diff --git a/onechunk.txt b/onechunk.txt index 0adddcde4fd38042c354518351820eb06c417c82..553ae669c7a9303cf848fcc749a2569228ac5309 100644 --- a/onechunk.txt +++ b/onechunk.txt @@ -23 +22,0 @@ Y -Z `, }} type testPatch struct { message string filePatches []testFilePatch } func (t testPatch) FilePatches() []FilePatch { var result []FilePatch for _, f := range t.filePatches { result = append(result, f) } return result } func (t testPatch) Message() string { return t.message } type testFilePatch struct { from, to *testFile chunks []testChunk } func (t testFilePatch) IsBinary() bool { return len(t.chunks) == 0 } func (t testFilePatch) Files() (File, File) { // Go is amazing switch { case t.from == nil && t.to == nil: return nil, nil case t.from == nil: return nil, t.to case t.to == nil: return t.from, nil } return t.from, t.to } func (t testFilePatch) Chunks() []Chunk { var result []Chunk for _, c := range t.chunks { result = append(result, c) } return result } type testFile struct { path string mode filemode.FileMode seed string } func (t testFile) Hash() plumbing.Hash { return plumbing.ComputeHash(plumbing.BlobObject, []byte(t.seed)) } func (t testFile) Mode() filemode.FileMode { return t.mode } func (t testFile) Path() string { return t.path } type testChunk struct { content string op Operation } func (t testChunk) Content() string { return t.content } func (t testChunk) Type() Operation { return t.op } type fixture struct { desc string context int diff string patch Patch } 
golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/gitignore/000077500000000000000000000000001345605224300237735ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/gitignore/dir.go000066400000000000000000000066131345605224300251060ustar00rootroot00000000000000package gitignore import ( "bytes" "io/ioutil" "os" "os/user" "strings" "gopkg.in/src-d/go-billy.v4" "gopkg.in/src-d/go-git.v4/plumbing/format/config" gioutil "gopkg.in/src-d/go-git.v4/utils/ioutil" ) const ( commentPrefix = "#" coreSection = "core" eol = "\n" excludesfile = "excludesfile" gitDir = ".git" gitignoreFile = ".gitignore" gitconfigFile = ".gitconfig" systemFile = "/etc/gitconfig" ) // readIgnoreFile reads a specific git ignore file. func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps []Pattern, err error) { f, err := fs.Open(fs.Join(append(path, ignoreFile)...)) if err == nil { defer f.Close() if data, err := ioutil.ReadAll(f); err == nil { for _, s := range strings.Split(string(data), eol) { if !strings.HasPrefix(s, commentPrefix) && len(strings.TrimSpace(s)) > 0 { ps = append(ps, ParsePattern(s, path)) } } } } else if !os.IsNotExist(err) { return nil, err } return } // ReadPatterns reads gitignore patterns recursively traversing through the directory // structure. The result is in the ascending order of priority (last higher). func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) { ps, _ = readIgnoreFile(fs, path, gitignoreFile) var fis []os.FileInfo fis, err = fs.ReadDir(fs.Join(path...)) if err != nil { return } for _, fi := range fis { if fi.IsDir() && fi.Name() != gitDir { var subps []Pattern subps, err = ReadPatterns(fs, append(path, fi.Name())) if err != nil { return } if len(subps) > 0 { ps = append(ps, subps...) 
} } } return } func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) { f, err := fs.Open(path) if err != nil { if os.IsNotExist(err) { return nil, nil } return nil, err } defer gioutil.CheckClose(f, &err) b, err := ioutil.ReadAll(f) if err != nil { return } d := config.NewDecoder(bytes.NewBuffer(b)) raw := config.New() if err = d.Decode(raw); err != nil { return } s := raw.Section(coreSection) efo := s.Options.Get(excludesfile) if efo == "" { return nil, nil } ps, err = readIgnoreFile(fs, nil, efo) if os.IsNotExist(err) { return nil, nil } return } // LoadGlobalPatterns loads gitignore patterns from from the gitignore file // declared in a user's ~/.gitconfig file. If the ~/.gitconfig file does not // exist the function will return nil. If the core.excludesfile property // is not declared, the function will return nil. If the file pointed to by // the core.excludesfile property does not exist, the function will return nil. // // The function assumes fs is rooted at the root filesystem. func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) { usr, err := user.Current() if err != nil { return } return loadPatterns(fs, fs.Join(usr.HomeDir, gitconfigFile)) } // LoadSystemPatterns loads gitignore patterns from from the gitignore file // declared in a system's /etc/gitconfig file. If the ~/.gitconfig file does // not exist the function will return nil. If the core.excludesfile property // is not declared, the function will return nil. If the file pointed to by // the core.excludesfile property does not exist, the function will return nil. // // The function assumes fs is rooted at the root filesystem. func LoadSystemPatterns(fs billy.Filesystem) (ps []Pattern, err error) { return loadPatterns(fs, systemFile) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/gitignore/dir_test.go000066400000000000000000000131261345605224300261420ustar00rootroot00000000000000package gitignore import ( "os" "os/user" "strconv" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4" "gopkg.in/src-d/go-billy.v4/memfs" ) type MatcherSuite struct { GFS billy.Filesystem // git repository root RFS billy.Filesystem // root that contains user home MCFS billy.Filesystem // root that contains user home, but missing ~/.gitconfig MEFS billy.Filesystem // root that contains user home, but missing excludesfile entry MIFS billy.Filesystem // root that contains user home, but missing .gitnignore SFS billy.Filesystem // root that contains /etc/gitconfig } var _ = Suite(&MatcherSuite{}) func (s *MatcherSuite) SetUpTest(c *C) { // setup generic git repository root fs := memfs.New() f, err := fs.Create(".gitignore") c.Assert(err, IsNil) _, err = f.Write([]byte("vendor/g*/\n")) c.Assert(err, IsNil) err = f.Close() c.Assert(err, IsNil) err = fs.MkdirAll("vendor", os.ModePerm) c.Assert(err, IsNil) f, err = fs.Create("vendor/.gitignore") c.Assert(err, IsNil) _, err = f.Write([]byte("!github.com/\n")) c.Assert(err, IsNil) err = f.Close() c.Assert(err, IsNil) fs.MkdirAll("another", os.ModePerm) fs.MkdirAll("vendor/github.com", os.ModePerm) fs.MkdirAll("vendor/gopkg.in", os.ModePerm) s.GFS = fs // setup root that contains user home usr, err := user.Current() c.Assert(err, IsNil) fs = memfs.New() err = fs.MkdirAll(usr.HomeDir, os.ModePerm) c.Assert(err, IsNil) f, err = fs.Create(fs.Join(usr.HomeDir, gitconfigFile)) c.Assert(err, IsNil) _, err = f.Write([]byte("[core]\n")) c.Assert(err, IsNil) _, err = f.Write([]byte(" excludesfile = " + strconv.Quote(fs.Join(usr.HomeDir, ".gitignore_global")) + "\n")) c.Assert(err, IsNil) err = f.Close() c.Assert(err, IsNil) f, err = fs.Create(fs.Join(usr.HomeDir, ".gitignore_global")) c.Assert(err, IsNil) _, err = f.Write([]byte("# IntelliJ\n")) c.Assert(err, IsNil) _, err = f.Write([]byte(".idea/\n")) c.Assert(err, IsNil) _, err = f.Write([]byte("*.iml\n")) c.Assert(err, IsNil) err = f.Close() c.Assert(err, IsNil) s.RFS = fs // root that contains user home, but missing ~/.gitconfig fs = 
memfs.New() err = fs.MkdirAll(usr.HomeDir, os.ModePerm) c.Assert(err, IsNil) f, err = fs.Create(fs.Join(usr.HomeDir, ".gitignore_global")) c.Assert(err, IsNil) _, err = f.Write([]byte("# IntelliJ\n")) c.Assert(err, IsNil) _, err = f.Write([]byte(".idea/\n")) c.Assert(err, IsNil) _, err = f.Write([]byte("*.iml\n")) c.Assert(err, IsNil) err = f.Close() c.Assert(err, IsNil) s.MCFS = fs // setup root that contains user home, but missing excludesfile entry fs = memfs.New() err = fs.MkdirAll(usr.HomeDir, os.ModePerm) c.Assert(err, IsNil) f, err = fs.Create(fs.Join(usr.HomeDir, gitconfigFile)) c.Assert(err, IsNil) _, err = f.Write([]byte("[core]\n")) c.Assert(err, IsNil) err = f.Close() c.Assert(err, IsNil) f, err = fs.Create(fs.Join(usr.HomeDir, ".gitignore_global")) c.Assert(err, IsNil) _, err = f.Write([]byte("# IntelliJ\n")) c.Assert(err, IsNil) _, err = f.Write([]byte(".idea/\n")) c.Assert(err, IsNil) _, err = f.Write([]byte("*.iml\n")) c.Assert(err, IsNil) err = f.Close() c.Assert(err, IsNil) s.MEFS = fs // setup root that contains user home, but missing .gitnignore fs = memfs.New() err = fs.MkdirAll(usr.HomeDir, os.ModePerm) c.Assert(err, IsNil) f, err = fs.Create(fs.Join(usr.HomeDir, gitconfigFile)) c.Assert(err, IsNil) _, err = f.Write([]byte("[core]\n")) c.Assert(err, IsNil) _, err = f.Write([]byte(" excludesfile = " + strconv.Quote(fs.Join(usr.HomeDir, ".gitignore_global")) + "\n")) c.Assert(err, IsNil) err = f.Close() c.Assert(err, IsNil) s.MIFS = fs // setup root that contains user home fs = memfs.New() err = fs.MkdirAll("etc", os.ModePerm) c.Assert(err, IsNil) f, err = fs.Create(systemFile) c.Assert(err, IsNil) _, err = f.Write([]byte("[core]\n")) c.Assert(err, IsNil) _, err = f.Write([]byte(" excludesfile = /etc/gitignore_global\n")) c.Assert(err, IsNil) err = f.Close() c.Assert(err, IsNil) f, err = fs.Create("/etc/gitignore_global") c.Assert(err, IsNil) _, err = f.Write([]byte("# IntelliJ\n")) c.Assert(err, IsNil) _, err = f.Write([]byte(".idea/\n")) 
c.Assert(err, IsNil) _, err = f.Write([]byte("*.iml\n")) c.Assert(err, IsNil) err = f.Close() c.Assert(err, IsNil) s.SFS = fs } func (s *MatcherSuite) TestDir_ReadPatterns(c *C) { ps, err := ReadPatterns(s.GFS, nil) c.Assert(err, IsNil) c.Assert(ps, HasLen, 2) m := NewMatcher(ps) c.Assert(m.Match([]string{"vendor", "gopkg.in"}, true), Equals, true) c.Assert(m.Match([]string{"vendor", "github.com"}, true), Equals, false) } func (s *MatcherSuite) TestDir_LoadGlobalPatterns(c *C) { ps, err := LoadGlobalPatterns(s.RFS) c.Assert(err, IsNil) c.Assert(ps, HasLen, 2) m := NewMatcher(ps) c.Assert(m.Match([]string{"go-git.v4.iml"}, true), Equals, true) c.Assert(m.Match([]string{".idea"}, true), Equals, true) } func (s *MatcherSuite) TestDir_LoadGlobalPatternsMissingGitconfig(c *C) { ps, err := LoadGlobalPatterns(s.MCFS) c.Assert(err, IsNil) c.Assert(ps, HasLen, 0) } func (s *MatcherSuite) TestDir_LoadGlobalPatternsMissingExcludesfile(c *C) { ps, err := LoadGlobalPatterns(s.MEFS) c.Assert(err, IsNil) c.Assert(ps, HasLen, 0) } func (s *MatcherSuite) TestDir_LoadGlobalPatternsMissingGitignore(c *C) { ps, err := LoadGlobalPatterns(s.MIFS) c.Assert(err, IsNil) c.Assert(ps, HasLen, 0) } func (s *MatcherSuite) TestDir_LoadSystemPatterns(c *C) { ps, err := LoadSystemPatterns(s.SFS) c.Assert(err, IsNil) c.Assert(ps, HasLen, 2) m := NewMatcher(ps) c.Assert(m.Match([]string{"go-git.v4.iml"}, true), Equals, true) c.Assert(m.Match([]string{".idea"}, true), Equals, true) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/gitignore/doc.go000066400000000000000000000066631345605224300251020ustar00rootroot00000000000000// Package gitignore implements matching file system paths to gitignore patterns that // can be automatically read from a git repository tree in the order of definition // priorities. 
It support all pattern formats as specified in the original gitignore // documentation, copied below: // // Pattern format // ============== // // - A blank line matches no files, so it can serve as a separator for readability. // // - A line starting with # serves as a comment. Put a backslash ("\") in front of // the first hash for patterns that begin with a hash. // // - Trailing spaces are ignored unless they are quoted with backslash ("\"). // // - An optional prefix "!" which negates the pattern; any matching file excluded // by a previous pattern will become included again. It is not possible to // re-include a file if a parent directory of that file is excluded. // Git doesn’t list excluded directories for performance reasons, so // any patterns on contained files have no effect, no matter where they are // defined. Put a backslash ("\") in front of the first "!" for patterns // that begin with a literal "!", for example, "\!important!.txt". // // - If the pattern ends with a slash, it is removed for the purpose of the // following description, but it would only find a match with a directory. // In other words, foo/ will match a directory foo and paths underneath it, // but will not match a regular file or a symbolic link foo (this is consistent // with the way how pathspec works in general in Git). // // - If the pattern does not contain a slash /, Git treats it as a shell glob // pattern and checks for a match against the pathname relative to the location // of the .gitignore file (relative to the toplevel of the work tree if not // from a .gitignore file). // // - Otherwise, Git treats the pattern as a shell glob suitable for consumption // by fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will // not match a / in the pathname. For example, "Documentation/*.html" matches // "Documentation/git.html" but not "Documentation/ppc/ppc.html" or // "tools/perf/Documentation/perf.html". 
// // - A leading slash matches the beginning of the pathname. For example, // "/*.c" matches "cat-file.c" but not "mozilla-sha1/sha1.c". // // Two consecutive asterisks ("**") in patterns matched against full pathname // may have special meaning: // // - A leading "**" followed by a slash means match in all directories. // For example, "**/foo" matches file or directory "foo" anywhere, the same as // pattern "foo". "**/foo/bar" matches file or directory "bar" // anywhere that is directly under directory "foo". // // - A trailing "/**" matches everything inside. For example, "abc/**" matches // all files inside directory "abc", relative to the location of the // .gitignore file, with infinite depth. // // - A slash followed by two consecutive asterisks then a slash matches // zero or more directories. For example, "a/**/b" matches "a/b", "a/x/b", // "a/x/y/b" and so on. // // - Other consecutive asterisks are considered invalid. // // Copyright and license // ===================== // // Copyright (c) Oleg Sklyar, Silvertern and source{d} // // The package code was donated to source{d} to include, modify and develop // further as a part of the `go-git` project, release it on the license of // the whole project or delete it from the project. package gitignore golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/gitignore/matcher.go000066400000000000000000000016461345605224300257540ustar00rootroot00000000000000package gitignore // Matcher defines a global multi-pattern matcher for gitignore patterns type Matcher interface { // Match matches patterns in the order of priorities. As soon as an inclusion or // exclusion is found, not further matching is performed. Match(path []string, isDir bool) bool } // NewMatcher constructs a new global matcher. Patterns must be given in the order of // increasing priority. 
That is most generic settings files first, then the content of // the repo .gitignore, then content of .gitignore down the path or the repo and then // the content command line arguments. func NewMatcher(ps []Pattern) Matcher { return &matcher{ps} } type matcher struct { patterns []Pattern } func (m *matcher) Match(path []string, isDir bool) bool { n := len(m.patterns) for i := n - 1; i >= 0; i-- { if match := m.patterns[i].Match(path, isDir); match > NoMatch { return match == Exclude } } return false } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/gitignore/matcher_test.go000066400000000000000000000006011345605224300270010ustar00rootroot00000000000000package gitignore import ( . "gopkg.in/check.v1" ) func (s *MatcherSuite) TestMatcher_Match(c *C) { ps := []Pattern{ ParsePattern("**/middle/v[uo]l?ano", nil), ParsePattern("!volcano", nil), } m := NewMatcher(ps) c.Assert(m.Match([]string{"head", "middle", "vulkano"}, false), Equals, true) c.Assert(m.Match([]string{"head", "middle", "volcano"}, false), Equals, false) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/gitignore/pattern.go000066400000000000000000000060111345605224300257750ustar00rootroot00000000000000package gitignore import ( "path/filepath" "strings" ) // MatchResult defines outcomes of a match, no match, exclusion or inclusion. type MatchResult int const ( // NoMatch defines the no match outcome of a match check NoMatch MatchResult = iota // Exclude defines an exclusion of a file as a result of a match check Exclude // Include defines an explicit inclusion of a file as a result of a match check Include ) const ( inclusionPrefix = "!" zeroToManyDirs = "**" patternDirSep = "/" ) // Pattern defines a single gitignore pattern. type Pattern interface { // Match matches the given path to the pattern. 
Match(path []string, isDir bool) MatchResult } type pattern struct { domain []string pattern []string inclusion bool dirOnly bool isGlob bool } // ParsePattern parses a gitignore pattern string into the Pattern structure. func ParsePattern(p string, domain []string) Pattern { res := pattern{domain: domain} if strings.HasPrefix(p, inclusionPrefix) { res.inclusion = true p = p[1:] } if !strings.HasSuffix(p, "\\ ") { p = strings.TrimRight(p, " ") } if strings.HasSuffix(p, patternDirSep) { res.dirOnly = true p = p[:len(p)-1] } if strings.Contains(p, patternDirSep) { res.isGlob = true } res.pattern = strings.Split(p, patternDirSep) return &res } func (p *pattern) Match(path []string, isDir bool) MatchResult { if len(path) <= len(p.domain) { return NoMatch } for i, e := range p.domain { if path[i] != e { return NoMatch } } path = path[len(p.domain):] if p.isGlob && !p.globMatch(path, isDir) { return NoMatch } else if !p.isGlob && !p.simpleNameMatch(path, isDir) { return NoMatch } if p.inclusion { return Include } else { return Exclude } } func (p *pattern) simpleNameMatch(path []string, isDir bool) bool { for i, name := range path { if match, err := filepath.Match(p.pattern[0], name); err != nil { return false } else if !match { continue } if p.dirOnly && !isDir && i == len(path)-1 { return false } return true } return false } func (p *pattern) globMatch(path []string, isDir bool) bool { matched := false canTraverse := false for i, pattern := range p.pattern { if pattern == "" { canTraverse = false continue } if pattern == zeroToManyDirs { if i == len(p.pattern)-1 { break } canTraverse = true continue } if strings.Contains(pattern, zeroToManyDirs) { return false } if len(path) == 0 { return false } if canTraverse { canTraverse = false for len(path) > 0 { e := path[0] path = path[1:] if match, err := filepath.Match(pattern, e); err != nil { return false } else if match { matched = true break } else if len(path) == 0 { // if nothing left then fail matched = false } } } 
else { if match, err := filepath.Match(pattern, path[0]); err != nil || !match { return false } matched = true path = path[1:] } } if matched && p.dirOnly && !isDir && len(path) == 0 { matched = false } return matched } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/gitignore/pattern_test.go000066400000000000000000000225451345605224300270460ustar00rootroot00000000000000package gitignore import ( "testing" . "gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type PatternSuite struct{} var _ = Suite(&PatternSuite{}) func (s *PatternSuite) TestSimpleMatch_inclusion(c *C) { p := ParsePattern("!vul?ano", nil) r := p.Match([]string{"value", "vulkano", "tail"}, false) c.Assert(r, Equals, Include) } func (s *PatternSuite) TestMatch_domainLonger_mismatch(c *C) { p := ParsePattern("value", []string{"head", "middle", "tail"}) r := p.Match([]string{"head", "middle"}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestMatch_domainSameLength_mismatch(c *C) { p := ParsePattern("value", []string{"head", "middle", "tail"}) r := p.Match([]string{"head", "middle", "tail"}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestMatch_domainMismatch_mismatch(c *C) { p := ParsePattern("value", []string{"head", "middle", "tail"}) r := p.Match([]string{"head", "middle", "_tail_", "value"}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestSimpleMatch_withDomain(c *C) { p := ParsePattern("middle/", []string{"value", "volcano"}) r := p.Match([]string{"value", "volcano", "middle", "tail"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestSimpleMatch_onlyMatchInDomain_mismatch(c *C) { p := ParsePattern("volcano/", []string{"value", "volcano"}) r := p.Match([]string{"value", "volcano", "tail"}, true) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestSimpleMatch_atStart(c *C) { p := ParsePattern("value", nil) r := p.Match([]string{"value", "tail"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) 
TestSimpleMatch_inTheMiddle(c *C) { p := ParsePattern("value", nil) r := p.Match([]string{"head", "value", "tail"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestSimpleMatch_atEnd(c *C) { p := ParsePattern("value", nil) r := p.Match([]string{"head", "value"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestSimpleMatch_atStart_dirWanted(c *C) { p := ParsePattern("value/", nil) r := p.Match([]string{"value", "tail"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestSimpleMatch_inTheMiddle_dirWanted(c *C) { p := ParsePattern("value/", nil) r := p.Match([]string{"head", "value", "tail"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestSimpleMatch_atEnd_dirWanted(c *C) { p := ParsePattern("value/", nil) r := p.Match([]string{"head", "value"}, true) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestSimpleMatch_atEnd_dirWanted_notADir_mismatch(c *C) { p := ParsePattern("value/", nil) r := p.Match([]string{"head", "value"}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestSimpleMatch_mismatch(c *C) { p := ParsePattern("value", nil) r := p.Match([]string{"head", "val", "tail"}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestSimpleMatch_valueLonger_mismatch(c *C) { p := ParsePattern("val", nil) r := p.Match([]string{"head", "value", "tail"}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestSimpleMatch_withAsterisk(c *C) { p := ParsePattern("v*o", nil) r := p.Match([]string{"value", "vulkano", "tail"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestSimpleMatch_withQuestionMark(c *C) { p := ParsePattern("vul?ano", nil) r := p.Match([]string{"value", "vulkano", "tail"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestSimpleMatch_magicChars(c *C) { p := ParsePattern("v[ou]l[kc]ano", nil) r := p.Match([]string{"value", "volcano"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) 
TestSimpleMatch_wrongPattern_mismatch(c *C) { p := ParsePattern("v[ou]l[", nil) r := p.Match([]string{"value", "vol["}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestGlobMatch_fromRootWithSlash(c *C) { p := ParsePattern("/value/vul?ano", nil) r := p.Match([]string{"value", "vulkano", "tail"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestGlobMatch_withDomain(c *C) { p := ParsePattern("middle/tail/", []string{"value", "volcano"}) r := p.Match([]string{"value", "volcano", "middle", "tail"}, true) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestGlobMatch_onlyMatchInDomain_mismatch(c *C) { p := ParsePattern("volcano/tail", []string{"value", "volcano"}) r := p.Match([]string{"value", "volcano", "tail"}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestGlobMatch_fromRootWithoutSlash(c *C) { p := ParsePattern("value/vul?ano", nil) r := p.Match([]string{"value", "vulkano", "tail"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestGlobMatch_fromRoot_mismatch(c *C) { p := ParsePattern("value/vulkano", nil) r := p.Match([]string{"value", "volcano"}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestGlobMatch_fromRoot_tooShort_mismatch(c *C) { p := ParsePattern("value/vul?ano", nil) r := p.Match([]string{"value"}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestGlobMatch_fromRoot_notAtRoot_mismatch(c *C) { p := ParsePattern("/value/volcano", nil) r := p.Match([]string{"value", "value", "volcano"}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestGlobMatch_leadingAsterisks_atStart(c *C) { p := ParsePattern("**/*lue/vol?ano", nil) r := p.Match([]string{"value", "volcano", "tail"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestGlobMatch_leadingAsterisks_notAtStart(c *C) { p := ParsePattern("**/*lue/vol?ano", nil) r := p.Match([]string{"head", "value", "volcano", "tail"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) 
TestGlobMatch_leadingAsterisks_mismatch(c *C) { p := ParsePattern("**/*lue/vol?ano", nil) r := p.Match([]string{"head", "value", "Volcano", "tail"}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestGlobMatch_leadingAsterisks_isDir(c *C) { p := ParsePattern("**/*lue/vol?ano/", nil) r := p.Match([]string{"head", "value", "volcano", "tail"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestGlobMatch_leadingAsterisks_isDirAtEnd(c *C) { p := ParsePattern("**/*lue/vol?ano/", nil) r := p.Match([]string{"head", "value", "volcano"}, true) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestGlobMatch_leadingAsterisks_isDir_mismatch(c *C) { p := ParsePattern("**/*lue/vol?ano/", nil) r := p.Match([]string{"head", "value", "Colcano"}, true) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestGlobMatch_leadingAsterisks_isDirNoDirAtEnd_mismatch(c *C) { p := ParsePattern("**/*lue/vol?ano/", nil) r := p.Match([]string{"head", "value", "volcano"}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestGlobMatch_tailingAsterisks(c *C) { p := ParsePattern("/*lue/vol?ano/**", nil) r := p.Match([]string{"value", "volcano", "tail", "moretail"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestGlobMatch_tailingAsterisks_exactMatch(c *C) { p := ParsePattern("/*lue/vol?ano/**", nil) r := p.Match([]string{"value", "volcano"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestGlobMatch_middleAsterisks_emptyMatch(c *C) { p := ParsePattern("/*lue/**/vol?ano", nil) r := p.Match([]string{"value", "volcano"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestGlobMatch_middleAsterisks_oneMatch(c *C) { p := ParsePattern("/*lue/**/vol?ano", nil) r := p.Match([]string{"value", "middle", "volcano"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestGlobMatch_middleAsterisks_multiMatch(c *C) { p := ParsePattern("/*lue/**/vol?ano", nil) r := p.Match([]string{"value", "middle1", "middle2", 
"volcano"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestGlobMatch_middleAsterisks_isDir_trailing(c *C) { p := ParsePattern("/*lue/**/vol?ano/", nil) r := p.Match([]string{"value", "middle1", "middle2", "volcano"}, true) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestGlobMatch_middleAsterisks_isDir_trailing_mismatch(c *C) { p := ParsePattern("/*lue/**/vol?ano/", nil) r := p.Match([]string{"value", "middle1", "middle2", "volcano"}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestGlobMatch_middleAsterisks_isDir(c *C) { p := ParsePattern("/*lue/**/vol?ano/", nil) r := p.Match([]string{"value", "middle1", "middle2", "volcano", "tail"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestGlobMatch_wrongDoubleAsterisk_mismatch(c *C) { p := ParsePattern("/*lue/**foo/vol?ano", nil) r := p.Match([]string{"value", "foo", "volcano", "tail"}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestGlobMatch_magicChars(c *C) { p := ParsePattern("**/head/v[ou]l[kc]ano", nil) r := p.Match([]string{"value", "head", "volcano"}, false) c.Assert(r, Equals, Exclude) } func (s *PatternSuite) TestGlobMatch_wrongPattern_noTraversal_mismatch(c *C) { p := ParsePattern("**/head/v[ou]l[", nil) r := p.Match([]string{"value", "head", "vol["}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestGlobMatch_wrongPattern_onTraversal_mismatch(c *C) { p := ParsePattern("/value/**/v[ou]l[", nil) r := p.Match([]string{"value", "head", "vol["}, false) c.Assert(r, Equals, NoMatch) } func (s *PatternSuite) TestGlobMatch_issue_923(c *C) { p := ParsePattern("**/android/**/GeneratedPluginRegistrant.java", nil) r := p.Match([]string{"packages", "flutter_tools", "lib", "src", "android", "gradle.dart"}, false) c.Assert(r, Equals, NoMatch) } 
golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/idxfile/000077500000000000000000000000001345605224300234305ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/idxfile/decoder.go000066400000000000000000000066121345605224300253710ustar00rootroot00000000000000package idxfile import ( "bufio" "bytes" "errors" "io" "gopkg.in/src-d/go-git.v4/utils/binary" ) var ( // ErrUnsupportedVersion is returned by Decode when the idx file version // is not supported. ErrUnsupportedVersion = errors.New("Unsuported version") // ErrMalformedIdxFile is returned by Decode when the idx file is corrupted. ErrMalformedIdxFile = errors.New("Malformed IDX file") ) const ( fanout = 256 objectIDLength = 20 ) // Decoder reads and decodes idx files from an input stream. type Decoder struct { *bufio.Reader } // NewDecoder builds a new idx stream decoder, that reads from r. func NewDecoder(r io.Reader) *Decoder { return &Decoder{bufio.NewReader(r)} } // Decode reads from the stream and decode the content into the MemoryIndex struct. 
func (d *Decoder) Decode(idx *MemoryIndex) error { if err := validateHeader(d); err != nil { return err } flow := []func(*MemoryIndex, io.Reader) error{ readVersion, readFanout, readObjectNames, readCRC32, readOffsets, readChecksums, } for _, f := range flow { if err := f(idx, d); err != nil { return err } } return nil } func validateHeader(r io.Reader) error { var h = make([]byte, 4) if _, err := io.ReadFull(r, h); err != nil { return err } if !bytes.Equal(h, idxHeader) { return ErrMalformedIdxFile } return nil } func readVersion(idx *MemoryIndex, r io.Reader) error { v, err := binary.ReadUint32(r) if err != nil { return err } if v > VersionSupported { return ErrUnsupportedVersion } idx.Version = v return nil } func readFanout(idx *MemoryIndex, r io.Reader) error { for k := 0; k < fanout; k++ { n, err := binary.ReadUint32(r) if err != nil { return err } idx.Fanout[k] = n idx.FanoutMapping[k] = noMapping } return nil } func readObjectNames(idx *MemoryIndex, r io.Reader) error { for k := 0; k < fanout; k++ { var buckets uint32 if k == 0 { buckets = idx.Fanout[k] } else { buckets = idx.Fanout[k] - idx.Fanout[k-1] } if buckets == 0 { continue } if buckets < 0 { return ErrMalformedIdxFile } idx.FanoutMapping[k] = len(idx.Names) nameLen := int(buckets * objectIDLength) bin := make([]byte, nameLen) if _, err := io.ReadFull(r, bin); err != nil { return err } idx.Names = append(idx.Names, bin) idx.Offset32 = append(idx.Offset32, make([]byte, buckets*4)) idx.CRC32 = append(idx.CRC32, make([]byte, buckets*4)) } return nil } func readCRC32(idx *MemoryIndex, r io.Reader) error { for k := 0; k < fanout; k++ { if pos := idx.FanoutMapping[k]; pos != noMapping { if _, err := io.ReadFull(r, idx.CRC32[pos]); err != nil { return err } } } return nil } func readOffsets(idx *MemoryIndex, r io.Reader) error { var o64cnt int for k := 0; k < fanout; k++ { if pos := idx.FanoutMapping[k]; pos != noMapping { if _, err := io.ReadFull(r, idx.Offset32[pos]); err != nil { return err } for p := 
0; p < len(idx.Offset32[pos]); p += 4 { if idx.Offset32[pos][p]&(byte(1)<<7) > 0 { o64cnt++ } } } } if o64cnt > 0 { idx.Offset64 = make([]byte, o64cnt*8) if _, err := io.ReadFull(r, idx.Offset64); err != nil { return err } } return nil } func readChecksums(idx *MemoryIndex, r io.Reader) error { if _, err := io.ReadFull(r, idx.PackfileChecksum[:]); err != nil { return err } if _, err := io.ReadFull(r, idx.IdxChecksum[:]); err != nil { return err } return nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/idxfile/decoder_test.go000066400000000000000000000111321345605224300264210ustar00rootroot00000000000000package idxfile_test import ( "bytes" "encoding/base64" "fmt" "io" "io/ioutil" "testing" "gopkg.in/src-d/go-git.v4/plumbing" . "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) func Test(t *testing.T) { TestingT(t) } type IdxfileSuite struct { fixtures.Suite } var _ = Suite(&IdxfileSuite{}) func (s *IdxfileSuite) TestDecode(c *C) { f := fixtures.Basic().One() d := NewDecoder(f.Idx()) idx := new(MemoryIndex) err := d.Decode(idx) c.Assert(err, IsNil) count, _ := idx.Count() c.Assert(count, Equals, int64(31)) hash := plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea") ok, err := idx.Contains(hash) c.Assert(err, IsNil) c.Assert(ok, Equals, true) offset, err := idx.FindOffset(hash) c.Assert(err, IsNil) c.Assert(offset, Equals, int64(615)) crc32, err := idx.FindCRC32(hash) c.Assert(err, IsNil) c.Assert(crc32, Equals, uint32(3645019190)) c.Assert(fmt.Sprintf("%x", idx.IdxChecksum), Equals, "fb794f1ec720b9bc8e43257451bd99c4be6fa1c9") c.Assert(fmt.Sprintf("%x", idx.PackfileChecksum), Equals, f.PackfileHash.String()) } func (s *IdxfileSuite) TestDecode64bitsOffsets(c *C) { f := bytes.NewBufferString(fixtureLarge4GB) idx := new(MemoryIndex) d := NewDecoder(base64.NewDecoder(base64.StdEncoding, f)) err := d.Decode(idx) c.Assert(err, IsNil) expected := map[string]uint64{ 
"303953e5aa461c203a324821bc1717f9b4fff895": 12, "5296768e3d9f661387ccbff18c4dea6c997fd78c": 142, "03fc8d58d44267274edef4585eaeeb445879d33f": 1601322837, "8f3ceb4ea4cb9e4a0f751795eb41c9a4f07be772": 2646996529, "e0d1d625010087f79c9e01ad9d8f95e1628dda02": 3452385606, "90eba326cdc4d1d61c5ad25224ccbf08731dd041": 3707047470, "bab53055add7bc35882758a922c54a874d6b1272": 5323223332, "1b8995f51987d8a449ca5ea4356595102dc2fbd4": 5894072943, "35858be9c6f5914cbe6768489c41eb6809a2bceb": 5924278919, } iter, err := idx.Entries() c.Assert(err, IsNil) var entries int for { e, err := iter.Next() if err == io.EOF { break } c.Assert(err, IsNil) entries++ c.Assert(expected[e.Hash.String()], Equals, e.Offset) } c.Assert(entries, Equals, len(expected)) } const fixtureLarge4GB = `/3RPYwAAAAIAAAAAAAAAAAAAAAAAAAABAAAAAQAAAAEAAAABAAAAAQAAAAEAAAABAAAAAQAAAAEA AAABAAAAAQAAAAEAAAABAAAAAQAAAAEAAAABAAAAAQAAAAEAAAABAAAAAQAAAAEAAAABAAAAAQAA AAEAAAACAAAAAgAAAAIAAAACAAAAAgAAAAIAAAACAAAAAgAAAAIAAAACAAAAAgAAAAIAAAACAAAA AgAAAAIAAAACAAAAAgAAAAIAAAACAAAAAgAAAAIAAAADAAAAAwAAAAMAAAADAAAAAwAAAAQAAAAE AAAABAAAAAQAAAAEAAAABAAAAAQAAAAEAAAABAAAAAQAAAAEAAAABAAAAAQAAAAEAAAABAAAAAQA AAAEAAAABAAAAAQAAAAEAAAABAAAAAQAAAAEAAAABAAAAAQAAAAEAAAABAAAAAQAAAAEAAAABQAA AAUAAAAFAAAABQAAAAUAAAAFAAAABQAAAAUAAAAFAAAABQAAAAUAAAAFAAAABQAAAAUAAAAFAAAA BQAAAAUAAAAFAAAABQAAAAUAAAAFAAAABQAAAAUAAAAFAAAABQAAAAUAAAAFAAAABQAAAAUAAAAF AAAABQAAAAUAAAAFAAAABQAAAAUAAAAFAAAABQAAAAUAAAAFAAAABQAAAAUAAAAFAAAABQAAAAUA AAAFAAAABQAAAAUAAAAFAAAABQAAAAUAAAAFAAAABQAAAAUAAAAFAAAABQAAAAUAAAAFAAAABQAA AAUAAAAFAAAABQAAAAYAAAAHAAAABwAAAAcAAAAHAAAABwAAAAcAAAAHAAAABwAAAAcAAAAHAAAA BwAAAAcAAAAHAAAABwAAAAcAAAAHAAAABwAAAAcAAAAHAAAABwAAAAcAAAAHAAAABwAAAAcAAAAH AAAABwAAAAcAAAAHAAAABwAAAAcAAAAHAAAABwAAAAcAAAAHAAAABwAAAAcAAAAHAAAABwAAAAcA AAAHAAAABwAAAAcAAAAIAAAACAAAAAgAAAAIAAAACAAAAAgAAAAIAAAACAAAAAgAAAAIAAAACAAA AAgAAAAIAAAACAAAAAgAAAAIAAAACAAAAAgAAAAIAAAACAAAAAgAAAAIAAAACAAAAAgAAAAIAAAA CAAAAAgAAAAIAAAACAAAAAgAAAAIAAAACAAAAAgAAAAIAAAACAAAAAgAAAAIAAAACAAAAAkAAAAJ 
AAAACQAAAAkAAAAJAAAACQAAAAkAAAAJAAAACQAAAAkAAAAJAAAACQAAAAkAAAAJAAAACQAAAAkA AAAJAAAACQAAAAkAAAAJAAAACQAAAAkAAAAJAAAACQAAAAkAAAAJAAAACQAAAAkAAAAJAAAACQAA AAkAAAAJA/yNWNRCZydO3vRYXq7rRFh50z8biZX1GYfYpEnKXqQ1ZZUQLcL71DA5U+WqRhwgOjJI IbwXF/m0//iVNYWL6cb1kUy+Z2hInEHraAmivOtSlnaOPZ9mE4fMv/GMTepsmX/XjI88606ky55K D3UXletByaTwe+dykOujJs3E0dYcWtJSJMy/CHMd0EG6tTBVrde8NYgnWKkixUqHTWsScuDR1iUB AIf3nJ4BrZ2PleFijdoCkp36qiGHwFa8NHxMnInZ0s3CKEKmHe+KcZPzuqwmm44GvqGAX3I/VYAA AAAAAAAMgAAAAQAAAI6AAAACgAAAA4AAAASAAAAFAAAAAV9Qam8AAAABYR1ShwAAAACdxfYxAAAA ANz1Di4AAAABPUnxJAAAAADNxzlGr6vCJpIFz4XaG/fi/f9C9zgQ8ptKSQpfQ1NMJBGTDTxxYGGp ch2xUA== ` func BenchmarkDecode(b *testing.B) { if err := fixtures.Init(); err != nil { b.Errorf("unexpected error initializing fixtures: %s", err) } f := fixtures.Basic().One() fixture, err := ioutil.ReadAll(f.Idx()) if err != nil { b.Errorf("unexpected error reading idx file: %s", err) } defer func() { if err := fixtures.Clean(); err != nil { b.Errorf("unexpected error cleaning fixtures: %s", err) } }() for i := 0; i < b.N; i++ { f := bytes.NewBuffer(fixture) idx := new(MemoryIndex) d := NewDecoder(f) if err := d.Decode(idx); err != nil { b.Errorf("unexpected error decoding: %s", err) } } } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/idxfile/doc.go000066400000000000000000000122031345605224300245220ustar00rootroot00000000000000// Package idxfile implements encoding and decoding of packfile idx files. // // == Original (version 1) pack-*.idx files have the following format: // // - The header consists of 256 4-byte network byte order // integers. N-th entry of this table records the number of // objects in the corresponding pack, the first byte of whose // object name is less than or equal to N. This is called the // 'first-level fan-out' table. // // - The header is followed by sorted 24-byte entries, one entry // per object in the pack. 
Each entry is: // // 4-byte network byte order integer, recording where the // object is stored in the packfile as the offset from the // beginning. // // 20-byte object name. // // - The file is concluded with a trailer: // // A copy of the 20-byte SHA1 checksum at the end of // corresponding packfile. // // 20-byte SHA1-checksum of all of the above. // // Pack Idx file: // // -- +--------------------------------+ // fanout | fanout[0] = 2 (for example) |-. // table +--------------------------------+ | // | fanout[1] | | // +--------------------------------+ | // | fanout[2] | | // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | // | fanout[255] = total objects |---. // -- +--------------------------------+ | | // main | offset | | | // index | object name 00XXXXXXXXXXXXXXXX | | | // tab +--------------------------------+ | | // | offset | | | // | object name 00XXXXXXXXXXXXXXXX | | | // +--------------------------------+<+ | // .-| offset | | // | | object name 01XXXXXXXXXXXXXXXX | | // | +--------------------------------+ | // | | offset | | // | | object name 01XXXXXXXXXXXXXXXX | | // | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | // | | offset | | // | | object name FFXXXXXXXXXXXXXXXX | | // --| +--------------------------------+<--+ // trailer | | packfile checksum | // | +--------------------------------+ // | | idxfile checksum | // | +--------------------------------+ // .---------. // | // Pack file entry: <+ // // packed object header: // 1-byte size extension bit (MSB) // type (next 3 bit) // size0 (lower 4-bit) // n-byte sizeN (as long as MSB is set, each 7-bit) // size0..sizeN form 4+7+7+..+7 bit integer, size0 // is the least significant part, and sizeN is the // most significant part. // packed object data: // If it is not DELTA, then deflated bytes (the size above // is the size before compression). // If it is REF_DELTA, then // 20-byte base object name SHA1 (the size above is the // size of the delta data that follows). // delta data, deflated. 
// If it is OFS_DELTA, then // n-byte offset (see below) interpreted as a negative // offset from the type-byte of the header of the // ofs-delta entry (the size above is the size of // the delta data that follows). // delta data, deflated. // // offset encoding: // n bytes with MSB set in all but the last one. // The offset is then the number constructed by // concatenating the lower 7 bit of each byte, and // for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1)) // to the result. // // == Version 2 pack-*.idx files support packs larger than 4 GiB, and // have some other reorganizations. They have the format: // // - A 4-byte magic number '\377tOc' which is an unreasonable // fanout[0] value. // // - A 4-byte version number (= 2) // // - A 256-entry fan-out table just like v1. // // - A table of sorted 20-byte SHA1 object names. These are // packed together without offset values to reduce the cache // footprint of the binary search for a specific object name. // // - A table of 4-byte CRC32 values of the packed object data. // This is new in v2 so compressed data can be copied directly // from pack to pack during repacking without undetected // data corruption. // // - A table of 4-byte offset values (in network byte order). // These are usually 31-bit pack file offsets, but large // offsets are encoded as an index into the next table with // the msbit set. // // - A table of 8-byte offset entries (empty for pack files less // than 2 GiB). Pack files are organized with heavily used // objects toward the front, so most object references should // not need to refer to this table. // // - The same trailer as a v1 pack file: // // A copy of the 20-byte SHA1 checksum at the end of // corresponding packfile. // // 20-byte SHA1-checksum of all of the above. 
// // Source: // https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-format.txt package idxfile golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/idxfile/encoder.go000066400000000000000000000046441345605224300254060ustar00rootroot00000000000000package idxfile import ( "crypto/sha1" "hash" "io" "gopkg.in/src-d/go-git.v4/utils/binary" ) // Encoder writes MemoryIndex structs to an output stream. type Encoder struct { io.Writer hash hash.Hash } // NewEncoder returns a new stream encoder that writes to w. func NewEncoder(w io.Writer) *Encoder { h := sha1.New() mw := io.MultiWriter(w, h) return &Encoder{mw, h} } // Encode encodes an MemoryIndex to the encoder writer. func (e *Encoder) Encode(idx *MemoryIndex) (int, error) { flow := []func(*MemoryIndex) (int, error){ e.encodeHeader, e.encodeFanout, e.encodeHashes, e.encodeCRC32, e.encodeOffsets, e.encodeChecksums, } sz := 0 for _, f := range flow { i, err := f(idx) sz += i if err != nil { return sz, err } } return sz, nil } func (e *Encoder) encodeHeader(idx *MemoryIndex) (int, error) { c, err := e.Write(idxHeader) if err != nil { return c, err } return c + 4, binary.WriteUint32(e, idx.Version) } func (e *Encoder) encodeFanout(idx *MemoryIndex) (int, error) { for _, c := range idx.Fanout { if err := binary.WriteUint32(e, c); err != nil { return 0, err } } return fanout * 4, nil } func (e *Encoder) encodeHashes(idx *MemoryIndex) (int, error) { var size int for k := 0; k < fanout; k++ { pos := idx.FanoutMapping[k] if pos == noMapping { continue } n, err := e.Write(idx.Names[pos]) if err != nil { return size, err } size += n } return size, nil } func (e *Encoder) encodeCRC32(idx *MemoryIndex) (int, error) { var size int for k := 0; k < fanout; k++ { pos := idx.FanoutMapping[k] if pos == noMapping { continue } n, err := e.Write(idx.CRC32[pos]) if err != nil { return size, err } size += n } return size, nil } func (e *Encoder) encodeOffsets(idx *MemoryIndex) (int, error) { var size int for k := 0; k < 
fanout; k++ { pos := idx.FanoutMapping[k] if pos == noMapping { continue } n, err := e.Write(idx.Offset32[pos]) if err != nil { return size, err } size += n } if len(idx.Offset64) > 0 { n, err := e.Write(idx.Offset64) if err != nil { return size, err } size += n } return size, nil } func (e *Encoder) encodeChecksums(idx *MemoryIndex) (int, error) { if _, err := e.Write(idx.PackfileChecksum[:]); err != nil { return 0, err } copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:20]) if _, err := e.Write(idx.IdxChecksum[:]); err != nil { return 0, err } return 40, nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/idxfile/encoder_test.go000066400000000000000000000012431345605224300264350ustar00rootroot00000000000000package idxfile_test import ( "bytes" "io/ioutil" . "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) func (s *IdxfileSuite) TestDecodeEncode(c *C) { fixtures.ByTag("packfile").Test(c, func(f *fixtures.Fixture) { expected, err := ioutil.ReadAll(f.Idx()) c.Assert(err, IsNil) idx := new(MemoryIndex) d := NewDecoder(bytes.NewBuffer(expected)) err = d.Decode(idx) c.Assert(err, IsNil) result := bytes.NewBuffer(nil) e := NewEncoder(result) size, err := e.Encode(idx) c.Assert(err, IsNil) c.Assert(size, Equals, len(expected)) c.Assert(result.Bytes(), DeepEquals, expected) }) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/idxfile/idxfile.go000066400000000000000000000165321345605224300254120ustar00rootroot00000000000000package idxfile import ( "bytes" "io" "sort" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/utils/binary" ) const ( // VersionSupported is the only idx version supported. VersionSupported = 2 noMapping = -1 ) var ( idxHeader = []byte{255, 't', 'O', 'c'} ) // Index represents an index of a packfile. type Index interface { // Contains checks whether the given hash is in the index. 
Contains(h plumbing.Hash) (bool, error) // FindOffset finds the offset in the packfile for the object with // the given hash. FindOffset(h plumbing.Hash) (int64, error) // FindCRC32 finds the CRC32 of the object with the given hash. FindCRC32(h plumbing.Hash) (uint32, error) // FindHash finds the hash for the object with the given offset. FindHash(o int64) (plumbing.Hash, error) // Count returns the number of entries in the index. Count() (int64, error) // Entries returns an iterator to retrieve all index entries. Entries() (EntryIter, error) // EntriesByOffset returns an iterator to retrieve all index entries ordered // by offset. EntriesByOffset() (EntryIter, error) } // MemoryIndex is the in memory representation of an idx file. type MemoryIndex struct { Version uint32 Fanout [256]uint32 // FanoutMapping maps the position in the fanout table to the position // in the Names, Offset32 and CRC32 slices. This improves the memory // usage by not needing an array with unnecessary empty slots. FanoutMapping [256]int Names [][]byte Offset32 [][]byte CRC32 [][]byte Offset64 []byte PackfileChecksum [20]byte IdxChecksum [20]byte offsetHash map[int64]plumbing.Hash } var _ Index = (*MemoryIndex)(nil) // NewMemoryIndex returns an instance of a new MemoryIndex. func NewMemoryIndex() *MemoryIndex { return &MemoryIndex{} } func (idx *MemoryIndex) findHashIndex(h plumbing.Hash) (int, bool) { k := idx.FanoutMapping[h[0]] if k == noMapping { return 0, false } if len(idx.Names) <= k { return 0, false } data := idx.Names[k] high := uint64(len(idx.Offset32[k])) >> 2 if high == 0 { return 0, false } low := uint64(0) for { mid := (low + high) >> 1 offset := mid * objectIDLength cmp := bytes.Compare(h[:], data[offset:offset+objectIDLength]) if cmp < 0 { high = mid } else if cmp == 0 { return int(mid), true } else { low = mid + 1 } if low >= high { break } } return 0, false } // Contains implements the Index interface. 
func (idx *MemoryIndex) Contains(h plumbing.Hash) (bool, error) { _, ok := idx.findHashIndex(h) return ok, nil } // FindOffset implements the Index interface. func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) { if len(idx.FanoutMapping) <= int(h[0]) { return 0, plumbing.ErrObjectNotFound } k := idx.FanoutMapping[h[0]] i, ok := idx.findHashIndex(h) if !ok { return 0, plumbing.ErrObjectNotFound } return idx.getOffset(k, i) } const isO64Mask = uint64(1) << 31 func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) (int64, error) { offset := secondLevel << 2 buf := bytes.NewBuffer(idx.Offset32[firstLevel][offset : offset+4]) ofs, err := binary.ReadUint32(buf) if err != nil { return -1, err } if (uint64(ofs) & isO64Mask) != 0 { offset := 8 * (uint64(ofs) & ^isO64Mask) buf := bytes.NewBuffer(idx.Offset64[offset : offset+8]) n, err := binary.ReadUint64(buf) if err != nil { return -1, err } return int64(n), nil } return int64(ofs), nil } // FindCRC32 implements the Index interface. func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) { k := idx.FanoutMapping[h[0]] i, ok := idx.findHashIndex(h) if !ok { return 0, plumbing.ErrObjectNotFound } return idx.getCRC32(k, i) } func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) (uint32, error) { offset := secondLevel << 2 buf := bytes.NewBuffer(idx.CRC32[firstLevel][offset : offset+4]) return binary.ReadUint32(buf) } // FindHash implements the Index interface. func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) { // Lazily generate the reverse offset/hash map if required. if idx.offsetHash == nil { if err := idx.genOffsetHash(); err != nil { return plumbing.ZeroHash, err } } hash, ok := idx.offsetHash[o] if !ok { return plumbing.ZeroHash, plumbing.ErrObjectNotFound } return hash, nil } // genOffsetHash generates the offset/hash mapping for reverse search. 
func (idx *MemoryIndex) genOffsetHash() error { count, err := idx.Count() if err != nil { return err } idx.offsetHash = make(map[int64]plumbing.Hash, count) iter, err := idx.Entries() if err != nil { return err } for { entry, err := iter.Next() if err != nil { if err == io.EOF { return nil } return err } idx.offsetHash[int64(entry.Offset)] = entry.Hash } } // Count implements the Index interface. func (idx *MemoryIndex) Count() (int64, error) { return int64(idx.Fanout[fanout-1]), nil } // Entries implements the Index interface. func (idx *MemoryIndex) Entries() (EntryIter, error) { return &idxfileEntryIter{idx, 0, 0, 0}, nil } // EntriesByOffset implements the Index interface. func (idx *MemoryIndex) EntriesByOffset() (EntryIter, error) { count, err := idx.Count() if err != nil { return nil, err } iter := &idxfileEntryOffsetIter{ entries: make(entriesByOffset, count), } entries, err := idx.Entries() if err != nil { return nil, err } for pos := 0; int64(pos) < count; pos++ { entry, err := entries.Next() if err != nil { return nil, err } iter.entries[pos] = entry } sort.Sort(iter.entries) return iter, nil } // EntryIter is an iterator that will return the entries in a packfile index. type EntryIter interface { // Next returns the next entry in the packfile index. Next() (*Entry, error) // Close closes the iterator. 
Close() error } type idxfileEntryIter struct { idx *MemoryIndex total int firstLevel, secondLevel int } func (i *idxfileEntryIter) Next() (*Entry, error) { for { if i.firstLevel >= fanout { return nil, io.EOF } if i.total >= int(i.idx.Fanout[i.firstLevel]) { i.firstLevel++ i.secondLevel = 0 continue } entry := new(Entry) ofs := i.secondLevel * objectIDLength copy(entry.Hash[:], i.idx.Names[i.idx.FanoutMapping[i.firstLevel]][ofs:]) pos := i.idx.FanoutMapping[entry.Hash[0]] offset, err := i.idx.getOffset(pos, i.secondLevel) if err != nil { return nil, err } entry.Offset = uint64(offset) entry.CRC32, err = i.idx.getCRC32(pos, i.secondLevel) if err != nil { return nil, err } i.secondLevel++ i.total++ return entry, nil } } func (i *idxfileEntryIter) Close() error { i.firstLevel = fanout return nil } // Entry is the in memory representation of an object entry in the idx file. type Entry struct { Hash plumbing.Hash CRC32 uint32 Offset uint64 } type idxfileEntryOffsetIter struct { entries entriesByOffset pos int } func (i *idxfileEntryOffsetIter) Next() (*Entry, error) { if i.pos >= len(i.entries) { return nil, io.EOF } entry := i.entries[i.pos] i.pos++ return entry, nil } func (i *idxfileEntryOffsetIter) Close() error { i.pos = len(i.entries) + 1 return nil } type entriesByOffset []*Entry func (o entriesByOffset) Len() int { return len(o) } func (o entriesByOffset) Less(i int, j int) bool { return o[i].Offset < o[j].Offset } func (o entriesByOffset) Swap(i int, j int) { o[i], o[j] = o[j], o[i] } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/idxfile/idxfile_test.go000066400000000000000000000065341345605224300264520ustar00rootroot00000000000000package idxfile_test import ( "bytes" "encoding/base64" "fmt" "io" "testing" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) func BenchmarkFindOffset(b *testing.B) { idx, err := fixtureIndex() if err != nil { b.Fatalf(err.Error()) } for i := 0; i < b.N; i++ { for _, h := range fixtureHashes { _, err := idx.FindOffset(h) if err != nil { b.Fatalf("error getting offset: %s", err) } } } } func BenchmarkFindCRC32(b *testing.B) { idx, err := fixtureIndex() if err != nil { b.Fatalf(err.Error()) } for i := 0; i < b.N; i++ { for _, h := range fixtureHashes { _, err := idx.FindCRC32(h) if err != nil { b.Fatalf("error getting crc32: %s", err) } } } } func BenchmarkContains(b *testing.B) { idx, err := fixtureIndex() if err != nil { b.Fatalf(err.Error()) } for i := 0; i < b.N; i++ { for _, h := range fixtureHashes { ok, err := idx.Contains(h) if err != nil { b.Fatalf("error checking if hash is in index: %s", err) } if !ok { b.Error("expected hash to be in index") } } } } func BenchmarkEntries(b *testing.B) { idx, err := fixtureIndex() if err != nil { b.Fatalf(err.Error()) } for i := 0; i < b.N; i++ { iter, err := idx.Entries() if err != nil { b.Fatalf("unexpected error getting entries: %s", err) } var entries int for { _, err := iter.Next() if err != nil { if err == io.EOF { break } b.Errorf("unexpected error getting entry: %s", err) } entries++ } if entries != len(fixtureHashes) { b.Errorf("expecting entries to be %d, got %d", len(fixtureHashes), entries) } } } type IndexSuite struct { fixtures.Suite } var _ = Suite(&IndexSuite{}) func (s *IndexSuite) TestFindHash(c *C) { idx, err := fixtureIndex() c.Assert(err, IsNil) for i, pos := range fixtureOffsets { hash, err := idx.FindHash(pos) c.Assert(err, IsNil) c.Assert(hash, Equals, fixtureHashes[i]) } } func (s *IndexSuite) TestEntriesByOffset(c *C) { idx, err := fixtureIndex() c.Assert(err, IsNil) entries, err := idx.EntriesByOffset() c.Assert(err, IsNil) for _, pos := range fixtureOffsets { e, err := entries.Next() c.Assert(err, IsNil) c.Assert(e.Offset, Equals, uint64(pos)) } } var 
fixtureHashes = []plumbing.Hash{ plumbing.NewHash("303953e5aa461c203a324821bc1717f9b4fff895"), plumbing.NewHash("5296768e3d9f661387ccbff18c4dea6c997fd78c"), plumbing.NewHash("03fc8d58d44267274edef4585eaeeb445879d33f"), plumbing.NewHash("8f3ceb4ea4cb9e4a0f751795eb41c9a4f07be772"), plumbing.NewHash("e0d1d625010087f79c9e01ad9d8f95e1628dda02"), plumbing.NewHash("90eba326cdc4d1d61c5ad25224ccbf08731dd041"), plumbing.NewHash("bab53055add7bc35882758a922c54a874d6b1272"), plumbing.NewHash("1b8995f51987d8a449ca5ea4356595102dc2fbd4"), plumbing.NewHash("35858be9c6f5914cbe6768489c41eb6809a2bceb"), } var fixtureOffsets = []int64{ 12, 142, 1601322837, 2646996529, 3452385606, 3707047470, 5323223332, 5894072943, 5924278919, } func fixtureIndex() (*idxfile.MemoryIndex, error) { f := bytes.NewBufferString(fixtureLarge4GB) idx := new(idxfile.MemoryIndex) d := idxfile.NewDecoder(base64.NewDecoder(base64.StdEncoding, f)) err := d.Decode(idx) if err != nil { return nil, fmt.Errorf("unexpected error decoding index: %s", err) } return idx, nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/idxfile/writer.go000066400000000000000000000076271345605224300253070ustar00rootroot00000000000000package idxfile import ( "bytes" "fmt" "math" "sort" "sync" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/utils/binary" ) // objects implements sort.Interface and uses hash as sorting key. type objects []Entry // Writer implements a packfile Observer interface and is used to generate // indexes. type Writer struct { m sync.Mutex count uint32 checksum plumbing.Hash objects objects offset64 uint32 finished bool index *MemoryIndex added map[plumbing.Hash]struct{} } // Index returns a previously created MemoryIndex or creates a new one if // needed. func (w *Writer) Index() (*MemoryIndex, error) { w.m.Lock() defer w.m.Unlock() if w.index == nil { return w.createIndex() } return w.index, nil } // Add appends new object data. 
func (w *Writer) Add(h plumbing.Hash, pos uint64, crc uint32) { w.m.Lock() defer w.m.Unlock() if w.added == nil { w.added = make(map[plumbing.Hash]struct{}) } if _, ok := w.added[h]; !ok { w.added[h] = struct{}{} w.objects = append(w.objects, Entry{h, crc, pos}) } } func (w *Writer) Finished() bool { return w.finished } // OnHeader implements packfile.Observer interface. func (w *Writer) OnHeader(count uint32) error { w.count = count w.objects = make(objects, 0, count) return nil } // OnInflatedObjectHeader implements packfile.Observer interface. func (w *Writer) OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error { return nil } // OnInflatedObjectContent implements packfile.Observer interface. func (w *Writer) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error { w.Add(h, uint64(pos), crc) return nil } // OnFooter implements packfile.Observer interface. func (w *Writer) OnFooter(h plumbing.Hash) error { w.checksum = h w.finished = true _, err := w.createIndex() if err != nil { return err } return nil } // creatIndex returns a filled MemoryIndex with the information filled by // the observer callbacks. 
func (w *Writer) createIndex() (*MemoryIndex, error) { if !w.finished { return nil, fmt.Errorf("the index still hasn't finished building") } idx := new(MemoryIndex) w.index = idx sort.Sort(w.objects) // unmap all fans by default for i := range idx.FanoutMapping { idx.FanoutMapping[i] = noMapping } buf := new(bytes.Buffer) last := -1 bucket := -1 for i, o := range w.objects { fan := o.Hash[0] // fill the gaps between fans for j := last + 1; j < int(fan); j++ { idx.Fanout[j] = uint32(i) } // update the number of objects for this position idx.Fanout[fan] = uint32(i + 1) // we move from one bucket to another, update counters and allocate // memory if last != int(fan) { bucket++ idx.FanoutMapping[fan] = bucket last = int(fan) idx.Names = append(idx.Names, make([]byte, 0)) idx.Offset32 = append(idx.Offset32, make([]byte, 0)) idx.CRC32 = append(idx.CRC32, make([]byte, 0)) } idx.Names[bucket] = append(idx.Names[bucket], o.Hash[:]...) offset := o.Offset if offset > math.MaxInt32 { offset = w.addOffset64(offset) } buf.Truncate(0) binary.WriteUint32(buf, uint32(offset)) idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...) buf.Truncate(0) binary.WriteUint32(buf, uint32(o.CRC32)) idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...) } for j := last + 1; j < 256; j++ { idx.Fanout[j] = uint32(len(w.objects)) } idx.Version = VersionSupported idx.PackfileChecksum = w.checksum return idx, nil } func (w *Writer) addOffset64(pos uint64) uint64 { buf := new(bytes.Buffer) binary.WriteUint64(buf, pos) w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...) 
index := uint64(w.offset64 | (1 << 31)) w.offset64++ return index } func (o objects) Len() int { return len(o) } func (o objects) Less(i int, j int) bool { cmp := bytes.Compare(o[i].Hash[:], o[j].Hash[:]) return cmp < 0 } func (o objects) Swap(i int, j int) { o[i], o[j] = o[j], o[i] } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/idxfile/writer_test.go000066400000000000000000000050151345605224300263330ustar00rootroot00000000000000package idxfile_test import ( "bytes" "encoding/base64" "io/ioutil" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type WriterSuite struct { fixtures.Suite } var _ = Suite(&WriterSuite{}) func (s *WriterSuite) TestWriter(c *C) { f := fixtures.Basic().One() scanner := packfile.NewScanner(f.Packfile()) obs := new(idxfile.Writer) parser, err := packfile.NewParser(scanner, obs) c.Assert(err, IsNil) _, err = parser.Parse() c.Assert(err, IsNil) idx, err := obs.Index() c.Assert(err, IsNil) idxFile := f.Idx() expected, err := ioutil.ReadAll(idxFile) c.Assert(err, IsNil) idxFile.Close() buf := new(bytes.Buffer) encoder := idxfile.NewEncoder(buf) n, err := encoder.Encode(idx) c.Assert(err, IsNil) c.Assert(n, Equals, len(expected)) c.Assert(buf.Bytes(), DeepEquals, expected) } func (s *WriterSuite) TestWriterLarge(c *C) { writer := new(idxfile.Writer) err := writer.OnHeader(uint32(len(fixture4GbEntries))) c.Assert(err, IsNil) for _, o := range fixture4GbEntries { err = writer.OnInflatedObjectContent(plumbing.NewHash(o.hash), o.offset, o.crc, nil) c.Assert(err, IsNil) } err = writer.OnFooter(fixture4GbChecksum) c.Assert(err, IsNil) idx, err := writer.Index() c.Assert(err, IsNil) // load fixture index f := bytes.NewBufferString(fixtureLarge4GB) expected, err := ioutil.ReadAll(base64.NewDecoder(base64.StdEncoding, f)) c.Assert(err, IsNil) buf := new(bytes.Buffer) encoder := idxfile.NewEncoder(buf) 
n, err := encoder.Encode(idx) c.Assert(err, IsNil) c.Assert(n, Equals, len(expected)) c.Assert(buf.Bytes(), DeepEquals, expected) } var ( fixture4GbChecksum = plumbing.NewHash("afabc2269205cf85da1bf7e2fdff42f73810f29b") fixture4GbEntries = []struct { offset int64 hash string crc uint32 }{ {12, "303953e5aa461c203a324821bc1717f9b4fff895", 0xbc347c4c}, {142, "5296768e3d9f661387ccbff18c4dea6c997fd78c", 0xcdc22842}, {1601322837, "03fc8d58d44267274edef4585eaeeb445879d33f", 0x929dfaaa}, {2646996529, "8f3ceb4ea4cb9e4a0f751795eb41c9a4f07be772", 0xa61def8a}, {3452385606, "e0d1d625010087f79c9e01ad9d8f95e1628dda02", 0x06bea180}, {3707047470, "90eba326cdc4d1d61c5ad25224ccbf08731dd041", 0x7193f3ba}, {5323223332, "bab53055add7bc35882758a922c54a874d6b1272", 0xac269b8e}, {5894072943, "1b8995f51987d8a449ca5ea4356595102dc2fbd4", 0x2187c056}, {5924278919, "35858be9c6f5914cbe6768489c41eb6809a2bceb", 0x9c89d9d2}, } ) golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/index/000077500000000000000000000000001345605224300231135ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/index/decoder.go000066400000000000000000000212431345605224300250510ustar00rootroot00000000000000package index import ( "bytes" "crypto/sha1" "errors" "hash" "io" "io/ioutil" "strconv" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/utils/binary" ) var ( // DecodeVersionSupported is the range of supported index versions DecodeVersionSupported = struct{ Min, Max uint32 }{Min: 2, Max: 4} // ErrMalformedSignature is returned by Decode when the index header file is // malformed ErrMalformedSignature = errors.New("malformed index signature file") // ErrInvalidChecksum is returned by Decode if the SHA1 hash mismatch with // the read content ErrInvalidChecksum = errors.New("invalid checksum") errUnknownExtension = errors.New("unknown extension") ) const ( entryHeaderLength = 62 entryExtended = 0x4000 entryValid = 0x8000 nameMask = 0xfff intentToAddMask = 1 << 13 
skipWorkTreeMask = 1 << 14 ) // A Decoder reads and decodes index files from an input stream. type Decoder struct { r io.Reader hash hash.Hash lastEntry *Entry } // NewDecoder returns a new decoder that reads from r. func NewDecoder(r io.Reader) *Decoder { h := sha1.New() return &Decoder{ r: io.TeeReader(r, h), hash: h, } } // Decode reads the whole index object from its input and stores it in the // value pointed to by idx. func (d *Decoder) Decode(idx *Index) error { var err error idx.Version, err = validateHeader(d.r) if err != nil { return err } entryCount, err := binary.ReadUint32(d.r) if err != nil { return err } if err := d.readEntries(idx, int(entryCount)); err != nil { return err } return d.readExtensions(idx) } func (d *Decoder) readEntries(idx *Index, count int) error { for i := 0; i < count; i++ { e, err := d.readEntry(idx) if err != nil { return err } d.lastEntry = e idx.Entries = append(idx.Entries, e) } return nil } func (d *Decoder) readEntry(idx *Index) (*Entry, error) { e := &Entry{} var msec, mnsec, sec, nsec uint32 var flags uint16 flow := []interface{}{ &sec, &nsec, &msec, &mnsec, &e.Dev, &e.Inode, &e.Mode, &e.UID, &e.GID, &e.Size, &e.Hash, &flags, } if err := binary.Read(d.r, flow...); err != nil { return nil, err } read := entryHeaderLength if sec != 0 || nsec != 0 { e.CreatedAt = time.Unix(int64(sec), int64(nsec)) } if msec != 0 || mnsec != 0 { e.ModifiedAt = time.Unix(int64(msec), int64(mnsec)) } e.Stage = Stage(flags>>12) & 0x3 if flags&entryExtended != 0 { extended, err := binary.ReadUint16(d.r) if err != nil { return nil, err } read += 2 e.IntentToAdd = extended&intentToAddMask != 0 e.SkipWorktree = extended&skipWorkTreeMask != 0 } if err := d.readEntryName(idx, e, flags); err != nil { return nil, err } return e, d.padEntry(idx, e, read) } func (d *Decoder) readEntryName(idx *Index, e *Entry, flags uint16) error { var name string var err error switch idx.Version { case 2, 3: len := flags & nameMask name, err = d.doReadEntryName(len) case 
4: name, err = d.doReadEntryNameV4() default: return ErrUnsupportedVersion } if err != nil { return err } e.Name = name return nil } func (d *Decoder) doReadEntryNameV4() (string, error) { l, err := binary.ReadVariableWidthInt(d.r) if err != nil { return "", err } var base string if d.lastEntry != nil { base = d.lastEntry.Name[:len(d.lastEntry.Name)-int(l)] } name, err := binary.ReadUntil(d.r, '\x00') if err != nil { return "", err } return base + string(name), nil } func (d *Decoder) doReadEntryName(len uint16) (string, error) { name := make([]byte, len) if err := binary.Read(d.r, &name); err != nil { return "", err } return string(name), nil } // Index entries are padded out to the next 8 byte alignment // for historical reasons related to how C Git read the files. func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error { if idx.Version == 4 { return nil } entrySize := read + len(e.Name) padLen := 8 - entrySize%8 _, err := io.CopyN(ioutil.Discard, d.r, int64(padLen)) return err } func (d *Decoder) readExtensions(idx *Index) error { // TODO: support 'Split index' and 'Untracked cache' extensions, take in // count that they are not supported by jgit or libgit var expected []byte var err error var header [4]byte for { expected = d.hash.Sum(nil) var n int if n, err = io.ReadFull(d.r, header[:]); err != nil { if n == 0 { err = io.EOF } break } err = d.readExtension(idx, header[:]) if err != nil { break } } if err != errUnknownExtension { return err } return d.readChecksum(expected, header) } func (d *Decoder) readExtension(idx *Index, header []byte) error { switch { case bytes.Equal(header, treeExtSignature): r, err := d.getExtensionReader() if err != nil { return err } idx.Cache = &Tree{} d := &treeExtensionDecoder{r} if err := d.Decode(idx.Cache); err != nil { return err } case bytes.Equal(header, resolveUndoExtSignature): r, err := d.getExtensionReader() if err != nil { return err } idx.ResolveUndo = &ResolveUndo{} d := &resolveUndoDecoder{r} if err := 
d.Decode(idx.ResolveUndo); err != nil { return err } case bytes.Equal(header, endOfIndexEntryExtSignature): r, err := d.getExtensionReader() if err != nil { return err } idx.EndOfIndexEntry = &EndOfIndexEntry{} d := &endOfIndexEntryDecoder{r} if err := d.Decode(idx.EndOfIndexEntry); err != nil { return err } default: return errUnknownExtension } return nil } func (d *Decoder) getExtensionReader() (io.Reader, error) { len, err := binary.ReadUint32(d.r) if err != nil { return nil, err } return &io.LimitedReader{R: d.r, N: int64(len)}, nil } func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error { var h plumbing.Hash copy(h[:4], alreadyRead[:]) if err := binary.Read(d.r, h[4:]); err != nil { return err } if !bytes.Equal(h[:], expected) { return ErrInvalidChecksum } return nil } func validateHeader(r io.Reader) (version uint32, err error) { var s = make([]byte, 4) if _, err := io.ReadFull(r, s); err != nil { return 0, err } if !bytes.Equal(s, indexSignature) { return 0, ErrMalformedSignature } version, err = binary.ReadUint32(r) if err != nil { return 0, err } if version < DecodeVersionSupported.Min || version > DecodeVersionSupported.Max { return 0, ErrUnsupportedVersion } return } type treeExtensionDecoder struct { r io.Reader } func (d *treeExtensionDecoder) Decode(t *Tree) error { for { e, err := d.readEntry() if err != nil { if err == io.EOF { return nil } return err } if e == nil { continue } t.Entries = append(t.Entries, *e) } } func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) { e := &TreeEntry{} path, err := binary.ReadUntil(d.r, '\x00') if err != nil { return nil, err } e.Path = string(path) count, err := binary.ReadUntil(d.r, ' ') if err != nil { return nil, err } i, err := strconv.Atoi(string(count)) if err != nil { return nil, err } // An entry can be in an invalidated state and is represented by having a // negative number in the entry_count field. 
if i == -1 { return nil, nil } e.Entries = i trees, err := binary.ReadUntil(d.r, '\n') if err != nil { return nil, err } i, err = strconv.Atoi(string(trees)) if err != nil { return nil, err } e.Trees = i if err := binary.Read(d.r, &e.Hash); err != nil { return nil, err } return e, nil } type resolveUndoDecoder struct { r io.Reader } func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error { for { e, err := d.readEntry() if err != nil { if err == io.EOF { return nil } return err } ru.Entries = append(ru.Entries, *e) } } func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) { e := &ResolveUndoEntry{ Stages: make(map[Stage]plumbing.Hash), } path, err := binary.ReadUntil(d.r, '\x00') if err != nil { return nil, err } e.Path = string(path) for i := 0; i < 3; i++ { if err := d.readStage(e, Stage(i+1)); err != nil { return nil, err } } for s := range e.Stages { var hash plumbing.Hash if err := binary.Read(d.r, hash[:]); err != nil { return nil, err } e.Stages[s] = hash } return e, nil } func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error { ascii, err := binary.ReadUntil(d.r, '\x00') if err != nil { return err } stage, err := strconv.ParseInt(string(ascii), 8, 64) if err != nil { return err } if stage != 0 { e.Stages[s] = plumbing.ZeroHash } return nil } type endOfIndexEntryDecoder struct { r io.Reader } func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error { var err error e.Offset, err = binary.ReadUint32(d.r) if err != nil { return err } return binary.Read(d.r, &e.Hash) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/index/decoder_test.go000066400000000000000000000151201345605224300261050ustar00rootroot00000000000000package index import ( "testing" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/filemode" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) func Test(t *testing.T) { TestingT(t) } type IndexSuite struct { fixtures.Suite } var _ = Suite(&IndexSuite{}) func (s *IndexSuite) TestDecode(c *C) { f, err := fixtures.Basic().One().DotGit().Open("index") c.Assert(err, IsNil) defer func() { c.Assert(f.Close(), IsNil) }() idx := &Index{} d := NewDecoder(f) err = d.Decode(idx) c.Assert(err, IsNil) c.Assert(idx.Version, Equals, uint32(2)) c.Assert(idx.Entries, HasLen, 9) } func (s *IndexSuite) TestDecodeEntries(c *C) { f, err := fixtures.Basic().One().DotGit().Open("index") c.Assert(err, IsNil) defer func() { c.Assert(f.Close(), IsNil) }() idx := &Index{} d := NewDecoder(f) err = d.Decode(idx) c.Assert(err, IsNil) c.Assert(idx.Entries, HasLen, 9) e := idx.Entries[0] c.Assert(e.CreatedAt.Unix(), Equals, int64(1480626693)) c.Assert(e.CreatedAt.Nanosecond(), Equals, 498593596) c.Assert(e.ModifiedAt.Unix(), Equals, int64(1480626693)) c.Assert(e.ModifiedAt.Nanosecond(), Equals, 498593596) c.Assert(e.Dev, Equals, uint32(39)) c.Assert(e.Inode, Equals, uint32(140626)) c.Assert(e.UID, Equals, uint32(1000)) c.Assert(e.GID, Equals, uint32(100)) c.Assert(e.Size, Equals, uint32(189)) c.Assert(e.Hash.String(), Equals, "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88") c.Assert(e.Name, Equals, ".gitignore") c.Assert(e.Mode, Equals, filemode.Regular) e = idx.Entries[1] c.Assert(e.Name, Equals, "CHANGELOG") } func (s *IndexSuite) TestDecodeCacheTree(c *C) { f, err := fixtures.Basic().One().DotGit().Open("index") c.Assert(err, IsNil) defer func() { c.Assert(f.Close(), IsNil) }() idx := &Index{} d := NewDecoder(f) err = d.Decode(idx) c.Assert(err, IsNil) c.Assert(idx.Entries, HasLen, 9) c.Assert(idx.Cache.Entries, HasLen, 5) for i, expected := range expectedEntries { c.Assert(idx.Cache.Entries[i].Path, Equals, expected.Path) c.Assert(idx.Cache.Entries[i].Entries, Equals, expected.Entries) c.Assert(idx.Cache.Entries[i].Trees, Equals, expected.Trees) 
c.Assert(idx.Cache.Entries[i].Hash.String(), Equals, expected.Hash.String()) } } var expectedEntries = []TreeEntry{ {Path: "", Entries: 9, Trees: 4, Hash: plumbing.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c")}, {Path: "go", Entries: 1, Trees: 0, Hash: plumbing.NewHash("a39771a7651f97faf5c72e08224d857fc35133db")}, {Path: "php", Entries: 1, Trees: 0, Hash: plumbing.NewHash("586af567d0bb5e771e49bdd9434f5e0fb76d25fa")}, {Path: "json", Entries: 2, Trees: 0, Hash: plumbing.NewHash("5a877e6a906a2743ad6e45d99c1793642aaf8eda")}, {Path: "vendor", Entries: 1, Trees: 0, Hash: plumbing.NewHash("cf4aa3b38974fb7d81f367c0830f7d78d65ab86b")}, } func (s *IndexSuite) TestDecodeMergeConflict(c *C) { f, err := fixtures.Basic().ByTag("merge-conflict").One().DotGit().Open("index") c.Assert(err, IsNil) defer func() { c.Assert(f.Close(), IsNil) }() idx := &Index{} d := NewDecoder(f) err = d.Decode(idx) c.Assert(err, IsNil) c.Assert(idx.Version, Equals, uint32(2)) c.Assert(idx.Entries, HasLen, 13) expected := []struct { Stage Stage Hash string }{ {AncestorMode, "880cd14280f4b9b6ed3986d6671f907d7cc2a198"}, {OurMode, "d499a1a0b79b7d87a35155afd0c1cce78b37a91c"}, {TheirMode, "14f8e368114f561c38e134f6e68ea6fea12d77ed"}, } // stagged files for i, e := range idx.Entries[4:7] { c.Assert(e.Stage, Equals, expected[i].Stage) c.Assert(e.CreatedAt.IsZero(), Equals, true) c.Assert(e.ModifiedAt.IsZero(), Equals, true) c.Assert(e.Dev, Equals, uint32(0)) c.Assert(e.Inode, Equals, uint32(0)) c.Assert(e.UID, Equals, uint32(0)) c.Assert(e.GID, Equals, uint32(0)) c.Assert(e.Size, Equals, uint32(0)) c.Assert(e.Hash.String(), Equals, expected[i].Hash) c.Assert(e.Name, Equals, "go/example.go") } } func (s *IndexSuite) TestDecodeExtendedV3(c *C) { f, err := fixtures.Basic().ByTag("intent-to-add").One().DotGit().Open("index") c.Assert(err, IsNil) defer func() { c.Assert(f.Close(), IsNil) }() idx := &Index{} d := NewDecoder(f) err = d.Decode(idx) c.Assert(err, IsNil) c.Assert(idx.Version, Equals, uint32(3)) 
c.Assert(idx.Entries, HasLen, 11) c.Assert(idx.Entries[6].Name, Equals, "intent-to-add") c.Assert(idx.Entries[6].IntentToAdd, Equals, true) c.Assert(idx.Entries[6].SkipWorktree, Equals, false) } func (s *IndexSuite) TestDecodeResolveUndo(c *C) { f, err := fixtures.Basic().ByTag("resolve-undo").One().DotGit().Open("index") c.Assert(err, IsNil) defer func() { c.Assert(f.Close(), IsNil) }() idx := &Index{} d := NewDecoder(f) err = d.Decode(idx) c.Assert(err, IsNil) c.Assert(idx.Version, Equals, uint32(2)) c.Assert(idx.Entries, HasLen, 8) ru := idx.ResolveUndo c.Assert(ru.Entries, HasLen, 2) c.Assert(ru.Entries[0].Path, Equals, "go/example.go") c.Assert(ru.Entries[0].Stages, HasLen, 3) c.Assert(ru.Entries[0].Stages[AncestorMode], Not(Equals), plumbing.ZeroHash) c.Assert(ru.Entries[0].Stages[OurMode], Not(Equals), plumbing.ZeroHash) c.Assert(ru.Entries[0].Stages[TheirMode], Not(Equals), plumbing.ZeroHash) c.Assert(ru.Entries[1].Path, Equals, "haskal/haskal.hs") c.Assert(ru.Entries[1].Stages, HasLen, 2) c.Assert(ru.Entries[1].Stages[OurMode], Not(Equals), plumbing.ZeroHash) c.Assert(ru.Entries[1].Stages[TheirMode], Not(Equals), plumbing.ZeroHash) } func (s *IndexSuite) TestDecodeV4(c *C) { f, err := fixtures.Basic().ByTag("index-v4").One().DotGit().Open("index") c.Assert(err, IsNil) defer func() { c.Assert(f.Close(), IsNil) }() idx := &Index{} d := NewDecoder(f) err = d.Decode(idx) c.Assert(err, IsNil) c.Assert(idx.Version, Equals, uint32(4)) c.Assert(idx.Entries, HasLen, 11) names := []string{ ".gitignore", "CHANGELOG", "LICENSE", "binary.jpg", "go/example.go", "haskal/haskal.hs", "intent-to-add", "json/long.json", "json/short.json", "php/crappy.php", "vendor/foo.go", } for i, e := range idx.Entries { c.Assert(e.Name, Equals, names[i]) } c.Assert(idx.Entries[6].Name, Equals, "intent-to-add") c.Assert(idx.Entries[6].IntentToAdd, Equals, true) c.Assert(idx.Entries[6].SkipWorktree, Equals, false) } func (s *IndexSuite) TestDecodeEndOfIndexEntry(c *C) { f, err := 
fixtures.Basic().ByTag("end-of-index-entry").One().DotGit().Open("index") c.Assert(err, IsNil) defer func() { c.Assert(f.Close(), IsNil) }() idx := &Index{} d := NewDecoder(f) err = d.Decode(idx) c.Assert(err, IsNil) c.Assert(idx.Version, Equals, uint32(2)) c.Assert(idx.EndOfIndexEntry, NotNil) c.Assert(idx.EndOfIndexEntry.Offset, Equals, uint32(716)) c.Assert(idx.EndOfIndexEntry.Hash.String(), Equals, "922e89d9ffd7cefce93a211615b2053c0f42bd78") } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/index/doc.go000066400000000000000000000330301345605224300242060ustar00rootroot00000000000000// Package index implements encoding and decoding of index format files. // // Git index format // ================ // // == The Git index file has the following format // // All binary numbers are in network byte order. Version 2 is described // here unless stated otherwise. // // - A 12-byte header consisting of // // 4-byte signature: // The signature is { 'D', 'I', 'R', 'C' } (stands for "dircache") // // 4-byte version number: // The current supported versions are 2, 3 and 4. // // 32-bit number of index entries. // // - A number of sorted index entries (see below). // // - Extensions // // Extensions are identified by signature. Optional extensions can // be ignored if Git does not understand them. // // Git currently supports cached tree and resolve undo extensions. // // 4-byte extension signature. If the first byte is 'A'..'Z' the // extension is optional and can be ignored. // // 32-bit size of the extension // // Extension data // // - 160-bit SHA-1 over the content of the index file before this // checksum. // // == Index entry // // Index entries are sorted in ascending order on the name field, // interpreted as a string of unsigned bytes (i.e. memcmp() order, no // localization, no special casing of directory separator '/'). Entries // with the same name are sorted by their stage field. 
// // 32-bit ctime seconds, the last time a file's metadata changed // this is stat(2) data // // 32-bit ctime nanosecond fractions // this is stat(2) data // // 32-bit mtime seconds, the last time a file's data changed // this is stat(2) data // // 32-bit mtime nanosecond fractions // this is stat(2) data // // 32-bit dev // this is stat(2) data // // 32-bit ino // this is stat(2) data // // 32-bit mode, split into (high to low bits) // // 4-bit object type // valid values in binary are 1000 (regular file), 1010 (symbolic link) // and 1110 (gitlink) // // 3-bit unused // // 9-bit unix permission. Only 0755 and 0644 are valid for regular files. // Symbolic links and gitlinks have value 0 in this field. // // 32-bit uid // this is stat(2) data // // 32-bit gid // this is stat(2) data // // 32-bit file size // This is the on-disk size from stat(2), truncated to 32-bit. // // 160-bit SHA-1 for the represented object // // A 16-bit 'flags' field split into (high to low bits) // // 1-bit assume-valid flag // // 1-bit extended flag (must be zero in version 2) // // 2-bit stage (during merge) // // 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF // is stored in this field. // // (Version 3 or later) A 16-bit field, only applicable if the // "extended flag" above is 1, split into (high to low bits). // // 1-bit reserved for future // // 1-bit skip-worktree flag (used by sparse checkout) // // 1-bit intent-to-add flag (used by "git add -N") // // 13-bit unused, must be zero // // Entry path name (variable length) relative to top level directory // (without leading slash). '/' is used as path separator. The special // path components ".", ".." and ".git" (without quotes) are disallowed. // Trailing slash is also disallowed. // // The exact encoding is undefined, but the '.' and '/' characters // are encoded in 7-bit ASCII and the encoding cannot contain a NUL // byte (iow, this is a UNIX pathname). 
// // (Version 4) In version 4, the entry path name is prefix-compressed // relative to the path name for the previous entry (the very first // entry is encoded as if the path name for the previous entry is an // empty string). At the beginning of an entry, an integer N in the // variable width encoding (the same encoding as the offset is encoded // for OFS_DELTA pack entries; see pack-format.txt) is stored, followed // by a NUL-terminated string S. Removing N bytes from the end of the // path name for the previous entry, and replacing it with the string S // yields the path name for this entry. // // 1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes // while keeping the name NUL-terminated. // // (Version 4) In version 4, the padding after the pathname does not // exist. // // Interpretation of index entries in split index mode is completely // different. See below for details. // // == Extensions // // === Cached tree // // Cached tree extension contains pre-computed hashes for trees that can // be derived from the index. It helps speed up tree object generation // from index for a new commit. // // When a path is updated in index, the path must be invalidated and // removed from tree cache. // // The signature for this extension is { 'T', 'R', 'E', 'E' }. // // A series of entries fill the entire extension; each of which // consists of: // // - NUL-terminated path component (relative to its parent directory); // // - ASCII decimal number of entries in the index that is covered by the // tree this entry represents (entry_count); // // - A space (ASCII 32); // // - ASCII decimal number that represents the number of subtrees this // tree has; // // - A newline (ASCII 10); and // // - 160-bit object name for the object that would result from writing // this span of index as a tree. // // An entry can be in an invalidated state and is represented by having // a negative number in the entry_count field. 
In this case, there is no // object name and the next entry starts immediately after the newline. // When writing an invalid entry, -1 should always be used as entry_count. // // The entries are written out in the top-down, depth-first order. The // first entry represents the root level of the repository, followed by the // first subtree--let's call this A--of the root level (with its name // relative to the root level), followed by the first subtree of A (with // its name relative to A), ... // // === Resolve undo // // A conflict is represented in the index as a set of higher stage entries. // When a conflict is resolved (e.g. with "git add path"), these higher // stage entries will be removed and a stage-0 entry with proper resolution // is added. // // When these higher stage entries are removed, they are saved in the // resolve undo extension, so that conflicts can be recreated (e.g. with // "git checkout -m"), in case users want to redo a conflict resolution // from scratch. // // The signature for this extension is { 'R', 'E', 'U', 'C' }. // // A series of entries fill the entire extension; each of which // consists of: // // - NUL-terminated pathname the entry describes (relative to the root of // the repository, i.e. full pathname); // // - Three NUL-terminated ASCII octal numbers, entry mode of entries in // stage 1 to 3 (a missing stage is represented by "0" in this field); // and // // - At most three 160-bit object names of the entry in stages from 1 to 3 // (nothing is written for a missing stage). // // === Split index // // In split index mode, the majority of index entries could be stored // in a separate file. This extension records the changes to be made on // top of that to produce the final index. // // The signature for this extension is { 'l', 'i', 'n', 'k' }. // // The extension consists of: // // - 160-bit SHA-1 of the shared index file. The shared index file path // is $GIT_DIR/sharedindex.. 
If all 160 bits are zero, the // index does not require a shared index file. // // - An ewah-encoded delete bitmap, each bit represents an entry in the // shared index. If a bit is set, its corresponding entry in the // shared index will be removed from the final index. Note, because // a delete operation changes index entry positions, but we do need // original positions in replace phase, it's best to just mark // entries for removal, then do a mass deletion after replacement. // // - An ewah-encoded replace bitmap, each bit represents an entry in // the shared index. If a bit is set, its corresponding entry in the // shared index will be replaced with an entry in this index // file. All replaced entries are stored in sorted order in this // index. The first "1" bit in the replace bitmap corresponds to the // first index entry, the second "1" bit to the second entry and so // on. Replaced entries may have empty path names to save space. // // The remaining index entries after replaced ones will be added to the // final index. These added entries are also sorted by entry name then // stage. // // == Untracked cache // // Untracked cache saves the untracked file list and necessary data to // verify the cache. The signature for this extension is { 'U', 'N', // 'T', 'R' }. // // The extension starts with // // - A sequence of NUL-terminated strings, preceded by the size of the // sequence in variable width encoding. Each string describes the // environment where the cache can be used. // // - Stat data of $GIT_DIR/info/exclude. See "Index entry" section from // ctime field until "file size". // // - Stat data of plumbing.excludesfile // // - 32-bit dir_flags (see struct dir_struct) // // - 160-bit SHA-1 of $GIT_DIR/info/exclude. Null SHA-1 means the file // does not exist. // // - 160-bit SHA-1 of plumbing.excludesfile. Null SHA-1 means the file does // not exist. // // - NUL-terminated string of per-dir exclude file name. This usually // is ".gitignore". 
// // - The number of following directory blocks, variable width // encoding. If this number is zero, the extension ends here with a // following NUL. // // - A number of directory blocks in depth-first-search order, each // consists of // // - The number of untracked entries, variable width encoding. // // - The number of sub-directory blocks, variable width encoding. // // - The directory name terminated by NUL. // // - A number of untracked file/dir names terminated by NUL. // // The remaining data of each directory block is grouped by type: // // - An ewah bitmap, the n-th bit marks whether the n-th directory has // valid untracked cache entries. // // - An ewah bitmap, the n-th bit records "check-only" bit of // read_directory_recursive() for the n-th directory. // // - An ewah bitmap, the n-th bit indicates whether SHA-1 and stat data // is valid for the n-th directory and exists in the next data. // // - An array of stat data. The n-th data corresponds with the n-th // "one" bit in the previous ewah bitmap. // // - An array of SHA-1. The n-th SHA-1 corresponds with the n-th "one" bit // in the previous ewah bitmap. // // - One NUL. // // == File System Monitor cache // // The file system monitor cache tracks files for which the core.fsmonitor // hook has told us about changes. The signature for this extension is // { 'F', 'S', 'M', 'N' }. // // The extension starts with // // - 32-bit version number: the current supported version is 1. // // - 64-bit time: the extension data reflects all changes through the given // time which is stored as the nanoseconds elapsed since midnight, // January 1, 1970. // // - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap. // // - An ewah bitmap, the n-th bit indicates whether the n-th index entry // is not CE_FSMONITOR_VALID. // // == End of Index Entry // // The End of Index Entry (EOIE) is used to locate the end of the variable // length index entries and the begining of the extensions. 
Code can take // advantage of this to quickly locate the index extensions without having // to parse through all of the index entries. // // Because it must be able to be loaded before the variable length cache // entries and other index extensions, this extension must be written last. // The signature for this extension is { 'E', 'O', 'I', 'E' }. // // The extension consists of: // // - 32-bit offset to the end of the index entries // // - 160-bit SHA-1 over the extension types and their sizes (but not // their contents). E.g. if we have "TREE" extension that is N-bytes // long, "REUC" extension that is M-bytes long, followed by "EOIE", // then the hash would be: // // SHA-1("TREE" + + // "REUC" + ) // // == Index Entry Offset Table // // The Index Entry Offset Table (IEOT) is used to help address the CPU // cost of loading the index by enabling multi-threading the process of // converting cache entries from the on-disk format to the in-memory format. // The signature for this extension is { 'I', 'E', 'O', 'T' }. // // The extension consists of: // // - 32-bit version (currently 1) // // - A number of index offset entries each consisting of: // // - 32-bit offset from the begining of the file to the first cache entry // in this block of entries. // // - 32-bit count of cache entries in this blockpackage index package index golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/index/encoder.go000066400000000000000000000056321345605224300250670ustar00rootroot00000000000000package index import ( "bytes" "crypto/sha1" "errors" "hash" "io" "sort" "time" "gopkg.in/src-d/go-git.v4/utils/binary" ) var ( // EncodeVersionSupported is the range of supported index versions EncodeVersionSupported uint32 = 2 // ErrInvalidTimestamp is returned by Encode if a Index with a Entry with // negative timestamp values ErrInvalidTimestamp = errors.New("negative timestamps are not allowed") ) // An Encoder writes an Index to an output stream. 
type Encoder struct { w io.Writer hash hash.Hash } // NewEncoder returns a new encoder that writes to w. func NewEncoder(w io.Writer) *Encoder { h := sha1.New() mw := io.MultiWriter(w, h) return &Encoder{mw, h} } // Encode writes the Index to the stream of the encoder. func (e *Encoder) Encode(idx *Index) error { // TODO: support versions v3 and v4 // TODO: support extensions if idx.Version != EncodeVersionSupported { return ErrUnsupportedVersion } if err := e.encodeHeader(idx); err != nil { return err } if err := e.encodeEntries(idx); err != nil { return err } return e.encodeFooter() } func (e *Encoder) encodeHeader(idx *Index) error { return binary.Write(e.w, indexSignature, idx.Version, uint32(len(idx.Entries)), ) } func (e *Encoder) encodeEntries(idx *Index) error { sort.Sort(byName(idx.Entries)) for _, entry := range idx.Entries { if err := e.encodeEntry(entry); err != nil { return err } wrote := entryHeaderLength + len(entry.Name) if err := e.padEntry(wrote); err != nil { return err } } return nil } func (e *Encoder) encodeEntry(entry *Entry) error { if entry.IntentToAdd || entry.SkipWorktree { return ErrUnsupportedVersion } sec, nsec, err := e.timeToUint32(&entry.CreatedAt) if err != nil { return err } msec, mnsec, err := e.timeToUint32(&entry.ModifiedAt) if err != nil { return err } flags := uint16(entry.Stage&0x3) << 12 if l := len(entry.Name); l < nameMask { flags |= uint16(l) } else { flags |= nameMask } flow := []interface{}{ sec, nsec, msec, mnsec, entry.Dev, entry.Inode, entry.Mode, entry.UID, entry.GID, entry.Size, entry.Hash[:], flags, } if err := binary.Write(e.w, flow...); err != nil { return err } return binary.Write(e.w, []byte(entry.Name)) } func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) { if t.IsZero() { return 0, 0, nil } if t.Unix() < 0 || t.UnixNano() < 0 { return 0, 0, ErrInvalidTimestamp } return uint32(t.Unix()), uint32(t.Nanosecond()), nil } func (e *Encoder) padEntry(wrote int) error { padLen := 8 - wrote%8 _, err 
:= e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen)) return err } func (e *Encoder) encodeFooter() error { return binary.Write(e.w, e.hash.Sum(nil)) } type byName []*Entry func (l byName) Len() int { return len(l) } func (l byName) Swap(i, j int) { l[i], l[j] = l[j], l[i] } func (l byName) Less(i, j int) bool { return l[i].Name < l[j].Name } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/index/encoder_test.go000066400000000000000000000035611345605224300261250ustar00rootroot00000000000000package index import ( "bytes" "strings" "time" "github.com/google/go-cmp/cmp" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing" ) func (s *IndexSuite) TestEncode(c *C) { idx := &Index{ Version: 2, Entries: []*Entry{{ CreatedAt: time.Now(), ModifiedAt: time.Now(), Dev: 4242, Inode: 424242, UID: 84, GID: 8484, Size: 42, Stage: TheirMode, Hash: plumbing.NewHash("e25b29c8946e0e192fae2edc1dabf7be71e8ecf3"), Name: "foo", }, { CreatedAt: time.Now(), ModifiedAt: time.Now(), Name: "bar", Size: 82, }, { CreatedAt: time.Now(), ModifiedAt: time.Now(), Name: strings.Repeat(" ", 20), Size: 82, }}, } buf := bytes.NewBuffer(nil) e := NewEncoder(buf) err := e.Encode(idx) c.Assert(err, IsNil) output := &Index{} d := NewDecoder(buf) err = d.Decode(output) c.Assert(err, IsNil) c.Assert(cmp.Equal(idx, output), Equals, true) c.Assert(output.Entries[0].Name, Equals, strings.Repeat(" ", 20)) c.Assert(output.Entries[1].Name, Equals, "bar") c.Assert(output.Entries[2].Name, Equals, "foo") } func (s *IndexSuite) TestEncodeUnsuportedVersion(c *C) { idx := &Index{Version: 3} buf := bytes.NewBuffer(nil) e := NewEncoder(buf) err := e.Encode(idx) c.Assert(err, Equals, ErrUnsupportedVersion) } func (s *IndexSuite) TestEncodeWithIntentToAddUnsuportedVersion(c *C) { idx := &Index{ Version: 2, Entries: []*Entry{{IntentToAdd: true}}, } buf := bytes.NewBuffer(nil) e := NewEncoder(buf) err := e.Encode(idx) c.Assert(err, Equals, ErrUnsupportedVersion) } func (s *IndexSuite) 
TestEncodeWithSkipWorktreeUnsuportedVersion(c *C) { idx := &Index{ Version: 2, Entries: []*Entry{{SkipWorktree: true}}, } buf := bytes.NewBuffer(nil) e := NewEncoder(buf) err := e.Encode(idx) c.Assert(err, Equals, ErrUnsupportedVersion) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/index/index.go000066400000000000000000000147471345605224300245660ustar00rootroot00000000000000package index import ( "bytes" "errors" "fmt" "path/filepath" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/filemode" ) var ( // ErrUnsupportedVersion is returned by Decode when the index file version // is not supported. ErrUnsupportedVersion = errors.New("unsupported version") // ErrEntryNotFound is returned by Index.Entry, if an entry is not found. ErrEntryNotFound = errors.New("entry not found") indexSignature = []byte{'D', 'I', 'R', 'C'} treeExtSignature = []byte{'T', 'R', 'E', 'E'} resolveUndoExtSignature = []byte{'R', 'E', 'U', 'C'} endOfIndexEntryExtSignature = []byte{'E', 'O', 'I', 'E'} ) // Stage during merge type Stage int const ( // Merged is the default stage, fully merged Merged Stage = 1 // AncestorMode is the base revision AncestorMode Stage = 1 // OurMode is the first tree revision, ours OurMode Stage = 2 // TheirMode is the second tree revision, theirs TheirMode Stage = 3 ) // Index contains the information about which objects are currently checked out // in the worktree, having information about the working files. Changes in // worktree are detected using this Index. The Index is also used during merges type Index struct { // Version is index version Version uint32 // Entries collection of entries represented by this Index. 
The order of // this collection is not guaranteed Entries []*Entry // Cache represents the 'Cached tree' extension Cache *Tree // ResolveUndo represents the 'Resolve undo' extension ResolveUndo *ResolveUndo // EndOfIndexEntry represents the 'End of Index Entry' extension EndOfIndexEntry *EndOfIndexEntry } // Add creates a new Entry and returns it. The caller should first check that // another entry with the same path does not exist. func (i *Index) Add(path string) *Entry { e := &Entry{ Name: filepath.ToSlash(path), } i.Entries = append(i.Entries, e) return e } // Entry returns the entry that match the given path, if any. func (i *Index) Entry(path string) (*Entry, error) { path = filepath.ToSlash(path) for _, e := range i.Entries { if e.Name == path { return e, nil } } return nil, ErrEntryNotFound } // Remove remove the entry that match the give path and returns deleted entry. func (i *Index) Remove(path string) (*Entry, error) { path = filepath.ToSlash(path) for index, e := range i.Entries { if e.Name == path { i.Entries = append(i.Entries[:index], i.Entries[index+1:]...) return e, nil } } return nil, ErrEntryNotFound } // Glob returns the all entries matching pattern or nil if there is no matching // entry. The syntax of patterns is the same as in filepath.Glob. func (i *Index) Glob(pattern string) (matches []*Entry, err error) { pattern = filepath.ToSlash(pattern) for _, e := range i.Entries { m, err := match(pattern, e.Name) if err != nil { return nil, err } if m { matches = append(matches, e) } } return } // String is equivalent to `git ls-files --stage --debug` func (i *Index) String() string { buf := bytes.NewBuffer(nil) for _, e := range i.Entries { buf.WriteString(e.String()) } return buf.String() } // Entry represents a single file (or stage of a file) in the cache. An entry // represents exactly one stage of a file. If a file path is unmerged then // multiple Entry instances may appear for the same path name. 
type Entry struct { // Hash is the SHA1 of the represented file Hash plumbing.Hash // Name is the Entry path name relative to top level directory Name string // CreatedAt time when the tracked path was created CreatedAt time.Time // ModifiedAt time when the tracked path was changed ModifiedAt time.Time // Dev and Inode of the tracked path Dev, Inode uint32 // Mode of the path Mode filemode.FileMode // UID and GID, userid and group id of the owner UID, GID uint32 // Size is the length in bytes for regular files Size uint32 // Stage on a merge is defines what stage is representing this entry // https://git-scm.com/book/en/v2/Git-Tools-Advanced-Merging Stage Stage // SkipWorktree used in sparse checkouts // https://git-scm.com/docs/git-read-tree#_sparse_checkout SkipWorktree bool // IntentToAdd record only the fact that the path will be added later // https://git-scm.com/docs/git-add ("git add -N") IntentToAdd bool } func (e Entry) String() string { buf := bytes.NewBuffer(nil) fmt.Fprintf(buf, "%06o %s %d\t%s\n", e.Mode, e.Hash, e.Stage, e.Name) fmt.Fprintf(buf, " ctime: %d:%d\n", e.CreatedAt.Unix(), e.CreatedAt.Nanosecond()) fmt.Fprintf(buf, " mtime: %d:%d\n", e.ModifiedAt.Unix(), e.ModifiedAt.Nanosecond()) fmt.Fprintf(buf, " dev: %d\tino: %d\n", e.Dev, e.Inode) fmt.Fprintf(buf, " uid: %d\tgid: %d\n", e.UID, e.GID) fmt.Fprintf(buf, " size: %d\tflags: %x\n", e.Size, 0) return buf.String() } // Tree contains pre-computed hashes for trees that can be derived from the // index. It helps speed up tree object generation from index for a new commit. type Tree struct { Entries []TreeEntry } // TreeEntry entry of a cached Tree type TreeEntry struct { // Path component (relative to its parent directory) Path string // Entries is the number of entries in the index that is covered by the tree // this entry represents. 
Entries int // Trees is the number that represents the number of subtrees this tree has Trees int // Hash object name for the object that would result from writing this span // of index as a tree. Hash plumbing.Hash } // ResolveUndo is used when a conflict is resolved (e.g. with "git add path"), // these higher stage entries are removed and a stage-0 entry with proper // resolution is added. When these higher stage entries are removed, they are // saved in the resolve undo extension. type ResolveUndo struct { Entries []ResolveUndoEntry } // ResolveUndoEntry contains the information about a conflict when is resolved type ResolveUndoEntry struct { Path string Stages map[Stage]plumbing.Hash } // EndOfIndexEntry is the End of Index Entry (EOIE) is used to locate the end of // the variable length index entries and the begining of the extensions. Code // can take advantage of this to quickly locate the index extensions without // having to parse through all of the index entries. // // Because it must be able to be loaded before the variable length cache // entries and other index extensions, this extension must be written last. type EndOfIndexEntry struct { // Offset to the end of the index entries Offset uint32 // Hash is a SHA-1 over the extension types and their sizes (but not // their contents). Hash plumbing.Hash } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/index/index_test.go000066400000000000000000000026631345605224300256170ustar00rootroot00000000000000package index import ( "path/filepath" . 
"gopkg.in/check.v1" ) func (s *IndexSuite) TestIndexAdd(c *C) { idx := &Index{} e := idx.Add("foo") e.Size = 42 e, err := idx.Entry("foo") c.Assert(err, IsNil) c.Assert(e.Name, Equals, "foo") c.Assert(e.Size, Equals, uint32(42)) } func (s *IndexSuite) TestIndexEntry(c *C) { idx := &Index{ Entries: []*Entry{ {Name: "foo", Size: 42}, {Name: "bar", Size: 82}, }, } e, err := idx.Entry("foo") c.Assert(err, IsNil) c.Assert(e.Name, Equals, "foo") e, err = idx.Entry("missing") c.Assert(e, IsNil) c.Assert(err, Equals, ErrEntryNotFound) } func (s *IndexSuite) TestIndexRemove(c *C) { idx := &Index{ Entries: []*Entry{ {Name: "foo", Size: 42}, {Name: "bar", Size: 82}, }, } e, err := idx.Remove("foo") c.Assert(err, IsNil) c.Assert(e.Name, Equals, "foo") e, err = idx.Remove("foo") c.Assert(e, IsNil) c.Assert(err, Equals, ErrEntryNotFound) } func (s *IndexSuite) TestIndexGlob(c *C) { idx := &Index{ Entries: []*Entry{ {Name: "foo/bar/bar", Size: 42}, {Name: "foo/baz/qux", Size: 42}, {Name: "fux", Size: 82}, }, } m, err := idx.Glob(filepath.Join("foo", "b*")) c.Assert(err, IsNil) c.Assert(m, HasLen, 2) c.Assert(m[0].Name, Equals, "foo/bar/bar") c.Assert(m[1].Name, Equals, "foo/baz/qux") m, err = idx.Glob("f*") c.Assert(err, IsNil) c.Assert(m, HasLen, 3) m, err = idx.Glob("f*/baz/q*") c.Assert(err, IsNil) c.Assert(m, HasLen, 1) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/index/match.go000066400000000000000000000077631345605224300245530ustar00rootroot00000000000000package index import ( "path/filepath" "runtime" "unicode/utf8" ) // match is filepath.Match with support to match fullpath and not only filenames // code from: // https://github.com/golang/go/blob/39852bf4cce6927e01d0136c7843f65a801738cb/src/path/filepath/match.go#L44-L224 func match(pattern, name string) (matched bool, err error) { Pattern: for len(pattern) > 0 { var star bool var chunk string star, chunk, pattern = scanChunk(pattern) // Look for match at current position. 
t, ok, err := matchChunk(chunk, name) // if we're the last chunk, make sure we've exhausted the name // otherwise we'll give a false result even if we could still match // using the star if ok && (len(t) == 0 || len(pattern) > 0) { name = t continue } if err != nil { return false, err } if star { // Look for match skipping i+1 bytes. // Cannot skip /. for i := 0; i < len(name); i++ { t, ok, err := matchChunk(chunk, name[i+1:]) if ok { // if we're the last chunk, make sure we exhausted the name if len(pattern) == 0 && len(t) > 0 { continue } name = t continue Pattern } if err != nil { return false, err } } } return false, nil } return len(name) == 0, nil } // scanChunk gets the next segment of pattern, which is a non-star string // possibly preceded by a star. func scanChunk(pattern string) (star bool, chunk, rest string) { for len(pattern) > 0 && pattern[0] == '*' { pattern = pattern[1:] star = true } inrange := false var i int Scan: for i = 0; i < len(pattern); i++ { switch pattern[i] { case '\\': if runtime.GOOS != "windows" { // error check handled in matchChunk: bad pattern. if i+1 < len(pattern) { i++ } } case '[': inrange = true case ']': inrange = false case '*': if !inrange { break Scan } } } return star, pattern[0:i], pattern[i:] } // matchChunk checks whether chunk matches the beginning of s. // If so, it returns the remainder of s (after the match). // Chunk is all single-character operators: literals, char classes, and ?. func matchChunk(chunk, s string) (rest string, ok bool, err error) { for len(chunk) > 0 { if len(s) == 0 { return } switch chunk[0] { case '[': // character class r, n := utf8.DecodeRuneInString(s) s = s[n:] chunk = chunk[1:] // We can't end right after '[', we're expecting at least // a closing bracket and possibly a caret. 
if len(chunk) == 0 { err = filepath.ErrBadPattern return } // possibly negated negated := chunk[0] == '^' if negated { chunk = chunk[1:] } // parse all ranges match := false nrange := 0 for { if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { chunk = chunk[1:] break } var lo, hi rune if lo, chunk, err = getEsc(chunk); err != nil { return } hi = lo if chunk[0] == '-' { if hi, chunk, err = getEsc(chunk[1:]); err != nil { return } } if lo <= r && r <= hi { match = true } nrange++ } if match == negated { return } case '?': _, n := utf8.DecodeRuneInString(s) s = s[n:] chunk = chunk[1:] case '\\': if runtime.GOOS != "windows" { chunk = chunk[1:] if len(chunk) == 0 { err = filepath.ErrBadPattern return } } fallthrough default: if chunk[0] != s[0] { return } s = s[1:] chunk = chunk[1:] } } return s, true, nil } // getEsc gets a possibly-escaped character from chunk, for a character class. func getEsc(chunk string) (r rune, nchunk string, err error) { if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { err = filepath.ErrBadPattern return } if chunk[0] == '\\' && runtime.GOOS != "windows" { chunk = chunk[1:] if len(chunk) == 0 { err = filepath.ErrBadPattern return } } r, n := utf8.DecodeRuneInString(chunk) if r == utf8.RuneError && n == 1 { err = filepath.ErrBadPattern } nchunk = chunk[n:] if len(nchunk) == 0 { err = filepath.ErrBadPattern } return } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/objfile/000077500000000000000000000000001345605224300234165ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/objfile/common_test.go000066400000000000000000000073641345605224300263060ustar00rootroot00000000000000package objfile import ( "encoding/base64" "testing" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing" ) type objfileFixture struct { hash string // hash of data t plumbing.ObjectType // object type content string // base64-encoded content data string // base64-encoded objfile data } var objfileFixtures = []objfileFixture{ { "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391", plumbing.BlobObject, base64.StdEncoding.EncodeToString([]byte("")), "eAFLyslPUjBgAAAJsAHw", }, { "a8a940627d132695a9769df883f85992f0ff4a43", plumbing.BlobObject, base64.StdEncoding.EncodeToString([]byte("this is a test")), "eAFLyslPUjA0YSjJyCxWAKJEhZLU4hIAUDYHOg==", }, { "4dc2174801ac4a3d36886210fd086fbe134cf7b2", plumbing.BlobObject, base64.StdEncoding.EncodeToString([]byte("this\nis\n\n\na\nmultiline\n\ntest.\n")), "eAFLyslPUjCyZCjJyCzmAiIurkSu3NKcksyczLxULq6S1OISPS4A1I8LMQ==", }, { "13e6f47dd57798bfdc728d91f5c6d7f40c5bb5fc", plumbing.BlobObject, base64.StdEncoding.EncodeToString([]byte("this tests\r\nCRLF\r\nencoded files.\r\n")), "eAFLyslPUjA2YSjJyCxWKEktLinm5XIO8nHj5UrNS85PSU1RSMvMSS3W4+UCABp3DNE=", }, { "72a7bc4667ab068e954172437b993d9fbaa137cb", plumbing.BlobObject, base64.StdEncoding.EncodeToString([]byte("test@example.com")), "eAFLyslPUjA0YyhJLS5xSK1IzC3ISdVLzs8FAGVtCIA=", }, { "bb2b40e85ec0455d1de72daff71583f0dd72a33f", plumbing.BlobObject, base64.StdEncoding.EncodeToString([]byte("package main\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"io\"\r\n\t\"os\"\r\n\r\n\t\"gopkg.in/src-d/go-git.v3\"\r\n)\r\n\r\nfunc main() {\r\n\tfmt.Printf(\"Retrieving %q ...\\n\", os.Args[2])\r\n\tr, err := git.NewRepository(os.Args[2], nil)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\r\n\tif err := r.Pull(\"origin\", \"refs/heads/master\"); err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\r\n\tdumpCommits(r)\r\n}\r\n\r\nfunc dumpCommits(r *git.Repository) {\r\n\titer := r.Commits()\r\n\tdefer iter.Close()\r\n\r\n\tfor {\r\n\t\tcommit, err := iter.Next()\r\n\t\tif err != nil {\r\n\t\t\tif err == io.EOF 
{\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\r\n\t\t\tpanic(err)\r\n\t\t}\r\n\r\n\t\tfmt.Println(commit)\r\n\t}\r\n}\r\n")), "eAGNUU1LAzEU9JpC/0NcEFJps2ARQdmDFD3W0qt6SHez8dHdZH1JqyL+d/Oy/aDgQVh47LzJTGayatyKX99MzzpVrpXRvFVgh4PhANrOYeBiOGBZ3YaMJrg0nI+D/o3r1kaCzT2Wkyo3bmIgyO00rkfEqDe2TIJixL/jgagjFwg21CJb6oCgt2ANv3jnUsoXm4258/IejX++eo0CDMdcI/LbgpPuXH8sdec8BIdf4sgccwsN0aFO9POCgGTIOmWhFFGE9j/p1jtWFEW52DSNyByCAXLPUNc+f9Oq8nmrfNCYje7+o1lt2m7m2haCF2SVnFL6kw2/pBzHEH0rEH0oI8q9BF220nWEaSdnjfNaRDDCtcM+WZnsDgUl4lx/BuKxv6rYY0XBwcmHp8deh7EVarWmQ7uC2Glre/TweI0VvTk5xaTx+wWX66Gs", }, { "e94db0f9ffca44dc7bade6a3591f544183395a7c", plumbing.TreeObject, "MTAwNjQ0IFRlc3QgMS50eHQAqKlAYn0TJpWpdp34g/hZkvD/SkMxMDA2NDQgVGVzdCAyLnR4dABNwhdIAaxKPTaIYhD9CG++E0z3sjEwMDY0NCBUZXN0IDMudHh0ABPm9H3Vd5i/3HKNkfXG1/QMW7X8MTAwNjQ0IFRlc3QgNC50eHQAcqe8RmerBo6VQXJDe5k9n7qhN8sxMDA2NDQgVGVzdCA1LnR4dAC7K0DoXsBFXR3nLa/3FYPw3XKjPw==", "eAErKUpNVTC0NGAwNDAwMzFRCEktLlEw1CupKGFYsdIhqVZYberKsrk/mn9ETvrw38sZWZURWJXvIXEPxjVetmYdSQJ/OfL3Cft834SsyhisSvjZl9qr5TP23ynqnfj12PUvPNFb/yCrMgGrKlq+xy19NVvfVMci5+qZtvN3LTQ/jazKFKxqt7bDi7gDrrGyz3XXfxdt/nC3aLE9AA2STmk=", }, { "9d7f8a56eaf92469dee8a856e716a03387ddb076", plumbing.CommitObject, "dHJlZSBlOTRkYjBmOWZmY2E0NGRjN2JhZGU2YTM1OTFmNTQ0MTgzMzk1YTdjCmF1dGhvciBKb3NodWEgU2pvZGluZyA8am9zaHVhLnNqb2RpbmdAc2NqYWxsaWFuY2UuY29tPiAxNDU2NTMxNTgzIC0wODAwCmNvbW1pdHRlciBKb3NodWEgU2pvZGluZyA8am9zaHVhLnNqb2RpbmdAc2NqYWxsaWFuY2UuY29tPiAxNDU2NTMxNTgzIC0wODAwCgpUZXN0IENvbW1pdAo=", "eAGtjksOgjAUAF33FO8CktZ+aBNjTNy51Qs8Xl8FAjSh5f4SvILLmcVkKM/zUOEi3amuzMDBxE6mkBKhMZHaDiM71DaoZI1RXutgsSWBW+3zCs9c+g3hNeY4LB+4jgc35cf3QiNO04ALcUN5voEy1lmtrNdwll5Ksdt9oPIfUuLNpcLjCIov3ApFmQ==", }, } func Test(t *testing.T) { TestingT(t) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/objfile/doc.go000066400000000000000000000001251345605224300245100ustar00rootroot00000000000000// Package objfile implements encoding and decoding of object files. 
package objfile golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/objfile/reader.go000066400000000000000000000054031345605224300252110ustar00rootroot00000000000000package objfile import ( "compress/zlib" "errors" "io" "strconv" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" ) var ( ErrClosed = errors.New("objfile: already closed") ErrHeader = errors.New("objfile: invalid header") ErrNegativeSize = errors.New("objfile: negative object size") ) // Reader reads and decodes compressed objfile data from a provided io.Reader. // Reader implements io.ReadCloser. Close should be called when finished with // the Reader. Close will not close the underlying io.Reader. type Reader struct { multi io.Reader zlib io.ReadCloser hasher plumbing.Hasher } // NewReader returns a new Reader reading from r. func NewReader(r io.Reader) (*Reader, error) { zlib, err := zlib.NewReader(r) if err != nil { return nil, packfile.ErrZLib.AddDetails(err.Error()) } return &Reader{ zlib: zlib, }, nil } // Header reads the type and the size of object, and prepares the reader for read func (r *Reader) Header() (t plumbing.ObjectType, size int64, err error) { var raw []byte raw, err = r.readUntil(' ') if err != nil { return } t, err = plumbing.ParseObjectType(string(raw)) if err != nil { return } raw, err = r.readUntil(0) if err != nil { return } size, err = strconv.ParseInt(string(raw), 10, 64) if err != nil { err = ErrHeader return } defer r.prepareForRead(t, size) return } // readSlice reads one byte at a time from r until it encounters delim or an // error. 
func (r *Reader) readUntil(delim byte) ([]byte, error) { var buf [1]byte value := make([]byte, 0, 16) for { if n, err := r.zlib.Read(buf[:]); err != nil && (err != io.EOF || n == 0) { if err == io.EOF { return nil, ErrHeader } return nil, err } if buf[0] == delim { return value, nil } value = append(value, buf[0]) } } func (r *Reader) prepareForRead(t plumbing.ObjectType, size int64) { r.hasher = plumbing.NewHasher(t, size) r.multi = io.TeeReader(r.zlib, r.hasher) } // Read reads len(p) bytes into p from the object data stream. It returns // the number of bytes read (0 <= n <= len(p)) and any error encountered. Even // if Read returns n < len(p), it may use all of p as scratch space during the // call. // // If Read encounters the end of the data stream it will return err == io.EOF, // either in the current call if n > 0 or in a subsequent call. func (r *Reader) Read(p []byte) (n int, err error) { return r.multi.Read(p) } // Hash returns the hash of the object data stream that has been read so far. func (r *Reader) Hash() plumbing.Hash { return r.hasher.Sum() } // Close releases any resources consumed by the Reader. Calling Close does not // close the wrapped io.Reader originally passed to NewReader. func (r *Reader) Close() error { return r.zlib.Close() } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/objfile/reader_test.go000066400000000000000000000032731345605224300262530ustar00rootroot00000000000000package objfile import ( "bytes" "encoding/base64" "fmt" "io" "io/ioutil" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing" ) type SuiteReader struct{} var _ = Suite(&SuiteReader{}) func (s *SuiteReader) TestReadObjfile(c *C) { for k, fixture := range objfileFixtures { com := fmt.Sprintf("test %d: ", k) hash := plumbing.NewHash(fixture.hash) content, _ := base64.StdEncoding.DecodeString(fixture.content) data, _ := base64.StdEncoding.DecodeString(fixture.data) testReader(c, bytes.NewReader(data), hash, fixture.t, content, com) } } func testReader(c *C, source io.Reader, hash plumbing.Hash, t plumbing.ObjectType, content []byte, com string) { r, err := NewReader(source) c.Assert(err, IsNil) typ, size, err := r.Header() c.Assert(err, IsNil) c.Assert(typ, Equals, t) c.Assert(content, HasLen, int(size)) rc, err := ioutil.ReadAll(r) c.Assert(err, IsNil) c.Assert(rc, DeepEquals, content, Commentf("%scontent=%s, expected=%s", base64.StdEncoding.EncodeToString(rc), base64.StdEncoding.EncodeToString(content))) c.Assert(r.Hash(), Equals, hash) // Test Hash() before close c.Assert(r.Close(), IsNil) } func (s *SuiteReader) TestReadEmptyObjfile(c *C) { source := bytes.NewReader([]byte{}) _, err := NewReader(source) c.Assert(err, NotNil) } func (s *SuiteReader) TestReadGarbage(c *C) { source := bytes.NewReader([]byte("!@#$RO!@NROSADfinq@o#irn@oirfn")) _, err := NewReader(source) c.Assert(err, NotNil) } func (s *SuiteReader) TestReadCorruptZLib(c *C) { data, _ := base64.StdEncoding.DecodeString("eAFLysaalPUjBgAAAJsAHw") source := bytes.NewReader(data) r, err := NewReader(source) c.Assert(err, IsNil) _, _, err = r.Header() c.Assert(err, NotNil) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/objfile/writer.go000066400000000000000000000050241345605224300252620ustar00rootroot00000000000000package objfile import ( "compress/zlib" "errors" "io" "strconv" "gopkg.in/src-d/go-git.v4/plumbing" ) var ( ErrOverflow = errors.New("objfile: declared data length exceeded (overflow)") ) // Writer writes and encodes data in compressed objfile format to a 
provided // io.Writer. Close should be called when finished with the Writer. Close will // not close the underlying io.Writer. type Writer struct { raw io.Writer zlib io.WriteCloser hasher plumbing.Hasher multi io.Writer closed bool pending int64 // number of unwritten bytes } // NewWriter returns a new Writer writing to w. // // The returned Writer implements io.WriteCloser. Close should be called when // finished with the Writer. Close will not close the underlying io.Writer. func NewWriter(w io.Writer) *Writer { return &Writer{ raw: w, zlib: zlib.NewWriter(w), } } // WriteHeader writes the type and the size and prepares to accept the object's // contents. If an invalid t is provided, plumbing.ErrInvalidType is returned. If a // negative size is provided, ErrNegativeSize is returned. func (w *Writer) WriteHeader(t plumbing.ObjectType, size int64) error { if !t.Valid() { return plumbing.ErrInvalidType } if size < 0 { return ErrNegativeSize } b := t.Bytes() b = append(b, ' ') b = append(b, []byte(strconv.FormatInt(size, 10))...) b = append(b, 0) defer w.prepareForWrite(t, size) _, err := w.zlib.Write(b) return err } func (w *Writer) prepareForWrite(t plumbing.ObjectType, size int64) { w.pending = size w.hasher = plumbing.NewHasher(t, size) w.multi = io.MultiWriter(w.zlib, w.hasher) } // Write writes the object's contents. Write returns the error ErrOverflow if // more than size bytes are written after WriteHeader. func (w *Writer) Write(p []byte) (n int, err error) { if w.closed { return 0, ErrClosed } overwrite := false if int64(len(p)) > w.pending { p = p[0:w.pending] overwrite = true } n, err = w.multi.Write(p) w.pending -= int64(n) if err == nil && overwrite { err = ErrOverflow return } return } // Hash returns the hash of the object data stream that has been written so far. // It can be called before or after Close. 
func (w *Writer) Hash() plumbing.Hash { return w.hasher.Sum() // Not yet closed, return hash of data written so far } // Close releases any resources consumed by the Writer. // // Calling Close does not close the wrapped io.Writer originally passed to // NewWriter. func (w *Writer) Close() error { if err := w.zlib.Close(); err != nil { return err } w.closed = true return nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/objfile/writer_test.go000066400000000000000000000035451345605224300263270ustar00rootroot00000000000000package objfile import ( "bytes" "encoding/base64" "fmt" "io" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing" ) type SuiteWriter struct{} var _ = Suite(&SuiteWriter{}) func (s *SuiteWriter) TestWriteObjfile(c *C) { for k, fixture := range objfileFixtures { buffer := bytes.NewBuffer(nil) com := fmt.Sprintf("test %d: ", k) hash := plumbing.NewHash(fixture.hash) content, _ := base64.StdEncoding.DecodeString(fixture.content) // Write the data out to the buffer testWriter(c, buffer, hash, fixture.t, content) // Read the data back in from the buffer to be sure it matches testReader(c, buffer, hash, fixture.t, content, com) } } func testWriter(c *C, dest io.Writer, hash plumbing.Hash, t plumbing.ObjectType, content []byte) { size := int64(len(content)) w := NewWriter(dest) err := w.WriteHeader(t, size) c.Assert(err, IsNil) written, err := io.Copy(w, bytes.NewReader(content)) c.Assert(err, IsNil) c.Assert(written, Equals, size) c.Assert(w.Hash(), Equals, hash) c.Assert(w.Close(), IsNil) } func (s *SuiteWriter) TestWriteOverflow(c *C) { buf := bytes.NewBuffer(nil) w := NewWriter(buf) err := w.WriteHeader(plumbing.BlobObject, 8) c.Assert(err, IsNil) n, err := w.Write([]byte("1234")) c.Assert(err, IsNil) c.Assert(n, Equals, 4) n, err = w.Write([]byte("56789")) c.Assert(err, Equals, ErrOverflow) c.Assert(n, Equals, 4) } func (s *SuiteWriter) TestNewWriterInvalidType(c *C) { buf := bytes.NewBuffer(nil) w := NewWriter(buf) err := 
w.WriteHeader(plumbing.InvalidObject, 8) c.Assert(err, Equals, plumbing.ErrInvalidType) } func (s *SuiteWriter) TestNewWriterInvalidSize(c *C) { buf := bytes.NewBuffer(nil) w := NewWriter(buf) err := w.WriteHeader(plumbing.BlobObject, -1) c.Assert(err, Equals, ErrNegativeSize) err = w.WriteHeader(plumbing.BlobObject, -1651860) c.Assert(err, Equals, ErrNegativeSize) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/000077500000000000000000000000001345605224300235625ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/common.go000066400000000000000000000027721345605224300254110ustar00rootroot00000000000000package packfile import ( "bytes" "io" "sync" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) var signature = []byte{'P', 'A', 'C', 'K'} const ( // VersionSupported is the packfile version supported by this package VersionSupported uint32 = 2 firstLengthBits = uint8(4) // the first byte into object header has 4 bits to store the length lengthBits = uint8(7) // subsequent bytes has 7 bits to store the length maskFirstLength = 15 // 0000 1111 maskContinue = 0x80 // 1000 0000 maskLength = uint8(127) // 0111 1111 maskType = uint8(112) // 0111 0000 ) // UpdateObjectStorage updates the storer with the objects in the given // packfile. func UpdateObjectStorage(s storer.Storer, packfile io.Reader) error { if pw, ok := s.(storer.PackfileWriter); ok { return WritePackfileToObjectStorage(pw, packfile) } p, err := NewParserWithStorage(NewScanner(packfile), s) if err != nil { return err } _, err = p.Parse() return err } // WritePackfileToObjectStorage writes all the packfile objects into the given // object storage. 
func WritePackfileToObjectStorage( sw storer.PackfileWriter, packfile io.Reader, ) (err error) { w, err := sw.PackfileWriter() if err != nil { return err } defer ioutil.CheckClose(w, &err) var n int64 n, err = io.Copy(w, packfile) if err == nil && n == 0 { return ErrEmptyPackfile } return err } var bufPool = sync.Pool{ New: func() interface{} { return bytes.NewBuffer(nil) }, } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/common_test.go000066400000000000000000000015331345605224300264420ustar00rootroot00000000000000package packfile import ( "bytes" "testing" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/storage/memory" . "gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type CommonSuite struct{} var _ = Suite(&CommonSuite{}) func (s *CommonSuite) TestEmptyUpdateObjectStorage(c *C) { var buf bytes.Buffer sto := memory.NewStorage() err := UpdateObjectStorage(sto, &buf) c.Assert(err, Equals, ErrEmptyPackfile) } func newObject(t plumbing.ObjectType, cont []byte) plumbing.EncodedObject { o := plumbing.MemoryObject{} o.SetType(t) o.SetSize(int64(len(cont))) o.Write(cont) return &o } type piece struct { val string times int } func genBytes(elements []piece) []byte { var result []byte for _, e := range elements { for i := 0; i < e.times; i++ { result = append(result, e.val...) } } return result } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/delta_index.go000066400000000000000000000236761345605224300264070ustar00rootroot00000000000000package packfile const blksz = 16 const maxChainLength = 64 // deltaIndex is a modified version of JGit's DeltaIndex adapted to our current // design. 
type deltaIndex struct { table []int entries []int mask int } func (idx *deltaIndex) init(buf []byte) { scanner := newDeltaIndexScanner(buf, len(buf)) idx.mask = scanner.mask idx.table = scanner.table idx.entries = make([]int, countEntries(scanner)+1) idx.copyEntries(scanner) } // findMatch returns the offset of src where the block starting at tgtOffset // is and the length of the match. A length of 0 means there was no match. A // length of -1 means the src length is lower than the blksz and whatever // other positive length is the length of the match in bytes. func (idx *deltaIndex) findMatch(src, tgt []byte, tgtOffset int) (srcOffset, l int) { if len(tgt) < tgtOffset+s { return 0, len(tgt) - tgtOffset } if len(src) < blksz { return 0, -1 } if len(tgt) >= tgtOffset+s && len(src) >= blksz { h := hashBlock(tgt, tgtOffset) tIdx := h & idx.mask eIdx := idx.table[tIdx] if eIdx != 0 { srcOffset = idx.entries[eIdx] } else { return } l = matchLength(src, tgt, tgtOffset, srcOffset) } return } func matchLength(src, tgt []byte, otgt, osrc int) (l int) { lensrc := len(src) lentgt := len(tgt) for (osrc < lensrc && otgt < lentgt) && src[osrc] == tgt[otgt] { l++ osrc++ otgt++ } return } func countEntries(scan *deltaIndexScanner) (cnt int) { // Figure out exactly how many entries we need. As we do the // enumeration truncate any delta chains longer than what we // are willing to scan during encode. This keeps the encode // logic linear in the size of the input rather than quadratic. for i := 0; i < len(scan.table); i++ { h := scan.table[i] if h == 0 { continue } size := 0 for { size++ if size == maxChainLength { scan.next[h] = 0 break } h = scan.next[h] if h == 0 { break } } cnt += size } return } func (idx *deltaIndex) copyEntries(scanner *deltaIndexScanner) { // Rebuild the entries list from the scanner, positioning all // blocks in the same hash chain next to each other. We can // then later discard the next list, along with the scanner. 
// next := 1 for i := 0; i < len(idx.table); i++ { h := idx.table[i] if h == 0 { continue } idx.table[i] = next for { idx.entries[next] = scanner.entries[h] next++ h = scanner.next[h] if h == 0 { break } } } } type deltaIndexScanner struct { table []int entries []int next []int mask int count int } func newDeltaIndexScanner(buf []byte, size int) *deltaIndexScanner { size -= size % blksz worstCaseBlockCnt := size / blksz if worstCaseBlockCnt < 1 { return new(deltaIndexScanner) } tableSize := tableSize(worstCaseBlockCnt) scanner := &deltaIndexScanner{ table: make([]int, tableSize), mask: tableSize - 1, entries: make([]int, worstCaseBlockCnt+1), next: make([]int, worstCaseBlockCnt+1), } scanner.scan(buf, size) return scanner } // slightly modified version of JGit's DeltaIndexScanner. We store the offset on the entries // instead of the entries and the key, so we avoid operations to retrieve the offset later, as // we don't use the key. // See: https://github.com/eclipse/jgit/blob/005e5feb4ecd08c4e4d141a38b9e7942accb3212/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/pack/DeltaIndexScanner.java func (s *deltaIndexScanner) scan(buf []byte, end int) { lastHash := 0 ptr := end - blksz for { key := hashBlock(buf, ptr) tIdx := key & s.mask head := s.table[tIdx] if head != 0 && lastHash == key { s.entries[head] = ptr } else { s.count++ eIdx := s.count s.entries[eIdx] = ptr s.next[eIdx] = head s.table[tIdx] = eIdx } lastHash = key ptr -= blksz if 0 > ptr { break } } } func tableSize(worstCaseBlockCnt int) int { shift := 32 - leadingZeros(uint32(worstCaseBlockCnt)) sz := 1 << uint(shift-1) if sz < worstCaseBlockCnt { sz <<= 1 } return sz } // use https://golang.org/pkg/math/bits/#LeadingZeros32 in the future func leadingZeros(x uint32) (n int) { if x >= 1<<16 { x >>= 16 n = 16 } if x >= 1<<8 { x >>= 8 n += 8 } n += int(len8tab[x]) return 32 - n } var len8tab = [256]uint8{ 0x00, 0x01, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 
0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, } func hashBlock(raw []byte, ptr int) int { // The first 4 steps collapse out into a 4 byte big-endian decode, // with a larger right shift as we combined shift lefts together. 
// hash := ((uint32(raw[ptr]) & 0xff) << 24) | ((uint32(raw[ptr+1]) & 0xff) << 16) | ((uint32(raw[ptr+2]) & 0xff) << 8) | (uint32(raw[ptr+3]) & 0xff) hash ^= T[hash>>31] hash = ((hash << 8) | (uint32(raw[ptr+4]) & 0xff)) ^ T[hash>>23] hash = ((hash << 8) | (uint32(raw[ptr+5]) & 0xff)) ^ T[hash>>23] hash = ((hash << 8) | (uint32(raw[ptr+6]) & 0xff)) ^ T[hash>>23] hash = ((hash << 8) | (uint32(raw[ptr+7]) & 0xff)) ^ T[hash>>23] hash = ((hash << 8) | (uint32(raw[ptr+8]) & 0xff)) ^ T[hash>>23] hash = ((hash << 8) | (uint32(raw[ptr+9]) & 0xff)) ^ T[hash>>23] hash = ((hash << 8) | (uint32(raw[ptr+10]) & 0xff)) ^ T[hash>>23] hash = ((hash << 8) | (uint32(raw[ptr+11]) & 0xff)) ^ T[hash>>23] hash = ((hash << 8) | (uint32(raw[ptr+12]) & 0xff)) ^ T[hash>>23] hash = ((hash << 8) | (uint32(raw[ptr+13]) & 0xff)) ^ T[hash>>23] hash = ((hash << 8) | (uint32(raw[ptr+14]) & 0xff)) ^ T[hash>>23] hash = ((hash << 8) | (uint32(raw[ptr+15]) & 0xff)) ^ T[hash>>23] return int(hash) } var T = []uint32{0x00000000, 0xd4c6b32d, 0x7d4bd577, 0xa98d665a, 0x2e5119c3, 0xfa97aaee, 0x531accb4, 0x87dc7f99, 0x5ca23386, 0x886480ab, 0x21e9e6f1, 0xf52f55dc, 0x72f32a45, 0xa6359968, 0x0fb8ff32, 0xdb7e4c1f, 0x6d82d421, 0xb944670c, 0x10c90156, 0xc40fb27b, 0x43d3cde2, 0x97157ecf, 0x3e981895, 0xea5eabb8, 0x3120e7a7, 0xe5e6548a, 0x4c6b32d0, 0x98ad81fd, 0x1f71fe64, 0xcbb74d49, 0x623a2b13, 0xb6fc983e, 0x0fc31b6f, 0xdb05a842, 0x7288ce18, 0xa64e7d35, 0x219202ac, 0xf554b181, 0x5cd9d7db, 0x881f64f6, 0x536128e9, 0x87a79bc4, 0x2e2afd9e, 0xfaec4eb3, 0x7d30312a, 0xa9f68207, 0x007be45d, 0xd4bd5770, 0x6241cf4e, 0xb6877c63, 0x1f0a1a39, 0xcbcca914, 0x4c10d68d, 0x98d665a0, 0x315b03fa, 0xe59db0d7, 0x3ee3fcc8, 0xea254fe5, 0x43a829bf, 0x976e9a92, 0x10b2e50b, 0xc4745626, 0x6df9307c, 0xb93f8351, 0x1f8636de, 0xcb4085f3, 0x62cde3a9, 0xb60b5084, 0x31d72f1d, 0xe5119c30, 0x4c9cfa6a, 0x985a4947, 0x43240558, 0x97e2b675, 0x3e6fd02f, 0xeaa96302, 0x6d751c9b, 0xb9b3afb6, 0x103ec9ec, 0xc4f87ac1, 0x7204e2ff, 0xa6c251d2, 0x0f4f3788, 0xdb8984a5, 
0x5c55fb3c, 0x88934811, 0x211e2e4b, 0xf5d89d66, 0x2ea6d179, 0xfa606254, 0x53ed040e, 0x872bb723, 0x00f7c8ba, 0xd4317b97, 0x7dbc1dcd, 0xa97aaee0, 0x10452db1, 0xc4839e9c, 0x6d0ef8c6, 0xb9c84beb, 0x3e143472, 0xead2875f, 0x435fe105, 0x97995228, 0x4ce71e37, 0x9821ad1a, 0x31accb40, 0xe56a786d, 0x62b607f4, 0xb670b4d9, 0x1ffdd283, 0xcb3b61ae, 0x7dc7f990, 0xa9014abd, 0x008c2ce7, 0xd44a9fca, 0x5396e053, 0x8750537e, 0x2edd3524, 0xfa1b8609, 0x2165ca16, 0xf5a3793b, 0x5c2e1f61, 0x88e8ac4c, 0x0f34d3d5, 0xdbf260f8, 0x727f06a2, 0xa6b9b58f, 0x3f0c6dbc, 0xebcade91, 0x4247b8cb, 0x96810be6, 0x115d747f, 0xc59bc752, 0x6c16a108, 0xb8d01225, 0x63ae5e3a, 0xb768ed17, 0x1ee58b4d, 0xca233860, 0x4dff47f9, 0x9939f4d4, 0x30b4928e, 0xe47221a3, 0x528eb99d, 0x86480ab0, 0x2fc56cea, 0xfb03dfc7, 0x7cdfa05e, 0xa8191373, 0x01947529, 0xd552c604, 0x0e2c8a1b, 0xdaea3936, 0x73675f6c, 0xa7a1ec41, 0x207d93d8, 0xf4bb20f5, 0x5d3646af, 0x89f0f582, 0x30cf76d3, 0xe409c5fe, 0x4d84a3a4, 0x99421089, 0x1e9e6f10, 0xca58dc3d, 0x63d5ba67, 0xb713094a, 0x6c6d4555, 0xb8abf678, 0x11269022, 0xc5e0230f, 0x423c5c96, 0x96faefbb, 0x3f7789e1, 0xebb13acc, 0x5d4da2f2, 0x898b11df, 0x20067785, 0xf4c0c4a8, 0x731cbb31, 0xa7da081c, 0x0e576e46, 0xda91dd6b, 0x01ef9174, 0xd5292259, 0x7ca44403, 0xa862f72e, 0x2fbe88b7, 0xfb783b9a, 0x52f55dc0, 0x8633eeed, 0x208a5b62, 0xf44ce84f, 0x5dc18e15, 0x89073d38, 0x0edb42a1, 0xda1df18c, 0x739097d6, 0xa75624fb, 0x7c2868e4, 0xa8eedbc9, 0x0163bd93, 0xd5a50ebe, 0x52797127, 0x86bfc20a, 0x2f32a450, 0xfbf4177d, 0x4d088f43, 0x99ce3c6e, 0x30435a34, 0xe485e919, 0x63599680, 0xb79f25ad, 0x1e1243f7, 0xcad4f0da, 0x11aabcc5, 0xc56c0fe8, 0x6ce169b2, 0xb827da9f, 0x3ffba506, 0xeb3d162b, 0x42b07071, 0x9676c35c, 0x2f49400d, 0xfb8ff320, 0x5202957a, 0x86c42657, 0x011859ce, 0xd5deeae3, 0x7c538cb9, 0xa8953f94, 0x73eb738b, 0xa72dc0a6, 0x0ea0a6fc, 0xda6615d1, 0x5dba6a48, 0x897cd965, 0x20f1bf3f, 0xf4370c12, 0x42cb942c, 0x960d2701, 0x3f80415b, 0xeb46f276, 0x6c9a8def, 0xb85c3ec2, 0x11d15898, 0xc517ebb5, 0x1e69a7aa, 0xcaaf1487, 
0x632272dd, 0xb7e4c1f0, 0x3038be69, 0xe4fe0d44, 0x4d736b1e, 0x99b5d833, } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/delta_selector.go000066400000000000000000000206631345605224300271110ustar00rootroot00000000000000package packfile import ( "sort" "sync" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" ) const ( // deltas based on deltas, how many steps we can do. // 50 is the default value used in JGit maxDepth = int64(50) ) // applyDelta is the set of object types that we should apply deltas var applyDelta = map[plumbing.ObjectType]bool{ plumbing.BlobObject: true, plumbing.TreeObject: true, } type deltaSelector struct { storer storer.EncodedObjectStorer } func newDeltaSelector(s storer.EncodedObjectStorer) *deltaSelector { return &deltaSelector{s} } // ObjectsToPack creates a list of ObjectToPack from the hashes // provided, creating deltas if it's suitable, using an specific // internal logic. `packWindow` specifies the size of the sliding // window used to compare objects for delta compression; 0 turns off // delta compression entirely. 
func (dw *deltaSelector) ObjectsToPack( hashes []plumbing.Hash, packWindow uint, ) ([]*ObjectToPack, error) { otp, err := dw.objectsToPack(hashes, packWindow) if err != nil { return nil, err } if packWindow == 0 { return otp, nil } dw.sort(otp) var objectGroups [][]*ObjectToPack var prev *ObjectToPack i := -1 for _, obj := range otp { if prev == nil || prev.Type() != obj.Type() { objectGroups = append(objectGroups, []*ObjectToPack{obj}) i++ prev = obj } else { objectGroups[i] = append(objectGroups[i], obj) } } var wg sync.WaitGroup var once sync.Once for _, objs := range objectGroups { objs := objs wg.Add(1) go func() { if walkErr := dw.walk(objs, packWindow); walkErr != nil { once.Do(func() { err = walkErr }) } wg.Done() }() } wg.Wait() if err != nil { return nil, err } return otp, nil } func (dw *deltaSelector) objectsToPack( hashes []plumbing.Hash, packWindow uint, ) ([]*ObjectToPack, error) { var objectsToPack []*ObjectToPack for _, h := range hashes { var o plumbing.EncodedObject var err error if packWindow == 0 { o, err = dw.encodedObject(h) } else { o, err = dw.encodedDeltaObject(h) } if err != nil { return nil, err } otp := newObjectToPack(o) if _, ok := o.(plumbing.DeltaObject); ok { otp.CleanOriginal() } objectsToPack = append(objectsToPack, otp) } if packWindow == 0 { return objectsToPack, nil } if err := dw.fixAndBreakChains(objectsToPack); err != nil { return nil, err } return objectsToPack, nil } func (dw *deltaSelector) encodedDeltaObject(h plumbing.Hash) (plumbing.EncodedObject, error) { edos, ok := dw.storer.(storer.DeltaObjectStorer) if !ok { return dw.encodedObject(h) } return edos.DeltaObject(plumbing.AnyObject, h) } func (dw *deltaSelector) encodedObject(h plumbing.Hash) (plumbing.EncodedObject, error) { return dw.storer.EncodedObject(plumbing.AnyObject, h) } func (dw *deltaSelector) fixAndBreakChains(objectsToPack []*ObjectToPack) error { m := make(map[plumbing.Hash]*ObjectToPack, len(objectsToPack)) for _, otp := range objectsToPack { 
m[otp.Hash()] = otp } for _, otp := range objectsToPack { if err := dw.fixAndBreakChainsOne(m, otp); err != nil { return err } } return nil } func (dw *deltaSelector) fixAndBreakChainsOne(objectsToPack map[plumbing.Hash]*ObjectToPack, otp *ObjectToPack) error { if !otp.Object.Type().IsDelta() { return nil } // Initial ObjectToPack instances might have a delta assigned to Object // but no actual base initially. Once Base is assigned to a delta, it means // we already fixed it. if otp.Base != nil { return nil } do, ok := otp.Object.(plumbing.DeltaObject) if !ok { // if this is not a DeltaObject, then we cannot retrieve its base, // so we have to break the delta chain here. return dw.undeltify(otp) } base, ok := objectsToPack[do.BaseHash()] if !ok { // The base of the delta is not in our list of objects to pack, so // we break the chain. return dw.undeltify(otp) } if err := dw.fixAndBreakChainsOne(objectsToPack, base); err != nil { return err } otp.SetDelta(base, otp.Object) return nil } func (dw *deltaSelector) restoreOriginal(otp *ObjectToPack) error { if otp.Original != nil { return nil } if !otp.Object.Type().IsDelta() { return nil } obj, err := dw.encodedObject(otp.Hash()) if err != nil { return err } otp.SetOriginal(obj) return nil } // undeltify undeltifies an *ObjectToPack by retrieving the original object from // the storer and resetting it. func (dw *deltaSelector) undeltify(otp *ObjectToPack) error { if err := dw.restoreOriginal(otp); err != nil { return err } otp.Object = otp.Original otp.Depth = 0 return nil } func (dw *deltaSelector) sort(objectsToPack []*ObjectToPack) { sort.Sort(byTypeAndSize(objectsToPack)) } func (dw *deltaSelector) walk( objectsToPack []*ObjectToPack, packWindow uint, ) error { indexMap := make(map[plumbing.Hash]*deltaIndex) for i := 0; i < len(objectsToPack); i++ { // Clean up the index map and reconstructed delta objects for anything // outside our pack window, to save memory. 
if i > int(packWindow) { obj := objectsToPack[i-int(packWindow)] delete(indexMap, obj.Hash()) if obj.IsDelta() { obj.SaveOriginalMetadata() obj.CleanOriginal() } } target := objectsToPack[i] // If we already have a delta, we don't try to find a new one for this // object. This happens when a delta is set to be reused from an existing // packfile. if target.IsDelta() { continue } // We only want to create deltas from specific types. if !applyDelta[target.Type()] { continue } for j := i - 1; j >= 0 && i-j < int(packWindow); j-- { base := objectsToPack[j] // Objects must use only the same type as their delta base. // Since objectsToPack is sorted by type and size, once we find // a different type, we know we won't find more of them. if base.Type() != target.Type() { break } if err := dw.tryToDeltify(indexMap, base, target); err != nil { return err } } } return nil } func (dw *deltaSelector) tryToDeltify(indexMap map[plumbing.Hash]*deltaIndex, base, target *ObjectToPack) error { // Original object might not be present if we're reusing a delta, so we // ensure it is restored. if err := dw.restoreOriginal(target); err != nil { return err } if err := dw.restoreOriginal(base); err != nil { return err } // If the sizes are radically different, this is a bad pairing. if target.Size() < base.Size()>>4 { return nil } msz := dw.deltaSizeLimit( target.Object.Size(), base.Depth, target.Depth, target.IsDelta(), ) // Nearly impossible to fit useful delta. if msz <= 8 { return nil } // If we have to insert a lot to make this work, find another. 
if base.Size()-target.Size() > msz { return nil } if _, ok := indexMap[base.Hash()]; !ok { indexMap[base.Hash()] = new(deltaIndex) } // Now we can generate the delta using originals delta, err := getDelta(indexMap[base.Hash()], base.Original, target.Original) if err != nil { return err } // if delta better than target if delta.Size() < msz { target.SetDelta(base, delta) } return nil } func (dw *deltaSelector) deltaSizeLimit(targetSize int64, baseDepth int, targetDepth int, targetDelta bool) int64 { if !targetDelta { // Any delta should be no more than 50% of the original size // (for text files deflate of whole form should shrink 50%). n := targetSize >> 1 // Evenly distribute delta size limits over allowed depth. // If src is non-delta (depth = 0), delta <= 50% of original. // If src is almost at limit (9/10), delta <= 10% of original. return n * (maxDepth - int64(baseDepth)) / maxDepth } // With a delta base chosen any new delta must be "better". // Retain the distribution described above. d := int64(targetDepth) n := targetSize // If target depth is bigger than maxDepth, this delta is not suitable to be used. if d >= maxDepth { return 0 } // If src is whole (depth=0) and base is near limit (depth=9/10) // any delta using src can be 10x larger and still be better. // // If src is near limit (depth=9/10) and base is whole (depth=0) // a new delta dependent on src must be 1/10th the size. 
return n * (maxDepth - int64(baseDepth)) / (maxDepth - d) } type byTypeAndSize []*ObjectToPack func (a byTypeAndSize) Len() int { return len(a) } func (a byTypeAndSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byTypeAndSize) Less(i, j int) bool { if a[i].Type() < a[j].Type() { return false } if a[i].Type() > a[j].Type() { return true } return a[i].Size() > a[j].Size() } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/delta_selector_test.go000066400000000000000000000150151345605224300301430ustar00rootroot00000000000000package packfile import ( "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/storage/memory" . "gopkg.in/check.v1" ) type DeltaSelectorSuite struct { ds *deltaSelector store *memory.Storage hashes map[string]plumbing.Hash } var _ = Suite(&DeltaSelectorSuite{}) func (s *DeltaSelectorSuite) SetUpTest(c *C) { s.store = memory.NewStorage() s.createTestObjects() s.ds = newDeltaSelector(s.store) } func (s *DeltaSelectorSuite) TestSort(c *C) { var o1 = newObjectToPack(newObject(plumbing.BlobObject, []byte("00000"))) var o4 = newObjectToPack(newObject(plumbing.BlobObject, []byte("0000"))) var o6 = newObjectToPack(newObject(plumbing.BlobObject, []byte("00"))) var o9 = newObjectToPack(newObject(plumbing.BlobObject, []byte("0"))) var o8 = newObjectToPack(newObject(plumbing.TreeObject, []byte("000"))) var o2 = newObjectToPack(newObject(plumbing.TreeObject, []byte("00"))) var o3 = newObjectToPack(newObject(plumbing.TreeObject, []byte("0"))) var o5 = newObjectToPack(newObject(plumbing.CommitObject, []byte("0000"))) var o7 = newObjectToPack(newObject(plumbing.CommitObject, []byte("00"))) toSort := []*ObjectToPack{o1, o2, o3, o4, o5, o6, o7, o8, o9} s.ds.sort(toSort) expected := []*ObjectToPack{o1, o4, o6, o9, o8, o2, o3, o5, o7} c.Assert(toSort, DeepEquals, expected) } type testObject struct { id string object plumbing.EncodedObject } var testObjects []*testObject = []*testObject{{ id: "base", object: newObject(plumbing.BlobObject, 
genBytes([]piece{{ times: 1000, val: "a", }, { times: 1000, val: "b", }})), }, { id: "smallBase", object: newObject(plumbing.BlobObject, genBytes([]piece{{ times: 1, val: "a", }, { times: 1, val: "b", }, { times: 6, val: "c", }})), }, { id: "smallTarget", object: newObject(plumbing.BlobObject, genBytes([]piece{{ times: 1, val: "a", }, { times: 1, val: "c", }})), }, { id: "target", object: newObject(plumbing.BlobObject, genBytes([]piece{{ times: 1000, val: "a", }, { times: 1000, val: "b", }, { times: 1000, val: "c", }})), }, { id: "o1", object: newObject(plumbing.BlobObject, genBytes([]piece{{ times: 1000, val: "a", }, { times: 1000, val: "b", }})), }, { id: "o2", object: newObject(plumbing.BlobObject, genBytes([]piece{{ times: 1000, val: "a", }, { times: 500, val: "b", }})), }, { id: "o3", object: newObject(plumbing.BlobObject, genBytes([]piece{{ times: 1000, val: "a", }, { times: 499, val: "b", }})), }, { id: "bigBase", object: newObject(plumbing.BlobObject, genBytes([]piece{{ times: 1000000, val: "a", }})), }, { id: "treeType", object: newObject(plumbing.TreeObject, []byte("I am a tree!")), }} func (s *DeltaSelectorSuite) createTestObjects() { s.hashes = make(map[string]plumbing.Hash) for _, o := range testObjects { h, err := s.store.SetEncodedObject(o.object) if err != nil { panic(err) } s.hashes[o.id] = h } } func (s *DeltaSelectorSuite) TestObjectsToPack(c *C) { // Different type hashes := []plumbing.Hash{s.hashes["base"], s.hashes["treeType"]} deltaWindowSize := uint(10) otp, err := s.ds.ObjectsToPack(hashes, deltaWindowSize) c.Assert(err, IsNil) c.Assert(len(otp), Equals, 2) c.Assert(otp[0].Object, Equals, s.store.Objects[s.hashes["base"]]) c.Assert(otp[1].Object, Equals, s.store.Objects[s.hashes["treeType"]]) // Size radically different hashes = []plumbing.Hash{s.hashes["bigBase"], s.hashes["target"]} otp, err = s.ds.ObjectsToPack(hashes, deltaWindowSize) c.Assert(err, IsNil) c.Assert(len(otp), Equals, 2) c.Assert(otp[0].Object, Equals, 
s.store.Objects[s.hashes["bigBase"]]) c.Assert(otp[1].Object, Equals, s.store.Objects[s.hashes["target"]]) // Delta Size Limit with no best delta yet hashes = []plumbing.Hash{s.hashes["smallBase"], s.hashes["smallTarget"]} otp, err = s.ds.ObjectsToPack(hashes, deltaWindowSize) c.Assert(err, IsNil) c.Assert(len(otp), Equals, 2) c.Assert(otp[0].Object, Equals, s.store.Objects[s.hashes["smallBase"]]) c.Assert(otp[1].Object, Equals, s.store.Objects[s.hashes["smallTarget"]]) // It will create the delta hashes = []plumbing.Hash{s.hashes["base"], s.hashes["target"]} otp, err = s.ds.ObjectsToPack(hashes, deltaWindowSize) c.Assert(err, IsNil) c.Assert(len(otp), Equals, 2) c.Assert(otp[0].Object, Equals, s.store.Objects[s.hashes["target"]]) c.Assert(otp[0].IsDelta(), Equals, false) c.Assert(otp[1].Original, Equals, s.store.Objects[s.hashes["base"]]) c.Assert(otp[1].IsDelta(), Equals, true) c.Assert(otp[1].Depth, Equals, 1) // If our base is another delta, the depth will increase by one hashes = []plumbing.Hash{ s.hashes["o1"], s.hashes["o2"], s.hashes["o3"], } otp, err = s.ds.ObjectsToPack(hashes, deltaWindowSize) c.Assert(err, IsNil) c.Assert(len(otp), Equals, 3) c.Assert(otp[0].Object, Equals, s.store.Objects[s.hashes["o1"]]) c.Assert(otp[0].IsDelta(), Equals, false) c.Assert(otp[1].Original, Equals, s.store.Objects[s.hashes["o2"]]) c.Assert(otp[1].IsDelta(), Equals, true) c.Assert(otp[1].Depth, Equals, 1) c.Assert(otp[2].Original, Equals, s.store.Objects[s.hashes["o3"]]) c.Assert(otp[2].IsDelta(), Equals, true) c.Assert(otp[2].Depth, Equals, 2) // Check that objects outside of the sliding window don't produce // a delta. hashes = make([]plumbing.Hash, 0, deltaWindowSize+2) hashes = append(hashes, s.hashes["base"]) for i := uint(0); i < deltaWindowSize; i++ { hashes = append(hashes, s.hashes["smallTarget"]) } hashes = append(hashes, s.hashes["target"]) // Don't sort so we can easily check the sliding window without // creating a bunch of new objects. 
otp, err = s.ds.objectsToPack(hashes, deltaWindowSize) c.Assert(err, IsNil) err = s.ds.walk(otp, deltaWindowSize) c.Assert(err, IsNil) c.Assert(len(otp), Equals, int(deltaWindowSize)+2) targetIdx := len(otp) - 1 c.Assert(otp[targetIdx].IsDelta(), Equals, false) // Check that no deltas are created, and the objects are unsorted, // if compression is off. hashes = []plumbing.Hash{s.hashes["base"], s.hashes["target"]} otp, err = s.ds.ObjectsToPack(hashes, 0) c.Assert(err, IsNil) c.Assert(len(otp), Equals, 2) c.Assert(otp[0].Object, Equals, s.store.Objects[s.hashes["base"]]) c.Assert(otp[0].IsDelta(), Equals, false) c.Assert(otp[1].Original, Equals, s.store.Objects[s.hashes["target"]]) c.Assert(otp[1].IsDelta(), Equals, false) c.Assert(otp[1].Depth, Equals, 0) } func (s *DeltaSelectorSuite) TestMaxDepth(c *C) { dsl := s.ds.deltaSizeLimit(0, 0, int(maxDepth), true) c.Assert(dsl, Equals, int64(0)) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/delta_test.go000066400000000000000000000062621345605224300262470ustar00rootroot00000000000000package packfile import ( "math/rand" . 
"gopkg.in/check.v1" ) type DeltaSuite struct { testCases []deltaTest } var _ = Suite(&DeltaSuite{}) type deltaTest struct { description string base []piece target []piece } func (s *DeltaSuite) SetUpSuite(c *C) { s.testCases = []deltaTest{{ description: "distinct file", base: []piece{{"0", 300}}, target: []piece{{"2", 200}}, }, { description: "same file", base: []piece{{"1", 3000}}, target: []piece{{"1", 3000}}, }, { description: "small file", base: []piece{{"1", 3}}, target: []piece{{"1", 3}, {"0", 1}}, }, { description: "big file", base: []piece{{"1", 300000}}, target: []piece{{"1", 30000}, {"0", 1000000}}, }, { description: "add elements before", base: []piece{{"0", 200}}, target: []piece{{"1", 300}, {"0", 200}}, }, { description: "add 10 times more elements at the end", base: []piece{{"1", 300}, {"0", 200}}, target: []piece{{"0", 2000}}, }, { description: "add elements between", base: []piece{{"0", 400}}, target: []piece{{"0", 200}, {"1", 200}, {"0", 200}}, }, { description: "add elements after", base: []piece{{"0", 200}}, target: []piece{{"0", 200}, {"1", 200}}, }, { description: "modify elements at the end", base: []piece{{"1", 300}, {"0", 200}}, target: []piece{{"0", 100}}, }, { description: "complex modification", base: []piece{{"0", 3}, {"1", 40}, {"2", 30}, {"3", 2}, {"4", 400}, {"5", 23}}, target: []piece{{"1", 30}, {"2", 20}, {"7", 40}, {"4", 400}, {"5", 10}}, }, { description: "A copy operation bigger than 64kb", base: []piece{{bigRandStr, 1}, {"1", 200}}, target: []piece{{bigRandStr, 1}}, }} } var bigRandStr = randStringBytes(100 * 1024) const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" func randBytes(n int) []byte { b := make([]byte, n) for i := range b { b[i] = letterBytes[rand.Intn(len(letterBytes))] } return b } func randStringBytes(n int) string { return string(randBytes(n)) } func (s *DeltaSuite) TestAddDelta(c *C) { for _, t := range s.testCases { baseBuf := genBytes(t.base) targetBuf := genBytes(t.target) delta := 
DiffDelta(baseBuf, targetBuf) result, err := PatchDelta(baseBuf, delta) c.Log("Executing test case:", t.description) c.Assert(err, IsNil) c.Assert(result, DeepEquals, targetBuf) } } func (s *DeltaSuite) TestIncompleteDelta(c *C) { for _, t := range s.testCases { c.Log("Incomplete delta on:", t.description) baseBuf := genBytes(t.base) targetBuf := genBytes(t.target) delta := DiffDelta(baseBuf, targetBuf) delta = delta[:len(delta)-2] result, err := PatchDelta(baseBuf, delta) c.Assert(err, NotNil) c.Assert(result, IsNil) } // check nil input too result, err := PatchDelta(nil, nil) c.Assert(err, NotNil) c.Assert(result, IsNil) } func (s *DeltaSuite) TestMaxCopySizeDelta(c *C) { baseBuf := randBytes(maxCopySize) targetBuf := baseBuf[0:] targetBuf = append(targetBuf, byte(1)) delta := DiffDelta(baseBuf, targetBuf) result, err := PatchDelta(baseBuf, delta) c.Assert(err, IsNil) c.Assert(result, DeepEquals, targetBuf) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/diff_delta.go000066400000000000000000000100541345605224300261720ustar00rootroot00000000000000package packfile import ( "bytes" "gopkg.in/src-d/go-git.v4/plumbing" ) // See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and // https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js // for more info const ( // Standard chunk size used to generate fingerprints s = 16 // https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428 // Max size of a copy operation (64KB) maxCopySize = 64 * 1024 ) // GetDelta returns an EncodedObject of type OFSDeltaObject. Base and Target object, // will be loaded into memory to be able to create the delta object. // To generate target again, you will need the obtained object and "base" one. // Error will be returned if base or target object cannot be read. 
func GetDelta(base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) { return getDelta(new(deltaIndex), base, target) } func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) { br, err := base.Reader() if err != nil { return nil, err } defer br.Close() tr, err := target.Reader() if err != nil { return nil, err } defer tr.Close() bb := bufPool.Get().(*bytes.Buffer) bb.Reset() defer bufPool.Put(bb) _, err = bb.ReadFrom(br) if err != nil { return nil, err } tb := bufPool.Get().(*bytes.Buffer) tb.Reset() defer bufPool.Put(tb) _, err = tb.ReadFrom(tr) if err != nil { return nil, err } db := diffDelta(index, bb.Bytes(), tb.Bytes()) delta := &plumbing.MemoryObject{} _, err = delta.Write(db) if err != nil { return nil, err } delta.SetSize(int64(len(db))) delta.SetType(plumbing.OFSDeltaObject) return delta, nil } // DiffDelta returns the delta that transforms src into tgt. func DiffDelta(src, tgt []byte) []byte { return diffDelta(new(deltaIndex), src, tgt) } func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte { buf := bufPool.Get().(*bytes.Buffer) buf.Reset() buf.Write(deltaEncodeSize(len(src))) buf.Write(deltaEncodeSize(len(tgt))) if len(index.entries) == 0 { index.init(src) } ibuf := bufPool.Get().(*bytes.Buffer) ibuf.Reset() for i := 0; i < len(tgt); i++ { offset, l := index.findMatch(src, tgt, i) if l == 0 { // couldn't find a match, just write the current byte and continue ibuf.WriteByte(tgt[i]) } else if l < 0 { // src is less than blksz, copy the rest of the target to avoid // calls to findMatch for ; i < len(tgt); i++ { ibuf.WriteByte(tgt[i]) } } else if l < s { // remaining target is less than blksz, copy what's left of it // and avoid calls to findMatch for j := i; j < i+l; j++ { ibuf.WriteByte(tgt[j]) } i += l - 1 } else { encodeInsertOperation(ibuf, buf) rl := l aOffset := offset for rl > 0 { if rl < maxCopySize { buf.Write(encodeCopyOperation(aOffset, rl)) break } 
buf.Write(encodeCopyOperation(aOffset, maxCopySize)) rl -= maxCopySize aOffset += maxCopySize } i += l - 1 } } encodeInsertOperation(ibuf, buf) bytes := buf.Bytes() bufPool.Put(buf) bufPool.Put(ibuf) return bytes } func encodeInsertOperation(ibuf, buf *bytes.Buffer) { if ibuf.Len() == 0 { return } b := ibuf.Bytes() s := ibuf.Len() o := 0 for { if s <= 127 { break } buf.WriteByte(byte(127)) buf.Write(b[o : o+127]) s -= 127 o += 127 } buf.WriteByte(byte(s)) buf.Write(b[o : o+s]) ibuf.Reset() } func deltaEncodeSize(size int) []byte { var ret []byte c := size & 0x7f size >>= 7 for { if size == 0 { break } ret = append(ret, byte(c|0x80)) c = size & 0x7f size >>= 7 } ret = append(ret, byte(c)) return ret } func encodeCopyOperation(offset, length int) []byte { code := 0x80 var opcodes []byte var i uint for i = 0; i < 4; i++ { f := 0xff << (i * 8) if offset&f != 0 { opcodes = append(opcodes, byte(offset&f>>(i*8))) code |= 0x01 << i } } for i = 0; i < 3; i++ { f := 0xff << (i * 8) if length&f != 0 { opcodes = append(opcodes, byte(length&f>>(i*8))) code |= 0x10 << i } } return append([]byte{byte(code)}, opcodes...) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/doc.go000066400000000000000000000025151345605224300246610ustar00rootroot00000000000000// Package packfile implements encoding and decoding of packfile format. // // == pack-*.pack files have the following format: // // - A header appears at the beginning and consists of the following: // // 4-byte signature: // The signature is: {'P', 'A', 'C', 'K'} // // 4-byte version number (network byte order): // GIT currently accepts version number 2 or 3 but // generates version 2 only. // // 4-byte number of objects contained in the pack (network byte order) // // Observation: we cannot have more than 4G versions ;-) and // more than 4G objects in a pack. 
// // - The header is followed by number of object entries, each of // which looks like this: // // (undeltified representation) // n-byte type and length (3-bit type, (n-1)*7+4-bit length) // compressed data // // (deltified representation) // n-byte type and length (3-bit type, (n-1)*7+4-bit length) // 20-byte base object name // compressed delta data // // Observation: length of each object is encoded in a variable // length format and is not constrained to 32-bit or anything. // // - The trailer records 20-byte SHA1 checksum of all of the above. // // // Source: // https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-protocol.txt package packfile golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/encoder.go000066400000000000000000000114511345605224300255320ustar00rootroot00000000000000package packfile import ( "compress/zlib" "crypto/sha1" "fmt" "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/utils/binary" ) // Encoder gets the data from the storage and write it into the writer in PACK // format type Encoder struct { selector *deltaSelector w *offsetWriter zw *zlib.Writer hasher plumbing.Hasher useRefDeltas bool } // NewEncoder creates a new packfile encoder using a specific Writer and // EncodedObjectStorer. By default deltas used to generate the packfile will be // OFSDeltaObject. To use Reference deltas, set useRefDeltas to true. func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder { h := plumbing.Hasher{ Hash: sha1.New(), } mw := io.MultiWriter(w, h) ow := newOffsetWriter(mw) zw := zlib.NewWriter(mw) return &Encoder{ selector: newDeltaSelector(s), w: ow, zw: zw, hasher: h, useRefDeltas: useRefDeltas, } } // Encode creates a packfile containing all the objects referenced in // hashes and writes it to the writer in the Encoder. 
`packWindow` // specifies the size of the sliding window used to compare objects // for delta compression; 0 turns off delta compression entirely. func (e *Encoder) Encode( hashes []plumbing.Hash, packWindow uint, ) (plumbing.Hash, error) { objects, err := e.selector.ObjectsToPack(hashes, packWindow) if err != nil { return plumbing.ZeroHash, err } return e.encode(objects) } func (e *Encoder) encode(objects []*ObjectToPack) (plumbing.Hash, error) { if err := e.head(len(objects)); err != nil { return plumbing.ZeroHash, err } for _, o := range objects { if err := e.entry(o); err != nil { return plumbing.ZeroHash, err } } return e.footer() } func (e *Encoder) head(numEntries int) error { return binary.Write( e.w, signature, int32(VersionSupported), int32(numEntries), ) } func (e *Encoder) entry(o *ObjectToPack) error { if o.WantWrite() { // A cycle exists in this delta chain. This should only occur if a // selected object representation disappeared during writing // (for example due to a concurrent repack) and a different base // was chosen, forcing a cycle. Select something other than a // delta, and write this object. 
e.selector.restoreOriginal(o) o.BackToOriginal() } if o.IsWritten() { return nil } o.MarkWantWrite() if err := e.writeBaseIfDelta(o); err != nil { return err } // We need to check if we already write that object due a cyclic delta chain if o.IsWritten() { return nil } o.Offset = e.w.Offset() if o.IsDelta() { if err := e.writeDeltaHeader(o); err != nil { return err } } else { if err := e.entryHead(o.Type(), o.Size()); err != nil { return err } } e.zw.Reset(e.w) or, err := o.Object.Reader() if err != nil { return err } _, err = io.Copy(e.zw, or) if err != nil { return err } return e.zw.Close() } func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error { if o.IsDelta() && !o.Base.IsWritten() { // We must write base first return e.entry(o.Base) } return nil } func (e *Encoder) writeDeltaHeader(o *ObjectToPack) error { // Write offset deltas by default t := plumbing.OFSDeltaObject if e.useRefDeltas { t = plumbing.REFDeltaObject } if err := e.entryHead(t, o.Object.Size()); err != nil { return err } if e.useRefDeltas { return e.writeRefDeltaHeader(o.Base.Hash()) } else { return e.writeOfsDeltaHeader(o) } } func (e *Encoder) writeRefDeltaHeader(base plumbing.Hash) error { return binary.Write(e.w, base) } func (e *Encoder) writeOfsDeltaHeader(o *ObjectToPack) error { // for OFS_DELTA, offset of the base is interpreted as negative offset // relative to the type-byte of the header of the ofs-delta entry. 
relativeOffset := o.Offset - o.Base.Offset if relativeOffset <= 0 { return fmt.Errorf("bad offset for OFS_DELTA entry: %d", relativeOffset) } return binary.WriteVariableWidthInt(e.w, relativeOffset) } func (e *Encoder) entryHead(typeNum plumbing.ObjectType, size int64) error { t := int64(typeNum) header := []byte{} c := (t << firstLengthBits) | (size & maskFirstLength) size >>= firstLengthBits for { if size == 0 { break } header = append(header, byte(c|maskContinue)) c = size & int64(maskLength) size >>= lengthBits } header = append(header, byte(c)) _, err := e.w.Write(header) return err } func (e *Encoder) footer() (plumbing.Hash, error) { h := e.hasher.Sum() return h, binary.Write(e.w, h) } type offsetWriter struct { w io.Writer offset int64 } func newOffsetWriter(w io.Writer) *offsetWriter { return &offsetWriter{w: w} } func (ow *offsetWriter) Write(p []byte) (n int, err error) { n, err = ow.w.Write(p) ow.offset += int64(n) return n, err } func (ow *offsetWriter) Offset() int64 { return ow.offset } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/encoder_advanced_test.go000066400000000000000000000064661345605224300304300ustar00rootroot00000000000000package packfile_test import ( "bytes" "io" "math/rand" "testing" "gopkg.in/src-d/go-billy.v4/memfs" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" . "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/filesystem" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type EncoderAdvancedSuite struct { fixtures.Suite } var _ = Suite(&EncoderAdvancedSuite{}) func (s *EncoderAdvancedSuite) TestEncodeDecode(c *C) { if testing.Short() { c.Skip("skipping test in short mode.") } fixs := fixtures.Basic().ByTag("packfile").ByTag(".git") fixs = append(fixs, fixtures.ByURL("https://github.com/src-d/go-git.git"). 
ByTag("packfile").ByTag(".git").One()) fixs.Test(c, func(f *fixtures.Fixture) { storage := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault()) s.testEncodeDecode(c, storage, 10) }) } func (s *EncoderAdvancedSuite) TestEncodeDecodeNoDeltaCompression(c *C) { if testing.Short() { c.Skip("skipping test in short mode.") } fixs := fixtures.Basic().ByTag("packfile").ByTag(".git") fixs = append(fixs, fixtures.ByURL("https://github.com/src-d/go-git.git"). ByTag("packfile").ByTag(".git").One()) fixs.Test(c, func(f *fixtures.Fixture) { storage := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault()) s.testEncodeDecode(c, storage, 0) }) } func (s *EncoderAdvancedSuite) testEncodeDecode( c *C, storage storer.Storer, packWindow uint, ) { objIter, err := storage.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) expectedObjects := map[plumbing.Hash]bool{} var hashes []plumbing.Hash err = objIter.ForEach(func(o plumbing.EncodedObject) error { expectedObjects[o.Hash()] = true hashes = append(hashes, o.Hash()) return err }) c.Assert(err, IsNil) // Shuffle hashes to avoid delta selector getting order right just because // the initial order is correct. 
auxHashes := make([]plumbing.Hash, len(hashes)) for i, j := range rand.Perm(len(hashes)) { auxHashes[j] = hashes[i] } hashes = auxHashes buf := bytes.NewBuffer(nil) enc := NewEncoder(buf, storage, false) encodeHash, err := enc.Encode(hashes, packWindow) c.Assert(err, IsNil) fs := memfs.New() f, err := fs.Create("packfile") c.Assert(err, IsNil) _, err = f.Write(buf.Bytes()) c.Assert(err, IsNil) _, err = f.Seek(0, io.SeekStart) c.Assert(err, IsNil) w := new(idxfile.Writer) parser, err := NewParser(NewScanner(f), w) c.Assert(err, IsNil) _, err = parser.Parse() c.Assert(err, IsNil) index, err := w.Index() c.Assert(err, IsNil) _, err = f.Seek(0, io.SeekStart) c.Assert(err, IsNil) p := NewPackfile(index, fs, f) decodeHash, err := p.ID() c.Assert(err, IsNil) c.Assert(encodeHash, Equals, decodeHash) objIter, err = p.GetAll() c.Assert(err, IsNil) obtainedObjects := map[plumbing.Hash]bool{} err = objIter.ForEach(func(o plumbing.EncodedObject) error { obtainedObjects[o.Hash()] = true return nil }) c.Assert(err, IsNil) c.Assert(obtainedObjects, DeepEquals, expectedObjects) for h := range obtainedObjects { if !expectedObjects[h] { c.Errorf("obtained unexpected object: %s", h) } } for h := range expectedObjects { if !obtainedObjects[h] { c.Errorf("missing object: %s", h) } } } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/encoder_test.go000066400000000000000000000175771345605224300266100ustar00rootroot00000000000000package packfile import ( "bytes" "io" stdioutil "io/ioutil" "gopkg.in/src-d/go-billy.v4/memfs" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" "gopkg.in/src-d/go-git.v4/storage/memory" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type EncoderSuite struct { fixtures.Suite buf *bytes.Buffer store *memory.Storage enc *Encoder } var _ = Suite(&EncoderSuite{}) func (s *EncoderSuite) SetUpTest(c *C) { s.buf = bytes.NewBuffer(nil) s.store = memory.NewStorage() s.enc = NewEncoder(s.buf, s.store, false) } func (s *EncoderSuite) TestCorrectPackHeader(c *C) { hash, err := s.enc.Encode([]plumbing.Hash{}, 10) c.Assert(err, IsNil) hb := [20]byte(hash) // PACK + VERSION + OBJECTS + HASH expectedResult := []byte{'P', 'A', 'C', 'K', 0, 0, 0, 2, 0, 0, 0, 0} expectedResult = append(expectedResult, hb[:]...) result := s.buf.Bytes() c.Assert(result, DeepEquals, expectedResult) } func (s *EncoderSuite) TestCorrectPackWithOneEmptyObject(c *C) { o := &plumbing.MemoryObject{} o.SetType(plumbing.CommitObject) o.SetSize(0) _, err := s.store.SetEncodedObject(o) c.Assert(err, IsNil) hash, err := s.enc.Encode([]plumbing.Hash{o.Hash()}, 10) c.Assert(err, IsNil) // PACK + VERSION(2) + OBJECT NUMBER(1) expectedResult := []byte{'P', 'A', 'C', 'K', 0, 0, 0, 2, 0, 0, 0, 1} // OBJECT HEADER(TYPE + SIZE)= 0001 0000 expectedResult = append(expectedResult, []byte{16}...) // Zlib header expectedResult = append(expectedResult, []byte{120, 156, 1, 0, 0, 255, 255, 0, 0, 0, 1}...) // + HASH hb := [20]byte(hash) expectedResult = append(expectedResult, hb[:]...) 
result := s.buf.Bytes() c.Assert(result, DeepEquals, expectedResult) } func (s *EncoderSuite) TestMaxObjectSize(c *C) { o := s.store.NewEncodedObject() o.SetSize(9223372036854775807) o.SetType(plumbing.CommitObject) _, err := s.store.SetEncodedObject(o) c.Assert(err, IsNil) hash, err := s.enc.Encode([]plumbing.Hash{o.Hash()}, 10) c.Assert(err, IsNil) c.Assert(hash.IsZero(), Not(Equals), true) } func (s *EncoderSuite) TestHashNotFound(c *C) { h, err := s.enc.Encode([]plumbing.Hash{plumbing.NewHash("BAD")}, 10) c.Assert(h, Equals, plumbing.ZeroHash) c.Assert(err, NotNil) c.Assert(err, Equals, plumbing.ErrObjectNotFound) } func (s *EncoderSuite) TestDecodeEncodeWithDeltaDecodeREF(c *C) { s.enc = NewEncoder(s.buf, s.store, true) s.simpleDeltaTest(c) } func (s *EncoderSuite) TestDecodeEncodeWithDeltaDecodeOFS(c *C) { s.enc = NewEncoder(s.buf, s.store, false) s.simpleDeltaTest(c) } func (s *EncoderSuite) TestDecodeEncodeWithDeltasDecodeREF(c *C) { s.enc = NewEncoder(s.buf, s.store, true) s.deltaOverDeltaTest(c) } func (s *EncoderSuite) TestDecodeEncodeWithDeltasDecodeOFS(c *C) { s.enc = NewEncoder(s.buf, s.store, false) s.deltaOverDeltaTest(c) } func (s *EncoderSuite) TestDecodeEncodeWithCycleREF(c *C) { s.enc = NewEncoder(s.buf, s.store, true) s.deltaOverDeltaCyclicTest(c) } func (s *EncoderSuite) TestDecodeEncodeWithCycleOFS(c *C) { s.enc = NewEncoder(s.buf, s.store, false) s.deltaOverDeltaCyclicTest(c) } func (s *EncoderSuite) simpleDeltaTest(c *C) { srcObject := newObject(plumbing.BlobObject, []byte("0")) targetObject := newObject(plumbing.BlobObject, []byte("01")) deltaObject, err := GetDelta(srcObject, targetObject) c.Assert(err, IsNil) srcToPack := newObjectToPack(srcObject) encHash, err := s.enc.encode([]*ObjectToPack{ srcToPack, newDeltaObjectToPack(srcToPack, targetObject, deltaObject), }) c.Assert(err, IsNil) p, cleanup := packfileFromReader(c, s.buf) defer cleanup() decHash, err := p.ID() c.Assert(err, IsNil) c.Assert(encHash, Equals, decHash) decSrc, err := 
p.Get(srcObject.Hash()) c.Assert(err, IsNil) objectsEqual(c, decSrc, srcObject) decTarget, err := p.Get(targetObject.Hash()) c.Assert(err, IsNil) objectsEqual(c, decTarget, targetObject) } func (s *EncoderSuite) deltaOverDeltaTest(c *C) { srcObject := newObject(plumbing.BlobObject, []byte("0")) targetObject := newObject(plumbing.BlobObject, []byte("01")) otherTargetObject := newObject(plumbing.BlobObject, []byte("011111")) deltaObject, err := GetDelta(srcObject, targetObject) c.Assert(err, IsNil) c.Assert(deltaObject.Hash(), Not(Equals), plumbing.ZeroHash) otherDeltaObject, err := GetDelta(targetObject, otherTargetObject) c.Assert(err, IsNil) c.Assert(otherDeltaObject.Hash(), Not(Equals), plumbing.ZeroHash) srcToPack := newObjectToPack(srcObject) targetToPack := newObjectToPack(targetObject) encHash, err := s.enc.encode([]*ObjectToPack{ targetToPack, srcToPack, newDeltaObjectToPack(srcToPack, targetObject, deltaObject), newDeltaObjectToPack(targetToPack, otherTargetObject, otherDeltaObject), }) c.Assert(err, IsNil) p, cleanup := packfileFromReader(c, s.buf) defer cleanup() decHash, err := p.ID() c.Assert(err, IsNil) c.Assert(encHash, Equals, decHash) decSrc, err := p.Get(srcObject.Hash()) c.Assert(err, IsNil) objectsEqual(c, decSrc, srcObject) decTarget, err := p.Get(targetObject.Hash()) c.Assert(err, IsNil) objectsEqual(c, decTarget, targetObject) decOtherTarget, err := p.Get(otherTargetObject.Hash()) c.Assert(err, IsNil) objectsEqual(c, decOtherTarget, otherTargetObject) } func (s *EncoderSuite) deltaOverDeltaCyclicTest(c *C) { o1 := newObject(plumbing.BlobObject, []byte("0")) o2 := newObject(plumbing.BlobObject, []byte("01")) o3 := newObject(plumbing.BlobObject, []byte("011111")) o4 := newObject(plumbing.BlobObject, []byte("01111100000")) _, err := s.store.SetEncodedObject(o1) c.Assert(err, IsNil) _, err = s.store.SetEncodedObject(o2) c.Assert(err, IsNil) _, err = s.store.SetEncodedObject(o3) c.Assert(err, IsNil) _, err = s.store.SetEncodedObject(o4) 
c.Assert(err, IsNil) d2, err := GetDelta(o1, o2) c.Assert(err, IsNil) d3, err := GetDelta(o4, o3) c.Assert(err, IsNil) d4, err := GetDelta(o3, o4) c.Assert(err, IsNil) po1 := newObjectToPack(o1) pd2 := newDeltaObjectToPack(po1, o2, d2) pd3 := newObjectToPack(o3) pd4 := newObjectToPack(o4) pd3.SetDelta(pd4, d3) pd4.SetDelta(pd3, d4) // SetOriginal is used by delta selector when generating ObjectToPack. // It also fills type, hash and size values to be used when Original // is nil. po1.SetOriginal(po1.Original) pd2.SetOriginal(pd2.Original) pd2.CleanOriginal() pd3.SetOriginal(pd3.Original) pd3.CleanOriginal() pd4.SetOriginal(pd4.Original) encHash, err := s.enc.encode([]*ObjectToPack{ po1, pd2, pd3, pd4, }) c.Assert(err, IsNil) p, cleanup := packfileFromReader(c, s.buf) defer cleanup() decHash, err := p.ID() c.Assert(err, IsNil) c.Assert(encHash, Equals, decHash) decSrc, err := p.Get(o1.Hash()) c.Assert(err, IsNil) objectsEqual(c, decSrc, o1) decTarget, err := p.Get(o2.Hash()) c.Assert(err, IsNil) objectsEqual(c, decTarget, o2) decOtherTarget, err := p.Get(o3.Hash()) c.Assert(err, IsNil) objectsEqual(c, decOtherTarget, o3) decAnotherTarget, err := p.Get(o4.Hash()) c.Assert(err, IsNil) objectsEqual(c, decAnotherTarget, o4) } func objectsEqual(c *C, o1, o2 plumbing.EncodedObject) { c.Assert(o1.Type(), Equals, o2.Type()) c.Assert(o1.Hash(), Equals, o2.Hash()) c.Assert(o1.Size(), Equals, o2.Size()) r1, err := o1.Reader() c.Assert(err, IsNil) b1, err := stdioutil.ReadAll(r1) c.Assert(err, IsNil) r2, err := o2.Reader() c.Assert(err, IsNil) b2, err := stdioutil.ReadAll(r2) c.Assert(err, IsNil) c.Assert(bytes.Compare(b1, b2), Equals, 0) } func packfileFromReader(c *C, buf *bytes.Buffer) (*Packfile, func()) { fs := memfs.New() file, err := fs.Create("packfile") c.Assert(err, IsNil) _, err = file.Write(buf.Bytes()) c.Assert(err, IsNil) _, err = file.Seek(0, io.SeekStart) c.Assert(err, IsNil) scanner := NewScanner(file) w := new(idxfile.Writer) p, err := NewParser(scanner, w) 
c.Assert(err, IsNil) _, err = p.Parse() c.Assert(err, IsNil) index, err := w.Index() c.Assert(err, IsNil) return NewPackfile(index, fs, file), func() { c.Assert(file.Close(), IsNil) } } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/error.go000066400000000000000000000012051345605224300252400ustar00rootroot00000000000000package packfile import "fmt" // Error specifies errors returned during packfile parsing. type Error struct { reason, details string } // NewError returns a new error. func NewError(reason string) *Error { return &Error{reason: reason} } // Error returns a text representation of the error. func (e *Error) Error() string { if e.details == "" { return e.reason } return fmt.Sprintf("%s: %s", e.reason, e.details) } // AddDetails adds details to an error, with additional text. func (e *Error) AddDetails(format string, args ...interface{}) *Error { return &Error{ reason: e.reason, details: fmt.Sprintf(format, args...), } } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/fsobject.go000066400000000000000000000045751345605224300257230ustar00rootroot00000000000000package packfile import ( "io" billy "gopkg.in/src-d/go-billy.v4" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" ) // FSObject is an object from the packfile on the filesystem. type FSObject struct { hash plumbing.Hash h *ObjectHeader offset int64 size int64 typ plumbing.ObjectType index idxfile.Index fs billy.Filesystem path string cache cache.Object } // NewFSObject creates a new filesystem object. func NewFSObject( hash plumbing.Hash, finalType plumbing.ObjectType, offset int64, contentSize int64, index idxfile.Index, fs billy.Filesystem, path string, cache cache.Object, ) *FSObject { return &FSObject{ hash: hash, offset: offset, size: contentSize, typ: finalType, index: index, fs: fs, path: path, cache: cache, } } // Reader implements the plumbing.EncodedObject interface. 
func (o *FSObject) Reader() (io.ReadCloser, error) { obj, ok := o.cache.Get(o.hash) if ok && obj != o { reader, err := obj.Reader() if err != nil { return nil, err } return reader, nil } f, err := o.fs.Open(o.path) if err != nil { return nil, err } p := NewPackfileWithCache(o.index, nil, f, o.cache) r, err := p.getObjectContent(o.offset) if err != nil { _ = f.Close() return nil, err } if err := f.Close(); err != nil { return nil, err } return r, nil } // SetSize implements the plumbing.EncodedObject interface. This method // is a noop. func (o *FSObject) SetSize(int64) {} // SetType implements the plumbing.EncodedObject interface. This method is // a noop. func (o *FSObject) SetType(plumbing.ObjectType) {} // Hash implements the plumbing.EncodedObject interface. func (o *FSObject) Hash() plumbing.Hash { return o.hash } // Size implements the plumbing.EncodedObject interface. func (o *FSObject) Size() int64 { return o.size } // Type implements the plumbing.EncodedObject interface. func (o *FSObject) Type() plumbing.ObjectType { return o.typ } // Writer implements the plumbing.EncodedObject interface. This method always // returns a nil writer. func (o *FSObject) Writer() (io.WriteCloser, error) { return nil, nil } type objectReader struct { io.ReadCloser f billy.File } func (r *objectReader) Close() error { if err := r.ReadCloser.Close(); err != nil { _ = r.f.Close() return err } return r.f.Close() } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/object_pack.go000066400000000000000000000076021345605224300263620ustar00rootroot00000000000000package packfile import ( "gopkg.in/src-d/go-git.v4/plumbing" ) // ObjectToPack is a representation of an object that is going to be into a // pack file. type ObjectToPack struct { // The main object to pack, it could be any object, including deltas Object plumbing.EncodedObject // Base is the object that a delta is based on (it could be also another delta). 
// If the main object is not a delta, Base will be null Base *ObjectToPack // Original is the object that we can generate applying the delta to // Base, or the same object as Object in the case of a non-delta // object. Original plumbing.EncodedObject // Depth is the amount of deltas needed to resolve to obtain Original // (delta based on delta based on ...) Depth int // offset in pack when object has been already written, or 0 if it // has not been written yet Offset int64 // Information from the original object resolvedOriginal bool originalType plumbing.ObjectType originalSize int64 originalHash plumbing.Hash } // newObjectToPack creates a correct ObjectToPack based on a non-delta object func newObjectToPack(o plumbing.EncodedObject) *ObjectToPack { return &ObjectToPack{ Object: o, Original: o, } } // newDeltaObjectToPack creates a correct ObjectToPack for a delta object, based on // his base (could be another delta), the delta target (in this case called original), // and the delta Object itself func newDeltaObjectToPack(base *ObjectToPack, original, delta plumbing.EncodedObject) *ObjectToPack { return &ObjectToPack{ Object: delta, Base: base, Original: original, Depth: base.Depth + 1, } } // BackToOriginal converts that ObjectToPack to a non-deltified object if it was one func (o *ObjectToPack) BackToOriginal() { if o.IsDelta() && o.Original != nil { o.Object = o.Original o.Base = nil o.Depth = 0 } } // IsWritten returns if that ObjectToPack was // already written into the packfile or not func (o *ObjectToPack) IsWritten() bool { return o.Offset > 1 } // MarkWantWrite marks this ObjectToPack as WantWrite // to avoid delta chain loops func (o *ObjectToPack) MarkWantWrite() { o.Offset = 1 } // WantWrite checks if this ObjectToPack was marked as WantWrite before func (o *ObjectToPack) WantWrite() bool { return o.Offset == 1 } // SetOriginal sets both Original and saves size, type and hash. 
If object // is nil Original is set but previous resolved values are kept func (o *ObjectToPack) SetOriginal(obj plumbing.EncodedObject) { o.Original = obj o.SaveOriginalMetadata() } // SaveOriginalMetadata saves size, type and hash of Original object func (o *ObjectToPack) SaveOriginalMetadata() { if o.Original != nil { o.originalSize = o.Original.Size() o.originalType = o.Original.Type() o.originalHash = o.Original.Hash() o.resolvedOriginal = true } } // CleanOriginal sets Original to nil func (o *ObjectToPack) CleanOriginal() { o.Original = nil } func (o *ObjectToPack) Type() plumbing.ObjectType { if o.Original != nil { return o.Original.Type() } if o.resolvedOriginal { return o.originalType } if o.Base != nil { return o.Base.Type() } if o.Object != nil { return o.Object.Type() } panic("cannot get type") } func (o *ObjectToPack) Hash() plumbing.Hash { if o.Original != nil { return o.Original.Hash() } if o.resolvedOriginal { return o.originalHash } do, ok := o.Object.(plumbing.DeltaObject) if ok { return do.ActualHash() } panic("cannot get hash") } func (o *ObjectToPack) Size() int64 { if o.Original != nil { return o.Original.Size() } if o.resolvedOriginal { return o.originalSize } do, ok := o.Object.(plumbing.DeltaObject) if ok { return do.ActualSize() } panic("cannot get ObjectToPack size") } func (o *ObjectToPack) IsDelta() bool { return o.Base != nil } func (o *ObjectToPack) SetDelta(base *ObjectToPack, delta plumbing.EncodedObject) { o.Object = delta o.Base = base o.Depth = base.Depth + 1 } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/object_pack_test.go000066400000000000000000000023151345605224300274150ustar00rootroot00000000000000package packfile import ( "io" "gopkg.in/src-d/go-git.v4/plumbing" . 
"gopkg.in/check.v1" ) type ObjectToPackSuite struct{} var _ = Suite(&ObjectToPackSuite{}) func (s *ObjectToPackSuite) TestObjectToPack(c *C) { obj := &dummyObject{} otp := newObjectToPack(obj) c.Assert(obj, Equals, otp.Object) c.Assert(obj, Equals, otp.Original) c.Assert(otp.Base, IsNil) c.Assert(otp.IsDelta(), Equals, false) original := &dummyObject{} delta := &dummyObject{} deltaToPack := newDeltaObjectToPack(otp, original, delta) c.Assert(obj, Equals, deltaToPack.Object) c.Assert(original, Equals, deltaToPack.Original) c.Assert(otp, Equals, deltaToPack.Base) c.Assert(deltaToPack.IsDelta(), Equals, true) } type dummyObject struct{} func (*dummyObject) Hash() plumbing.Hash { return plumbing.ZeroHash } func (*dummyObject) Type() plumbing.ObjectType { return plumbing.InvalidObject } func (*dummyObject) SetType(plumbing.ObjectType) {} func (*dummyObject) Size() int64 { return 0 } func (*dummyObject) SetSize(s int64) {} func (*dummyObject) Reader() (io.ReadCloser, error) { return nil, nil } func (*dummyObject) Writer() (io.WriteCloser, error) { return nil, nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/packfile.go000066400000000000000000000261241345605224300256740ustar00rootroot00000000000000package packfile import ( "bytes" "io" "os" billy "gopkg.in/src-d/go-billy.v4" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" "gopkg.in/src-d/go-git.v4/plumbing/storer" ) var ( // ErrInvalidObject is returned by Decode when an invalid object is // found in the packfile. ErrInvalidObject = NewError("invalid git object") // ErrZLib is returned by Decode when there was an error unzipping // the packfile contents. ErrZLib = NewError("zlib reading error") ) // When reading small objects from packfile it is beneficial to do so at // once to exploit the buffered I/O. 
In many cases the objects are so small // that they were already loaded to memory when the object header was // loaded from the packfile. Wrapping in FSObject would cause this buffered // data to be thrown away and then re-read later, with the additional // seeking causing reloads from disk. Objects smaller than this threshold // are now always read into memory and stored in cache instead of being // wrapped in FSObject. const smallObjectThreshold = 16 * 1024 // Packfile allows retrieving information from inside a packfile. type Packfile struct { idxfile.Index fs billy.Filesystem file billy.File s *Scanner deltaBaseCache cache.Object offsetToType map[int64]plumbing.ObjectType } // NewPackfileWithCache creates a new Packfile with the given object cache. // If the filesystem is provided, the packfile will return FSObjects, otherwise // it will return MemoryObjects. func NewPackfileWithCache( index idxfile.Index, fs billy.Filesystem, file billy.File, cache cache.Object, ) *Packfile { s := NewScanner(file) return &Packfile{ index, fs, file, s, cache, make(map[int64]plumbing.ObjectType), } } // NewPackfile returns a packfile representation for the given packfile file // and packfile idx. // If the filesystem is provided, the packfile will return FSObjects, otherwise // it will return MemoryObjects. func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile { return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault()) } // Get retrieves the encoded object in the packfile with the given hash. func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) { offset, err := p.FindOffset(h) if err != nil { return nil, err } return p.GetByOffset(offset) } // GetByOffset retrieves the encoded object from the packfile with the given // offset. 
func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) { hash, err := p.FindHash(o) if err == nil { if obj, ok := p.deltaBaseCache.Get(hash); ok { return obj, nil } } return p.objectAtOffset(o) } // GetSizeByOffset retrieves the size of the encoded object from the // packfile with the given offset. func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) { if _, err := p.s.SeekFromStart(o); err != nil { if err == io.EOF || isInvalid(err) { return 0, plumbing.ErrObjectNotFound } return 0, err } h, err := p.nextObjectHeader() if err != nil { return 0, err } return p.getObjectSize(h) } func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) { h, err := p.s.SeekObjectHeader(offset) p.s.pendingObject = nil return h, err } func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) { h, err := p.s.NextObjectHeader() p.s.pendingObject = nil return h, err } func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) { switch h.Type { case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: return h.Length, nil case plumbing.REFDeltaObject, plumbing.OFSDeltaObject: buf := bufPool.Get().(*bytes.Buffer) buf.Reset() defer bufPool.Put(buf) if _, _, err := p.s.NextObject(buf); err != nil { return 0, err } delta := buf.Bytes() _, delta = decodeLEB128(delta) // skip src size sz, _ := decodeLEB128(delta) return int64(sz), nil default: return 0, ErrInvalidObject.AddDetails("type %q", h.Type) } } func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err error) { switch h.Type { case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: return h.Type, nil case plumbing.REFDeltaObject, plumbing.OFSDeltaObject: var offset int64 if h.Type == plumbing.REFDeltaObject { offset, err = p.FindOffset(h.Reference) if err != nil { return } } else { offset = h.OffsetReference } if baseType, ok := p.offsetToType[offset]; ok { typ = baseType } else { h, err = 
p.objectHeaderAtOffset(offset) if err != nil { return } typ, err = p.getObjectType(h) if err != nil { return } } default: err = ErrInvalidObject.AddDetails("type %q", h.Type) } return } func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error) { h, err := p.objectHeaderAtOffset(offset) if err != nil { if err == io.EOF || isInvalid(err) { return nil, plumbing.ErrObjectNotFound } return nil, err } // If we have no filesystem, we will return a MemoryObject instead // of an FSObject. if p.fs == nil { return p.getNextObject(h) } // If the object is not a delta and it's small enough then read it // completely into memory now since it is already read from disk // into buffer anyway. if h.Length <= smallObjectThreshold && h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject { return p.getNextObject(h) } hash, err := p.FindHash(h.Offset) if err != nil { return nil, err } size, err := p.getObjectSize(h) if err != nil { return nil, err } typ, err := p.getObjectType(h) if err != nil { return nil, err } p.offsetToType[h.Offset] = typ return NewFSObject( hash, typ, h.Offset, size, p.Index, p.fs, p.file.Name(), p.deltaBaseCache, ), nil } func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) { ref, err := p.FindHash(offset) if err == nil { obj, ok := p.cacheGet(ref) if ok { reader, err := obj.Reader() if err != nil { return nil, err } return reader, nil } } h, err := p.objectHeaderAtOffset(offset) if err != nil { return nil, err } obj, err := p.getNextObject(h) if err != nil { return nil, err } return obj.Reader() } func (p *Packfile) getNextObject(h *ObjectHeader) (plumbing.EncodedObject, error) { var obj = new(plumbing.MemoryObject) obj.SetSize(h.Length) obj.SetType(h.Type) var err error switch h.Type { case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: err = p.fillRegularObjectContent(obj) case plumbing.REFDeltaObject: err = p.fillREFDeltaObjectContent(obj, h.Reference) case 
plumbing.OFSDeltaObject: err = p.fillOFSDeltaObjectContent(obj, h.OffsetReference) default: err = ErrInvalidObject.AddDetails("type %q", h.Type) } if err != nil { return nil, err } return obj, nil } func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) error { w, err := obj.Writer() if err != nil { return err } _, _, err = p.s.NextObject(w) p.cachePut(obj) return err } func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error { buf := bufPool.Get().(*bytes.Buffer) buf.Reset() _, _, err := p.s.NextObject(buf) if err != nil { return err } base, ok := p.cacheGet(ref) if !ok { base, err = p.Get(ref) if err != nil { return err } } obj.SetType(base.Type()) err = ApplyDelta(obj, base, buf.Bytes()) p.cachePut(obj) bufPool.Put(buf) return err } func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error { buf := bytes.NewBuffer(nil) _, _, err := p.s.NextObject(buf) if err != nil { return err } var base plumbing.EncodedObject var ok bool hash, err := p.FindHash(offset) if err == nil { base, ok = p.cacheGet(hash) } if !ok { base, err = p.GetByOffset(offset) if err != nil { return err } } obj.SetType(base.Type()) err = ApplyDelta(obj, base, buf.Bytes()) p.cachePut(obj) return err } func (p *Packfile) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) { if p.deltaBaseCache == nil { return nil, false } return p.deltaBaseCache.Get(h) } func (p *Packfile) cachePut(obj plumbing.EncodedObject) { if p.deltaBaseCache == nil { return } p.deltaBaseCache.Put(obj) } // GetAll returns an iterator with all encoded objects in the packfile. // The iterator returned is not thread-safe, it should be used in the same // thread as the Packfile instance. func (p *Packfile) GetAll() (storer.EncodedObjectIter, error) { return p.GetByType(plumbing.AnyObject) } // GetByType returns all the objects of the given type. 
func (p *Packfile) GetByType(typ plumbing.ObjectType) (storer.EncodedObjectIter, error) { switch typ { case plumbing.AnyObject, plumbing.BlobObject, plumbing.TreeObject, plumbing.CommitObject, plumbing.TagObject: entries, err := p.EntriesByOffset() if err != nil { return nil, err } return &objectIter{ // Easiest way to provide an object decoder is just to pass a Packfile // instance. To not mess with the seeks, it's a new instance with a // different scanner but the same cache and offset to hash map for // reusing as much cache as possible. p: p, iter: entries, typ: typ, }, nil default: return nil, plumbing.ErrInvalidType } } // ID returns the ID of the packfile, which is the checksum at the end of it. func (p *Packfile) ID() (plumbing.Hash, error) { prev, err := p.file.Seek(-20, io.SeekEnd) if err != nil { return plumbing.ZeroHash, err } var hash plumbing.Hash if _, err := io.ReadFull(p.file, hash[:]); err != nil { return plumbing.ZeroHash, err } if _, err := p.file.Seek(prev, io.SeekStart); err != nil { return plumbing.ZeroHash, err } return hash, nil } // Close the packfile and its resources. func (p *Packfile) Close() error { closer, ok := p.file.(io.Closer) if !ok { return nil } return closer.Close() } type objectIter struct { p *Packfile typ plumbing.ObjectType iter idxfile.EntryIter } func (i *objectIter) Next() (plumbing.EncodedObject, error) { for { e, err := i.iter.Next() if err != nil { return nil, err } obj, err := i.p.GetByOffset(int64(e.Offset)) if err != nil { return nil, err } if i.typ == plumbing.AnyObject || obj.Type() == i.typ { return obj, nil } } } func (i *objectIter) ForEach(f func(plumbing.EncodedObject) error) error { for { o, err := i.Next() if err != nil { if err == io.EOF { return nil } return err } if err := f(o); err != nil { return err } } } func (i *objectIter) Close() { i.iter.Close() } // isInvalid checks whether an error is an os.PathError with an os.ErrInvalid // error inside. 
It also checks for the windows error, which is different from // os.ErrInvalid. func isInvalid(err error) bool { pe, ok := err.(*os.PathError) if !ok { return false } errstr := pe.Err.Error() return errstr == errInvalidUnix || errstr == errInvalidWindows } // errInvalidWindows is the Windows equivalent to os.ErrInvalid const errInvalidWindows = "The parameter is incorrect." var errInvalidUnix = os.ErrInvalid.Error() golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/packfile_test.go000066400000000000000000000222371345605224300267340ustar00rootroot00000000000000package packfile_test import ( "io" "math" . "gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4/osfs" fixtures "gopkg.in/src-d/go-git-fixtures.v3" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-git.v4/plumbing/storer" ) type PackfileSuite struct { fixtures.Suite p *packfile.Packfile idx *idxfile.MemoryIndex f *fixtures.Fixture } var _ = Suite(&PackfileSuite{}) func (s *PackfileSuite) TestGet(c *C) { for h := range expectedEntries { obj, err := s.p.Get(h) c.Assert(err, IsNil) c.Assert(obj, Not(IsNil)) c.Assert(obj.Hash(), Equals, h) } _, err := s.p.Get(plumbing.ZeroHash) c.Assert(err, Equals, plumbing.ErrObjectNotFound) } func (s *PackfileSuite) TestGetByOffset(c *C) { for h, o := range expectedEntries { obj, err := s.p.GetByOffset(o) c.Assert(err, IsNil) c.Assert(obj, Not(IsNil)) c.Assert(obj.Hash(), Equals, h) } _, err := s.p.GetByOffset(math.MaxInt64) c.Assert(err, Equals, plumbing.ErrObjectNotFound) } func (s *PackfileSuite) TestID(c *C) { id, err := s.p.ID() c.Assert(err, IsNil) c.Assert(id, Equals, s.f.PackfileHash) } func (s *PackfileSuite) TestGetAll(c *C) { iter, err := s.p.GetAll() c.Assert(err, IsNil) var objects int for { o, err := iter.Next() if err == io.EOF { break } c.Assert(err, IsNil) objects++ _, ok := expectedEntries[o.Hash()] c.Assert(ok, Equals, true) } c.Assert(objects, 
Equals, len(expectedEntries)) } var expectedEntries = map[plumbing.Hash]int64{ plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea"): 615, plumbing.NewHash("32858aad3c383ed1ff0a0f9bdf231d54a00c9e88"): 1524, plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"): 1063, plumbing.NewHash("49c6bb89b17060d7b4deacb7b338fcc6ea2352a9"): 78882, plumbing.NewHash("4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd"): 84688, plumbing.NewHash("586af567d0bb5e771e49bdd9434f5e0fb76d25fa"): 84559, plumbing.NewHash("5a877e6a906a2743ad6e45d99c1793642aaf8eda"): 84479, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"): 186, plumbing.NewHash("7e59600739c96546163833214c36459e324bad0a"): 84653, plumbing.NewHash("880cd14280f4b9b6ed3986d6671f907d7cc2a198"): 78050, plumbing.NewHash("8dcef98b1d52143e1e2dbc458ffe38f925786bf2"): 84741, plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"): 286, plumbing.NewHash("9a48f23120e880dfbe41f7c9b7b708e9ee62a492"): 80998, plumbing.NewHash("9dea2395f5403188298c1dabe8bdafe562c491e3"): 84032, plumbing.NewHash("a39771a7651f97faf5c72e08224d857fc35133db"): 84430, plumbing.NewHash("a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69"): 838, plumbing.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c"): 84375, plumbing.NewHash("aa9b383c260e1d05fbbf6b30a02914555e20c725"): 84760, plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"): 449, plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"): 1392, plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"): 1230, plumbing.NewHash("c192bd6a24ea1ab01d78686e417c8bdc7c3d197f"): 1713, plumbing.NewHash("c2d30fa8ef288618f65f6eed6e168e0d514886f4"): 84725, plumbing.NewHash("c8f1d8c61f9da76f4cb49fd86322b6e685dba956"): 80725, plumbing.NewHash("cf4aa3b38974fb7d81f367c0830f7d78d65ab86b"): 84608, plumbing.NewHash("d3ff53e0564a9f87d8e84b6e28e5060e517008aa"): 1685, plumbing.NewHash("d5c0f4ab811897cadf03aec358ae60d21f91c50d"): 2351, plumbing.NewHash("dbd3641b371024f44d0e469a9c8f5457b0660de1"): 84115, 
plumbing.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881"): 12, plumbing.NewHash("eba74343e2f15d62adedfd8c883ee0262b5c8021"): 84708, plumbing.NewHash("fb72698cab7617ac416264415f13224dfd7a165e"): 84671, } func (s *PackfileSuite) SetUpTest(c *C) { s.f = fixtures.Basic().One() fs := osfs.New("") f, err := fs.Open(s.f.Packfile().Name()) c.Assert(err, IsNil) s.idx = idxfile.NewMemoryIndex() c.Assert(idxfile.NewDecoder(s.f.Idx()).Decode(s.idx), IsNil) s.p = packfile.NewPackfile(s.idx, fs, f) } func (s *PackfileSuite) TearDownTest(c *C) { c.Assert(s.p.Close(), IsNil) } func (s *PackfileSuite) TestDecode(c *C) { fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) { index := getIndexFromIdxFile(f.Idx()) fs := osfs.New("") pf, err := fs.Open(f.Packfile().Name()) c.Assert(err, IsNil) p := packfile.NewPackfile(index, fs, pf) defer p.Close() for _, h := range expectedHashes { obj, err := p.Get(plumbing.NewHash(h)) c.Assert(err, IsNil) c.Assert(obj.Hash().String(), Equals, h) } }) } func (s *PackfileSuite) TestDecodeByTypeRefDelta(c *C) { f := fixtures.Basic().ByTag("ref-delta").One() index := getIndexFromIdxFile(f.Idx()) fs := osfs.New("") pf, err := fs.Open(f.Packfile().Name()) c.Assert(err, IsNil) packfile := packfile.NewPackfile(index, fs, pf) defer packfile.Close() iter, err := packfile.GetByType(plumbing.CommitObject) c.Assert(err, IsNil) var count int for { obj, err := iter.Next() if err == io.EOF { break } count++ c.Assert(err, IsNil) c.Assert(obj.Type(), Equals, plumbing.CommitObject) } c.Assert(count > 0, Equals, true) } func (s *PackfileSuite) TestDecodeByType(c *C) { ts := []plumbing.ObjectType{ plumbing.CommitObject, plumbing.TagObject, plumbing.TreeObject, plumbing.BlobObject, } fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) { for _, t := range ts { index := getIndexFromIdxFile(f.Idx()) fs := osfs.New("") pf, err := fs.Open(f.Packfile().Name()) c.Assert(err, IsNil) packfile := packfile.NewPackfile(index, fs, pf) defer 
packfile.Close() iter, err := packfile.GetByType(t) c.Assert(err, IsNil) c.Assert(iter.ForEach(func(obj plumbing.EncodedObject) error { c.Assert(obj.Type(), Equals, t) return nil }), IsNil) } }) } func (s *PackfileSuite) TestDecodeByTypeConstructor(c *C) { f := fixtures.Basic().ByTag("packfile").One() index := getIndexFromIdxFile(f.Idx()) fs := osfs.New("") pf, err := fs.Open(f.Packfile().Name()) c.Assert(err, IsNil) packfile := packfile.NewPackfile(index, fs, pf) defer packfile.Close() _, err = packfile.GetByType(plumbing.OFSDeltaObject) c.Assert(err, Equals, plumbing.ErrInvalidType) _, err = packfile.GetByType(plumbing.REFDeltaObject) c.Assert(err, Equals, plumbing.ErrInvalidType) _, err = packfile.GetByType(plumbing.InvalidObject) c.Assert(err, Equals, plumbing.ErrInvalidType) } var expectedHashes = []string{ "918c48b83bd081e863dbe1b80f8998f058cd8294", "af2d6a6954d532f8ffb47615169c8fdf9d383a1a", "1669dce138d9b841a518c64b10914d88f5e488ea", "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69", "b8e471f58bcbca63b07bda20e428190409c2db47", "35e85108805c84807bc66a02d91535e1e24b38b9", "b029517f6300c2da0f4b651b8642506cd6aaf45d", "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88", "d3ff53e0564a9f87d8e84b6e28e5060e517008aa", "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f", "d5c0f4ab811897cadf03aec358ae60d21f91c50d", "49c6bb89b17060d7b4deacb7b338fcc6ea2352a9", "cf4aa3b38974fb7d81f367c0830f7d78d65ab86b", "9dea2395f5403188298c1dabe8bdafe562c491e3", "586af567d0bb5e771e49bdd9434f5e0fb76d25fa", "9a48f23120e880dfbe41f7c9b7b708e9ee62a492", "5a877e6a906a2743ad6e45d99c1793642aaf8eda", "c8f1d8c61f9da76f4cb49fd86322b6e685dba956", "a8d315b2b1c615d43042c3a62402b8a54288cf5c", "a39771a7651f97faf5c72e08224d857fc35133db", "880cd14280f4b9b6ed3986d6671f907d7cc2a198", "fb72698cab7617ac416264415f13224dfd7a165e", "4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd", "eba74343e2f15d62adedfd8c883ee0262b5c8021", "c2d30fa8ef288618f65f6eed6e168e0d514886f4", "8dcef98b1d52143e1e2dbc458ffe38f925786bf2", 
"aa9b383c260e1d05fbbf6b30a02914555e20c725", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "dbd3641b371024f44d0e469a9c8f5457b0660de1", "e8d3ffab552895c19b9fcf7aa264d277cde33881", "7e59600739c96546163833214c36459e324bad0a", } func assertObjects(c *C, s storer.EncodedObjectStorer, expects []string) { i, err := s.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) var count int err = i.ForEach(func(plumbing.EncodedObject) error { count++; return nil }) c.Assert(err, IsNil) c.Assert(count, Equals, len(expects)) for _, exp := range expects { obt, err := s.EncodedObject(plumbing.AnyObject, plumbing.NewHash(exp)) c.Assert(err, IsNil) c.Assert(obt.Hash().String(), Equals, exp) } } func getIndexFromIdxFile(r io.Reader) idxfile.Index { idxf := idxfile.NewMemoryIndex() d := idxfile.NewDecoder(r) if err := d.Decode(idxf); err != nil { panic(err) } return idxf } func (s *PackfileSuite) TestSize(c *C) { f := fixtures.Basic().ByTag("ref-delta").One() index := getIndexFromIdxFile(f.Idx()) fs := osfs.New("") pf, err := fs.Open(f.Packfile().Name()) c.Assert(err, IsNil) packfile := packfile.NewPackfile(index, fs, pf) defer packfile.Close() // Get the size of binary.jpg, which is not delta-encoded. offset, err := packfile.FindOffset(plumbing.NewHash("d5c0f4ab811897cadf03aec358ae60d21f91c50d")) c.Assert(err, IsNil) size, err := packfile.GetSizeByOffset(offset) c.Assert(err, IsNil) c.Assert(size, Equals, int64(76110)) // Get the size of the root commit, which is delta-encoded. 
offset, err = packfile.FindOffset(f.Head) c.Assert(err, IsNil) size, err = packfile.GetSizeByOffset(offset) c.Assert(err, IsNil) c.Assert(size, Equals, int64(245)) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/parser.go000066400000000000000000000245051345605224300254130ustar00rootroot00000000000000package packfile import ( "bytes" "errors" "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/storer" ) var ( // ErrReferenceDeltaNotFound is returned when the reference delta is not // found. ErrReferenceDeltaNotFound = errors.New("reference delta not found") // ErrNotSeekableSource is returned when the source for the parser is not // seekable and a storage was not provided, so it can't be parsed. ErrNotSeekableSource = errors.New("parser source is not seekable and storage was not provided") // ErrDeltaNotCached is returned when the delta could not be found in cache. ErrDeltaNotCached = errors.New("delta could not be found in cache") ) // Observer interface is implemented by index encoders. type Observer interface { // OnHeader is called when a new packfile is opened. OnHeader(count uint32) error // OnInflatedObjectHeader is called for each object header read. OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error // OnInflatedObjectContent is called for each decoded object. OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error // OnFooter is called when decoding is done. OnFooter(h plumbing.Hash) error } // Parser decodes a packfile and calls any observer associated to it. Is used // to generate indexes. 
type Parser struct { storage storer.EncodedObjectStorer scanner *Scanner count uint32 oi []*objectInfo oiByHash map[plumbing.Hash]*objectInfo oiByOffset map[int64]*objectInfo hashOffset map[plumbing.Hash]int64 checksum plumbing.Hash cache *cache.BufferLRU // delta content by offset, only used if source is not seekable deltas map[int64][]byte ob []Observer } // NewParser creates a new Parser. The Scanner source must be seekable. // If it's not, NewParserWithStorage should be used instead. func NewParser(scanner *Scanner, ob ...Observer) (*Parser, error) { return NewParserWithStorage(scanner, nil, ob...) } // NewParserWithStorage creates a new Parser. The scanner source must either // be seekable or a storage must be provided. func NewParserWithStorage( scanner *Scanner, storage storer.EncodedObjectStorer, ob ...Observer, ) (*Parser, error) { if !scanner.IsSeekable && storage == nil { return nil, ErrNotSeekableSource } var deltas map[int64][]byte if !scanner.IsSeekable { deltas = make(map[int64][]byte) } return &Parser{ storage: storage, scanner: scanner, ob: ob, count: 0, cache: cache.NewBufferLRUDefault(), deltas: deltas, }, nil } func (p *Parser) forEachObserver(f func(o Observer) error) error { for _, o := range p.ob { if err := f(o); err != nil { return err } } return nil } func (p *Parser) onHeader(count uint32) error { return p.forEachObserver(func(o Observer) error { return o.OnHeader(count) }) } func (p *Parser) onInflatedObjectHeader( t plumbing.ObjectType, objSize int64, pos int64, ) error { return p.forEachObserver(func(o Observer) error { return o.OnInflatedObjectHeader(t, objSize, pos) }) } func (p *Parser) onInflatedObjectContent( h plumbing.Hash, pos int64, crc uint32, content []byte, ) error { return p.forEachObserver(func(o Observer) error { return o.OnInflatedObjectContent(h, pos, crc, content) }) } func (p *Parser) onFooter(h plumbing.Hash) error { return p.forEachObserver(func(o Observer) error { return o.OnFooter(h) }) } // Parse start decoding 
phase of the packfile. func (p *Parser) Parse() (plumbing.Hash, error) { if err := p.init(); err != nil { return plumbing.ZeroHash, err } if err := p.indexObjects(); err != nil { return plumbing.ZeroHash, err } var err error p.checksum, err = p.scanner.Checksum() if err != nil && err != io.EOF { return plumbing.ZeroHash, err } if err := p.resolveDeltas(); err != nil { return plumbing.ZeroHash, err } if err := p.onFooter(p.checksum); err != nil { return plumbing.ZeroHash, err } return p.checksum, nil } func (p *Parser) init() error { _, c, err := p.scanner.Header() if err != nil { return err } if err := p.onHeader(c); err != nil { return err } p.count = c p.oiByHash = make(map[plumbing.Hash]*objectInfo, p.count) p.oiByOffset = make(map[int64]*objectInfo, p.count) p.oi = make([]*objectInfo, p.count) return nil } func (p *Parser) indexObjects() error { buf := new(bytes.Buffer) for i := uint32(0); i < p.count; i++ { buf.Reset() oh, err := p.scanner.NextObjectHeader() if err != nil { return err } delta := false var ota *objectInfo switch t := oh.Type; t { case plumbing.OFSDeltaObject: delta = true parent, ok := p.oiByOffset[oh.OffsetReference] if !ok { return plumbing.ErrObjectNotFound } ota = newDeltaObject(oh.Offset, oh.Length, t, parent) parent.Children = append(parent.Children, ota) case plumbing.REFDeltaObject: delta = true parent, ok := p.oiByHash[oh.Reference] if !ok { // can't find referenced object in this pack file // this must be a "thin" pack. 
parent = &objectInfo{ //Placeholder parent SHA1: oh.Reference, ExternalRef: true, // mark as an external reference that must be resolved Type: plumbing.AnyObject, DiskType: plumbing.AnyObject, } p.oiByHash[oh.Reference] = parent } ota = newDeltaObject(oh.Offset, oh.Length, t, parent) parent.Children = append(parent.Children, ota) default: ota = newBaseObject(oh.Offset, oh.Length, t) } _, crc, err := p.scanner.NextObject(buf) if err != nil { return err } ota.Crc32 = crc ota.Length = oh.Length data := buf.Bytes() if !delta { sha1, err := getSHA1(ota.Type, data) if err != nil { return err } ota.SHA1 = sha1 p.oiByHash[ota.SHA1] = ota } if p.storage != nil && !delta { obj := new(plumbing.MemoryObject) obj.SetSize(oh.Length) obj.SetType(oh.Type) if _, err := obj.Write(data); err != nil { return err } if _, err := p.storage.SetEncodedObject(obj); err != nil { return err } } if delta && !p.scanner.IsSeekable { p.deltas[oh.Offset] = make([]byte, len(data)) copy(p.deltas[oh.Offset], data) } p.oiByOffset[oh.Offset] = ota p.oi[i] = ota } return nil } func (p *Parser) resolveDeltas() error { for _, obj := range p.oi { content, err := p.get(obj) if err != nil { return err } if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil { return err } if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil { return err } if !obj.IsDelta() && len(obj.Children) > 0 { for _, child := range obj.Children { if _, err := p.resolveObject(child, content); err != nil { return err } } // Remove the delta from the cache. if obj.DiskType.IsDelta() && !p.scanner.IsSeekable { delete(p.deltas, obj.Offset) } } } return nil } func (p *Parser) get(o *objectInfo) (b []byte, err error) { var ok bool if !o.ExternalRef { // skip cache check for placeholder parents b, ok = p.cache.Get(o.Offset) } // If it's not on the cache and is not a delta we can try to find it in the // storage, if there's one. External refs must enter here. 
if !ok && p.storage != nil && !o.Type.IsDelta() { e, err := p.storage.EncodedObject(plumbing.AnyObject, o.SHA1) if err != nil { return nil, err } o.Type = e.Type() r, err := e.Reader() if err != nil { return nil, err } b = make([]byte, e.Size()) if _, err = r.Read(b); err != nil { return nil, err } } if b != nil { return b, nil } if o.ExternalRef { // we were not able to resolve a ref in a thin pack return nil, ErrReferenceDeltaNotFound } var data []byte if o.DiskType.IsDelta() { base, err := p.get(o.Parent) if err != nil { return nil, err } data, err = p.resolveObject(o, base) if err != nil { return nil, err } } else { data, err = p.readData(o) if err != nil { return nil, err } } if len(o.Children) > 0 { p.cache.Put(o.Offset, data) } return data, nil } func (p *Parser) resolveObject( o *objectInfo, base []byte, ) ([]byte, error) { if !o.DiskType.IsDelta() { return nil, nil } data, err := p.readData(o) if err != nil { return nil, err } data, err = applyPatchBase(o, data, base) if err != nil { return nil, err } if p.storage != nil { obj := new(plumbing.MemoryObject) obj.SetSize(o.Size()) obj.SetType(o.Type) if _, err := obj.Write(data); err != nil { return nil, err } if _, err := p.storage.SetEncodedObject(obj); err != nil { return nil, err } } return data, nil } func (p *Parser) readData(o *objectInfo) ([]byte, error) { if !p.scanner.IsSeekable && o.DiskType.IsDelta() { data, ok := p.deltas[o.Offset] if !ok { return nil, ErrDeltaNotCached } return data, nil } if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil { return nil, err } buf := new(bytes.Buffer) if _, _, err := p.scanner.NextObject(buf); err != nil { return nil, err } return buf.Bytes(), nil } func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) { patched, err := PatchDelta(base, data) if err != nil { return nil, err } if ota.SHA1 == plumbing.ZeroHash { ota.Type = ota.Parent.Type sha1, err := getSHA1(ota.Type, patched) if err != nil { return nil, err } ota.SHA1 = sha1 
ota.Length = int64(len(patched)) } return patched, nil } func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) { hasher := plumbing.NewHasher(t, int64(len(data))) if _, err := hasher.Write(data); err != nil { return plumbing.ZeroHash, err } return hasher.Sum(), nil } type objectInfo struct { Offset int64 Length int64 Type plumbing.ObjectType DiskType plumbing.ObjectType ExternalRef bool // indicates this is an external reference in a thin pack file Crc32 uint32 Parent *objectInfo Children []*objectInfo SHA1 plumbing.Hash } func newBaseObject(offset, length int64, t plumbing.ObjectType) *objectInfo { return newDeltaObject(offset, length, t, nil) } func newDeltaObject( offset, length int64, t plumbing.ObjectType, parent *objectInfo, ) *objectInfo { obj := &objectInfo{ Offset: offset, Length: length, Type: t, DiskType: t, Crc32: 0, Parent: parent, } return obj } func (o *objectInfo) IsDelta() bool { return o.Type.IsDelta() } func (o *objectInfo) Size() int64 { return o.Length } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/parser_test.go000066400000000000000000000160141345605224300264460ustar00rootroot00000000000000package packfile_test import ( "io" "testing" git "gopkg.in/src-d/go-git.v4" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-git.v4/plumbing/storer" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type ParserSuite struct { fixtures.Suite } var _ = Suite(&ParserSuite{}) func (s *ParserSuite) TestParserHashes(c *C) { f := fixtures.Basic().One() scanner := packfile.NewScanner(f.Packfile()) obs := new(testObserver) parser, err := packfile.NewParser(scanner, obs) c.Assert(err, IsNil) ch, err := parser.Parse() c.Assert(err, IsNil) checksum := "a3fed42da1e8189a077c0e6846c040dcf73fc9dd" c.Assert(ch.String(), Equals, checksum) c.Assert(obs.checksum, Equals, checksum) c.Assert(int(obs.count), Equals, int(31)) commit := plumbing.CommitObject blob := plumbing.BlobObject tree := plumbing.TreeObject objs := []observerObject{ {"e8d3ffab552895c19b9fcf7aa264d277cde33881", commit, 254, 12, 0xaa07ba4b}, {"6ecf0ef2c2dffb796033e5a02219af86ec6584e5", commit, 245, 186, 0xf706df58}, {"918c48b83bd081e863dbe1b80f8998f058cd8294", commit, 242, 286, 0x12438846}, {"af2d6a6954d532f8ffb47615169c8fdf9d383a1a", commit, 242, 449, 0x2905a38c}, {"1669dce138d9b841a518c64b10914d88f5e488ea", commit, 333, 615, 0xd9429436}, {"a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69", commit, 332, 838, 0xbecfde4e}, {"35e85108805c84807bc66a02d91535e1e24b38b9", commit, 244, 1063, 0x780e4b3e}, {"b8e471f58bcbca63b07bda20e428190409c2db47", commit, 243, 1230, 0xdc18344f}, {"b029517f6300c2da0f4b651b8642506cd6aaf45d", commit, 187, 1392, 0xcf4e4280}, {"32858aad3c383ed1ff0a0f9bdf231d54a00c9e88", blob, 189, 1524, 0x1f08118a}, {"d3ff53e0564a9f87d8e84b6e28e5060e517008aa", blob, 18, 1685, 0xafded7b8}, {"c192bd6a24ea1ab01d78686e417c8bdc7c3d197f", blob, 1072, 1713, 0xcc1428ed}, {"d5c0f4ab811897cadf03aec358ae60d21f91c50d", blob, 76110, 2351, 0x1631d22f}, {"880cd14280f4b9b6ed3986d6671f907d7cc2a198", blob, 2780, 78050, 0xbfff5850}, {"49c6bb89b17060d7b4deacb7b338fcc6ea2352a9", blob, 217848, 78882, 0xd108e1d8}, {"c8f1d8c61f9da76f4cb49fd86322b6e685dba956", blob, 706, 80725, 0x8e97ba25}, {"9a48f23120e880dfbe41f7c9b7b708e9ee62a492", blob, 11488, 80998, 0x7316ff70}, 
{"9dea2395f5403188298c1dabe8bdafe562c491e3", blob, 78, 84032, 0xdb4fce56}, {"dbd3641b371024f44d0e469a9c8f5457b0660de1", tree, 272, 84115, 0x901cce2c}, {"a8d315b2b1c615d43042c3a62402b8a54288cf5c", tree, 271, 84375, 0xec4552b0}, {"a39771a7651f97faf5c72e08224d857fc35133db", tree, 38, 84430, 0x847905bf}, {"5a877e6a906a2743ad6e45d99c1793642aaf8eda", tree, 75, 84479, 0x3689459a}, {"586af567d0bb5e771e49bdd9434f5e0fb76d25fa", tree, 38, 84559, 0xe67af94a}, {"cf4aa3b38974fb7d81f367c0830f7d78d65ab86b", tree, 34, 84608, 0xc2314a2e}, {"7e59600739c96546163833214c36459e324bad0a", blob, 9, 84653, 0xcd987848}, {"fb72698cab7617ac416264415f13224dfd7a165e", tree, 238, 84671, 0x8a853a6d}, {"4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd", tree, 179, 84688, 0x70c6518}, {"eba74343e2f15d62adedfd8c883ee0262b5c8021", tree, 148, 84708, 0x4f4108e2}, {"c2d30fa8ef288618f65f6eed6e168e0d514886f4", tree, 110, 84725, 0xd6fe09e9}, {"8dcef98b1d52143e1e2dbc458ffe38f925786bf2", tree, 111, 84741, 0xf07a2804}, {"aa9b383c260e1d05fbbf6b30a02914555e20c725", tree, 73, 84760, 0x1d75d6be}, } c.Assert(obs.objects, DeepEquals, objs) } func (s *ParserSuite) TestThinPack(c *C) { // Initialize an empty repository fs, err := git.PlainInit(c.MkDir(), true) c.Assert(err, IsNil) // Try to parse a thin pack without having the required objects in the repo to // see if the correct errors are returned thinpack := fixtures.ByTag("thinpack").One() scanner := packfile.NewScanner(thinpack.Packfile()) parser, err := packfile.NewParserWithStorage(scanner, fs.Storer) // ParserWithStorage writes to the storer all parsed objects! 
c.Assert(err, IsNil) _, err = parser.Parse() c.Assert(err, Equals, plumbing.ErrObjectNotFound) // start over with a clean repo fs, err = git.PlainInit(c.MkDir(), true) c.Assert(err, IsNil) // Now unpack a base packfile into our empty repo: f := fixtures.ByURL("https://github.com/spinnaker/spinnaker.git").One() w, err := fs.Storer.(storer.PackfileWriter).PackfileWriter() c.Assert(err, IsNil) _, err = io.Copy(w, f.Packfile()) c.Assert(err, IsNil) w.Close() // Check that the test object that will come with our thin pack is *not* in the repo _, err = fs.Storer.EncodedObject(plumbing.CommitObject, thinpack.Head) c.Assert(err, Equals, plumbing.ErrObjectNotFound) // Now unpack the thin pack: scanner = packfile.NewScanner(thinpack.Packfile()) parser, err = packfile.NewParserWithStorage(scanner, fs.Storer) // ParserWithStorage writes to the storer all parsed objects! c.Assert(err, IsNil) h, err := parser.Parse() c.Assert(err, IsNil) c.Assert(h, Equals, plumbing.NewHash("1288734cbe0b95892e663221d94b95de1f5d7be8")) // Check that our test object is now accessible _, err = fs.Storer.EncodedObject(plumbing.CommitObject, thinpack.Head) c.Assert(err, IsNil) } type observerObject struct { hash string otype plumbing.ObjectType size int64 offset int64 crc uint32 } type testObserver struct { count uint32 checksum string objects []observerObject pos map[int64]int } func (t *testObserver) OnHeader(count uint32) error { t.count = count t.pos = make(map[int64]int, count) return nil } func (t *testObserver) OnInflatedObjectHeader(otype plumbing.ObjectType, objSize int64, pos int64) error { o := t.get(pos) o.otype = otype o.size = objSize o.offset = pos t.put(pos, o) return nil } func (t *testObserver) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error { o := t.get(pos) o.hash = h.String() o.crc = crc t.put(pos, o) return nil } func (t *testObserver) OnFooter(h plumbing.Hash) error { t.checksum = h.String() return nil } func (t *testObserver) get(pos int64) 
observerObject { i, ok := t.pos[pos] if ok { return t.objects[i] } return observerObject{} } func (t *testObserver) put(pos int64, o observerObject) { i, ok := t.pos[pos] if ok { t.objects[i] = o return } t.pos[pos] = len(t.objects) t.objects = append(t.objects, o) } func BenchmarkParse(b *testing.B) { if err := fixtures.Init(); err != nil { b.Fatal(err) } defer func() { if err := fixtures.Clean(); err != nil { b.Fatal(err) } }() for _, f := range fixtures.ByTag("packfile") { b.Run(f.URL, func(b *testing.B) { for i := 0; i < b.N; i++ { parser, err := packfile.NewParser(packfile.NewScanner(f.Packfile())) if err != nil { b.Fatal(err) } _, err = parser.Parse() if err != nil { b.Fatal(err) } } }) } } func BenchmarkParseBasic(b *testing.B) { if err := fixtures.Init(); err != nil { b.Fatal(err) } defer func() { if err := fixtures.Clean(); err != nil { b.Fatal(err) } }() f := fixtures.Basic().One() for i := 0; i < b.N; i++ { parser, err := packfile.NewParser(packfile.NewScanner(f.Packfile())) if err != nil { b.Fatal(err) } _, err = parser.Parse() if err != nil { b.Fatal(err) } } } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/patch_delta.go000066400000000000000000000112421345605224300263610ustar00rootroot00000000000000package packfile import ( "errors" "io/ioutil" "gopkg.in/src-d/go-git.v4/plumbing" ) // See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h // https://github.com/git/git/blob/c2c5f6b1e479f2c38e0e01345350620944e3527f/patch-delta.c, // and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js // for details about the delta format. const deltaSizeMin = 4 // ApplyDelta writes to target the result of applying the modification deltas in delta to base. 
func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) error { r, err := base.Reader() if err != nil { return err } w, err := target.Writer() if err != nil { return err } src, err := ioutil.ReadAll(r) if err != nil { return err } dst, err := PatchDelta(src, delta) if err != nil { return err } target.SetSize(int64(len(dst))) _, err = w.Write(dst) return err } var ( ErrInvalidDelta = errors.New("invalid delta") ErrDeltaCmd = errors.New("wrong delta command") ) // PatchDelta returns the result of applying the modification deltas in delta to src. // An error will be returned if delta is corrupted (ErrDeltaLen) or an action command // is not copy from source or copy from delta (ErrDeltaCmd). func PatchDelta(src, delta []byte) ([]byte, error) { if len(delta) < deltaSizeMin { return nil, ErrInvalidDelta } srcSz, delta := decodeLEB128(delta) if srcSz != uint(len(src)) { return nil, ErrInvalidDelta } targetSz, delta := decodeLEB128(delta) remainingTargetSz := targetSz var cmd byte dest := make([]byte, 0, targetSz) for { if len(delta) == 0 { return nil, ErrInvalidDelta } cmd = delta[0] delta = delta[1:] if isCopyFromSrc(cmd) { var offset, sz uint var err error offset, delta, err = decodeOffset(cmd, delta) if err != nil { return nil, err } sz, delta, err = decodeSize(cmd, delta) if err != nil { return nil, err } if invalidSize(sz, targetSz) || invalidOffsetSize(offset, sz, srcSz) { break } dest = append(dest, src[offset:offset+sz]...) remainingTargetSz -= sz } else if isCopyFromDelta(cmd) { sz := uint(cmd) // cmd is the size itself if invalidSize(sz, targetSz) { return nil, ErrInvalidDelta } if uint(len(delta)) < sz { return nil, ErrInvalidDelta } dest = append(dest, delta[0:sz]...) remainingTargetSz -= sz delta = delta[sz:] } else { return nil, ErrDeltaCmd } if remainingTargetSz <= 0 { break } } return dest, nil } // Decodes a number encoded as an unsigned LEB128 at the start of some // binary data and returns the decoded number and the rest of the // stream. 
// // This must be called twice on the delta data buffer, first to get the // expected source buffer size, and again to get the target buffer size. func decodeLEB128(input []byte) (uint, []byte) { var num, sz uint var b byte for { b = input[sz] num |= (uint(b) & payload) << (sz * 7) // concats 7 bits chunks sz++ if uint(b)&continuation == 0 || sz == uint(len(input)) { break } } return num, input[sz:] } const ( payload = 0x7f // 0111 1111 continuation = 0x80 // 1000 0000 ) func isCopyFromSrc(cmd byte) bool { return (cmd & 0x80) != 0 } func isCopyFromDelta(cmd byte) bool { return (cmd&0x80) == 0 && cmd != 0 } func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) { var offset uint if (cmd & 0x01) != 0 { if len(delta) == 0 { return 0, nil, ErrInvalidDelta } offset = uint(delta[0]) delta = delta[1:] } if (cmd & 0x02) != 0 { if len(delta) == 0 { return 0, nil, ErrInvalidDelta } offset |= uint(delta[0]) << 8 delta = delta[1:] } if (cmd & 0x04) != 0 { if len(delta) == 0 { return 0, nil, ErrInvalidDelta } offset |= uint(delta[0]) << 16 delta = delta[1:] } if (cmd & 0x08) != 0 { if len(delta) == 0 { return 0, nil, ErrInvalidDelta } offset |= uint(delta[0]) << 24 delta = delta[1:] } return offset, delta, nil } func decodeSize(cmd byte, delta []byte) (uint, []byte, error) { var sz uint if (cmd & 0x10) != 0 { if len(delta) == 0 { return 0, nil, ErrInvalidDelta } sz = uint(delta[0]) delta = delta[1:] } if (cmd & 0x20) != 0 { if len(delta) == 0 { return 0, nil, ErrInvalidDelta } sz |= uint(delta[0]) << 8 delta = delta[1:] } if (cmd & 0x40) != 0 { if len(delta) == 0 { return 0, nil, ErrInvalidDelta } sz |= uint(delta[0]) << 16 delta = delta[1:] } if sz == 0 { sz = 0x10000 } return sz, delta, nil } func invalidSize(sz, targetSz uint) bool { return sz > targetSz } func invalidOffsetSize(offset, sz, srcSz uint) bool { return sumOverflows(offset, sz) || offset+sz > srcSz } func sumOverflows(a, b uint) bool { return a+b < a } 
golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/scanner.go000066400000000000000000000251721345605224300255470ustar00rootroot00000000000000package packfile

import (
	"bufio"
	"bytes"
	"compress/zlib"
	"fmt"
	"hash"
	"hash/crc32"
	"io"
	stdioutil "io/ioutil"
	"sync"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/utils/binary"
	"gopkg.in/src-d/go-git.v4/utils/ioutil"
)

var (
	// ErrEmptyPackfile is returned by ReadHeader when no data is found in the packfile
	ErrEmptyPackfile = NewError("empty packfile")
	// ErrBadSignature is returned by ReadHeader when the signature in the packfile is incorrect.
	ErrBadSignature = NewError("malformed pack file signature")
	// ErrUnsupportedVersion is returned by ReadHeader when the packfile version is
	// different than VersionSupported.
	ErrUnsupportedVersion = NewError("unsupported packfile version")
	// ErrSeekNotSupported is returned when the underlying reader does not
	// support seeking.
	ErrSeekNotSupported = NewError("not seek support")
)

// ObjectHeader contains the information related to the object, this information
// is collected from the previous bytes to the content of the object.
type ObjectHeader struct {
	Type   plumbing.ObjectType
	Offset int64
	Length int64
	// Reference is the hash of the base object (REF-delta objects only).
	Reference plumbing.Hash
	// OffsetReference is the absolute offset of the base object
	// (OFS-delta objects only).
	OffsetReference int64
}

type Scanner struct {
	r   reader
	zr  readerResetter
	crc hash.Hash32

	// pendingObject is used to detect if an object has been read, or still
	// is waiting to be read
	pendingObject    *ObjectHeader
	version, objects uint32

	// IsSeekable says if this scanner can do Seek or not; to have a
	// seekable Scanner an r implementing io.Seeker is required
	IsSeekable bool
}

// NewScanner returns a new Scanner based on a reader, if the given reader
// implements io.ReadSeeker the Scanner will be also Seekable
func NewScanner(r io.Reader) *Scanner {
	seeker, ok := r.(io.ReadSeeker)
	if !ok {
		// Non-seekable input: wrap it so Seek(0, io.SeekCurrent) can still
		// report the current position.
		seeker = &trackableReader{Reader: r}
	}

	crc := crc32.NewIEEE()
	return &Scanner{
		// Everything read is also fed into the CRC32 hasher, which packfile
		// indexing relies on.
		r:          newTeeReader(newByteReadSeeker(seeker), crc),
		crc:        crc,
		IsSeekable: ok,
	}
}

// Header reads the whole packfile header (signature, version and object count).
// It returns the version and the object count and performs checks on the
// validity of the signature and the version fields. The result is cached,
// so calling Header again returns the stored values.
func (s *Scanner) Header() (version, objects uint32, err error) {
	if s.version != 0 {
		return s.version, s.objects, nil
	}

	sig, err := s.readSignature()
	if err != nil {
		if err == io.EOF {
			err = ErrEmptyPackfile
		}

		return
	}

	if !s.isValidSignature(sig) {
		err = ErrBadSignature
		return
	}

	version, err = s.readVersion()
	s.version = version
	if err != nil {
		return
	}

	if !s.isSupportedVersion(version) {
		err = ErrUnsupportedVersion.AddDetails("%d", version)
		return
	}

	objects, err = s.readCount()
	s.objects = objects
	return
}

// readSignature reads and returns the signature field in the packfile.
func (s *Scanner) readSignature() ([]byte, error) {
	var sig = make([]byte, 4)
	if _, err := io.ReadFull(s.r, sig); err != nil {
		return []byte{}, err
	}

	return sig, nil
}

// isValidSignature returns if sig is a valid packfile signature.
func (s *Scanner) isValidSignature(sig []byte) bool {
	return bytes.Equal(sig, signature)
}

// readVersion reads and returns the version field of a packfile.
func (s *Scanner) readVersion() (uint32, error) {
	return binary.ReadUint32(s.r)
}

// isSupportedVersion returns whether version v is supported by the parser.
// The current supported version is VersionSupported, defined above.
func (s *Scanner) isSupportedVersion(v uint32) bool {
	return v == VersionSupported
}

// readCount reads and returns the count of objects field of a packfile.
func (s *Scanner) readCount() (uint32, error) {
	return binary.ReadUint32(s.r)
}

// SeekObjectHeader seeks to specified offset and returns the ObjectHeader
// for the next object in the reader
func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) {
	// if seeking we assume that you are not interested in the header
	if s.version == 0 {
		s.version = VersionSupported
	}

	if _, err := s.r.Seek(offset, io.SeekStart); err != nil {
		return nil, err
	}

	h, err := s.nextObjectHeader()
	if err != nil {
		return nil, err
	}

	h.Offset = offset
	return h, nil
}

// NextObjectHeader returns the ObjectHeader for the next object in the reader
func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
	// Make sure the packfile header was consumed and any unread pending
	// object content is discarded before reading the next entry.
	if err := s.doPending(); err != nil {
		return nil, err
	}

	offset, err := s.r.Seek(0, io.SeekCurrent)
	if err != nil {
		return nil, err
	}

	h, err := s.nextObjectHeader()
	if err != nil {
		return nil, err
	}

	h.Offset = offset
	return h, nil
}

// nextObjectHeader returns the ObjectHeader for the next object in the reader
// without the Offset field
func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
	// Flush pushes bytes buffered by the tee reader into the CRC hasher;
	// the CRC is then reset so it only covers this object's entry.
	defer s.Flush()

	s.crc.Reset()

	h := &ObjectHeader{}
	// Mark this object as pending until its content is read (or discarded).
	s.pendingObject = h

	var err error
	h.Offset, err = s.r.Seek(0, io.SeekCurrent)
	if err != nil {
		return nil, err
	}

	h.Type, h.Length, err = s.readObjectTypeAndLength()
	if err != nil {
		return nil, err
	}

	switch h.Type {
	case plumbing.OFSDeltaObject:
		// OFS deltas encode a negative distance back to their base object.
		no, err := binary.ReadVariableWidthInt(s.r)
		if err != nil {
			return nil, err
		}

		h.OffsetReference = h.Offset - no
	case plumbing.REFDeltaObject:
		// REF deltas encode the hash of their base object.
		var err error
		h.Reference, err = binary.ReadHash(s.r)
		if err != nil {
			return nil, err
		}
	}

	return h, nil
}

// doPending lazily consumes the packfile header and discards the content
// of a previously returned, still-unread object.
func (s *Scanner) doPending() error {
	if s.version == 0 {
		var err error
		s.version, s.objects, err = s.Header()
		if err != nil {
			return err
		}
	}

	return s.discardObjectIfNeeded()
}

// discardObjectIfNeeded drains the zlib stream of the pending object, if
// any, verifying that the discarded length matches the declared one.
func (s *Scanner) discardObjectIfNeeded() error {
	if s.pendingObject == nil {
		return nil
	}

	h := s.pendingObject
	n, _, err := s.NextObject(stdioutil.Discard)
	if err != nil {
		return err
	}

	if n != h.Length {
		return fmt.Errorf(
			"error discarding object, discarded %d, expected %d",
			n, h.Length,
		)
	}

	return nil
}

// readObjectTypeAndLength reads and returns the object type and the
// length field from an object entry in a packfile.
func (s *Scanner) readObjectTypeAndLength() (plumbing.ObjectType, int64, error) {
	t, c, err := s.readType()
	if err != nil {
		return t, 0, err
	}

	l, err := s.readLength(c)

	return t, l, err
}

// readType reads the first entry byte and returns the object type encoded
// in it, along with the raw byte (which also holds length bits).
func (s *Scanner) readType() (plumbing.ObjectType, byte, error) {
	var c byte
	var err error
	if c, err = s.r.ReadByte(); err != nil {
		return plumbing.ObjectType(0), 0, err
	}

	typ := parseType(c)

	return typ, c, nil
}

// parseType extracts the 3-bit object type from an entry's first byte.
func parseType(b byte) plumbing.ObjectType {
	return plumbing.ObjectType((b & maskType) >> firstLengthBits)
}

// the length is codified in the last 4 bits of the first byte and in
// the last 7 bits of subsequent bytes. Last byte has a 0 MSB.
func (s *Scanner) readLength(first byte) (int64, error) {
	// The first byte contributes its low 4 bits; each continuation byte
	// contributes 7 more, least-significant group first.
	length := int64(first & maskFirstLength)

	c := first
	shift := firstLengthBits
	var err error
	for c&maskContinue > 0 {
		if c, err = s.r.ReadByte(); err != nil {
			return 0, err
		}

		length += int64(c&maskLength) << shift
		shift += lengthBits
	}

	return length, nil
}

// NextObject writes the content of the next object into the writer, returns
// the number of bytes written, the CRC32 of the content and an error, if any
func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) {
	defer s.crc.Reset()

	// The current object is no longer pending once its content is read.
	s.pendingObject = nil
	written, err = s.copyObject(w)
	// Push any bytes still buffered by the tee reader into the hasher
	// before taking the CRC sum.
	s.Flush()
	crc32 = s.crc.Sum32()
	return
}

// copyObject reads and writes a non-deltified object
// from its zlib stream in an object entry in the packfile.
func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
	if s.zr == nil {
		var zr io.ReadCloser
		zr, err = zlib.NewReader(s.r)
		if err != nil {
			return 0, fmt.Errorf("zlib initialization error: %s", err)
		}

		// zlib.NewReader's result implements zlib.Resetter, so the reader
		// can be reused for subsequent objects.
		s.zr = zr.(readerResetter)
	} else {
		if err = s.zr.Reset(s.r, nil); err != nil {
			return 0, fmt.Errorf("zlib reset error: %s", err)
		}
	}

	defer ioutil.CheckClose(s.zr, &err)
	buf := byteSlicePool.Get().([]byte)
	n, err = io.CopyBuffer(w, s.zr, buf)
	byteSlicePool.Put(buf)
	return
}

// byteSlicePool reuses 32 KiB scratch buffers across object copies to
// avoid a per-object allocation in io.CopyBuffer.
var byteSlicePool = sync.Pool{
	New: func() interface{} {
		return make([]byte, 32*1024)
	},
}

// SeekFromStart sets a new offset from start, returns the old position before
// the change.
func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
	// if seeking we assume that you are not interested in the header
	if s.version == 0 {
		s.version = VersionSupported
	}

	previous, err = s.r.Seek(0, io.SeekCurrent)
	if err != nil {
		return -1, err
	}

	_, err = s.r.Seek(offset, io.SeekStart)
	return previous, err
}

// Checksum returns the checksum of the packfile, discarding any pending
// object content first so the trailing hash is read from the right place.
func (s *Scanner) Checksum() (plumbing.Hash, error) {
	err := s.discardObjectIfNeeded()
	if err != nil {
		return plumbing.ZeroHash, err
	}

	return binary.ReadHash(s.r)
}

// Close reads the reader until io.EOF
func (s *Scanner) Close() error {
	buf := byteSlicePool.Get().([]byte)
	_, err := io.CopyBuffer(stdioutil.Discard, s.r, buf)
	byteSlicePool.Put(buf)

	return err
}

// Flush finishes writing the buffer to crc hasher in case we are using
// a teeReader. Otherwise it is a no-op.
func (s *Scanner) Flush() error {
	tee, ok := s.r.(*teeReader)
	if ok {
		return tee.Flush()
	}
	return nil
}

// trackableReader counts the bytes read through it so that a plain
// io.Reader can still answer Seek(0, io.SeekCurrent) position queries.
type trackableReader struct {
	count int64
	io.Reader
}

// Read reads up to len(p) bytes into p.
func (r *trackableReader) Read(p []byte) (n int, err error) {
	n, err = r.Reader.Read(p)
	// Track the running total so Seek can report the current position.
	r.count += int64(n)

	return
}

// Seek only supports io.SeekCurrent, any other operation fails
func (r *trackableReader) Seek(offset int64, whence int) (int64, error) {
	if whence != io.SeekCurrent {
		return -1, ErrSeekNotSupported
	}

	return r.count, nil
}

// newByteReadSeeker wraps a ReadSeeker with buffering while keeping Seek
// results consistent with what has actually been consumed.
func newByteReadSeeker(r io.ReadSeeker) *bufferedSeeker {
	return &bufferedSeeker{
		r:      r,
		Reader: *bufio.NewReader(r),
	}
}

type bufferedSeeker struct {
	r io.ReadSeeker
	bufio.Reader
}

func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
	if whence == io.SeekCurrent && offset == 0 {
		// Position query: subtract what is buffered but not yet consumed.
		current, err := r.r.Seek(offset, whence)
		if err != nil {
			return current, err
		}

		return current - int64(r.Buffered()), nil
	}

	// A real seek invalidates the buffer contents, so reset afterwards.
	defer r.Reader.Reset(r.r)
	return r.r.Seek(offset, whence)
}

type readerResetter interface {
	io.ReadCloser
	zlib.Resetter
}

type reader interface {
	io.Reader
	io.ByteReader
	io.Seeker
}

// teeReader mirrors everything read through it into a CRC32 hasher.
// Byte-sized reads are staged in bufWriter and pushed to the hasher on
// Flush; bulk reads are written to the hasher directly after flushing.
type teeReader struct {
	reader
	w         hash.Hash32
	bufWriter *bufio.Writer
}

func newTeeReader(r reader, h hash.Hash32) *teeReader {
	return &teeReader{
		reader:    r,
		w:         h,
		bufWriter: bufio.NewWriter(h),
	}
}

func (r *teeReader) Read(p []byte) (n int, err error) {
	// Flush staged single bytes first so the hasher sees data in order.
	r.Flush()

	n, err = r.reader.Read(p)
	if n > 0 {
		if n, err := r.w.Write(p[:n]); err != nil {
			return n, err
		}
	}
	return
}

func (r *teeReader) ReadByte() (b byte, err error) {
	b, err = r.reader.ReadByte()
	if err == nil {
		return b, r.bufWriter.WriteByte(b)
	}

	return
}

func (r *teeReader) Flush() (err error) {
	return r.bufWriter.Flush()
}
golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/packfile/scanner_test.go000066400000000000000000000204551345605224300266070ustar00rootroot00000000000000package packfile

import (
	"bytes"
	"io"

	"gopkg.in/src-d/go-git.v4/plumbing"

	.
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type ScannerSuite struct { fixtures.Suite } var _ = Suite(&ScannerSuite{}) func (s *ScannerSuite) TestHeader(c *C) { r := fixtures.Basic().One().Packfile() p := NewScanner(r) version, objects, err := p.Header() c.Assert(err, IsNil) c.Assert(version, Equals, VersionSupported) c.Assert(objects, Equals, uint32(31)) } func (s *ScannerSuite) TestNextObjectHeaderWithoutHeader(c *C) { r := fixtures.Basic().One().Packfile() p := NewScanner(r) h, err := p.NextObjectHeader() c.Assert(err, IsNil) c.Assert(h, DeepEquals, &expectedHeadersOFS[0]) version, objects, err := p.Header() c.Assert(err, IsNil) c.Assert(version, Equals, VersionSupported) c.Assert(objects, Equals, uint32(31)) } func (s *ScannerSuite) TestNextObjectHeaderREFDelta(c *C) { s.testNextObjectHeader(c, "ref-delta", expectedHeadersREF, expectedCRCREF) } func (s *ScannerSuite) TestNextObjectHeaderOFSDelta(c *C) { s.testNextObjectHeader(c, "ofs-delta", expectedHeadersOFS, expectedCRCOFS) } func (s *ScannerSuite) testNextObjectHeader(c *C, tag string, expected []ObjectHeader, expectedCRC []uint32) { r := fixtures.Basic().ByTag(tag).One().Packfile() p := NewScanner(r) _, objects, err := p.Header() c.Assert(err, IsNil) for i := 0; i < int(objects); i++ { h, err := p.NextObjectHeader() c.Assert(err, IsNil) c.Assert(*h, DeepEquals, expected[i]) buf := bytes.NewBuffer(nil) n, crcFromScanner, err := p.NextObject(buf) c.Assert(err, IsNil) c.Assert(n, Equals, h.Length) c.Assert(crcFromScanner, Equals, expectedCRC[i]) } n, err := p.Checksum() c.Assert(err, IsNil) c.Assert(n, HasLen, 20) } func (s *ScannerSuite) TestNextObjectHeaderWithOutReadObject(c *C) { f := fixtures.Basic().ByTag("ref-delta").One() r := f.Packfile() p := NewScanner(r) _, objects, err := p.Header() c.Assert(err, IsNil) for i := 0; i < int(objects); i++ { h, _ := p.NextObjectHeader() c.Assert(err, IsNil) c.Assert(*h, DeepEquals, expectedHeadersREF[i]) } err = p.discardObjectIfNeeded() c.Assert(err, 
IsNil) n, err := p.Checksum() c.Assert(err, IsNil) c.Assert(n, Equals, f.PackfileHash) } func (s *ScannerSuite) TestNextObjectHeaderWithOutReadObjectNonSeekable(c *C) { f := fixtures.Basic().ByTag("ref-delta").One() r := io.MultiReader(f.Packfile()) p := NewScanner(r) _, objects, err := p.Header() c.Assert(err, IsNil) for i := 0; i < int(objects); i++ { h, _ := p.NextObjectHeader() c.Assert(err, IsNil) c.Assert(*h, DeepEquals, expectedHeadersREF[i]) } err = p.discardObjectIfNeeded() c.Assert(err, IsNil) n, err := p.Checksum() c.Assert(err, IsNil) c.Assert(n, Equals, f.PackfileHash) } func (s *ScannerSuite) TestSeekObjectHeader(c *C) { r := fixtures.Basic().One().Packfile() p := NewScanner(r) h, err := p.SeekObjectHeader(expectedHeadersOFS[4].Offset) c.Assert(err, IsNil) c.Assert(h, DeepEquals, &expectedHeadersOFS[4]) } func (s *ScannerSuite) TestSeekObjectHeaderNonSeekable(c *C) { r := io.MultiReader(fixtures.Basic().One().Packfile()) p := NewScanner(r) _, err := p.SeekObjectHeader(expectedHeadersOFS[4].Offset) c.Assert(err, Equals, ErrSeekNotSupported) } var expectedHeadersOFS = []ObjectHeader{ {Type: plumbing.CommitObject, Offset: 12, Length: 254}, {Type: plumbing.OFSDeltaObject, Offset: 186, Length: 93, OffsetReference: 12}, {Type: plumbing.CommitObject, Offset: 286, Length: 242}, {Type: plumbing.CommitObject, Offset: 449, Length: 242}, {Type: plumbing.CommitObject, Offset: 615, Length: 333}, {Type: plumbing.CommitObject, Offset: 838, Length: 332}, {Type: plumbing.CommitObject, Offset: 1063, Length: 244}, {Type: plumbing.CommitObject, Offset: 1230, Length: 243}, {Type: plumbing.CommitObject, Offset: 1392, Length: 187}, {Type: plumbing.BlobObject, Offset: 1524, Length: 189}, {Type: plumbing.BlobObject, Offset: 1685, Length: 18}, {Type: plumbing.BlobObject, Offset: 1713, Length: 1072}, {Type: plumbing.BlobObject, Offset: 2351, Length: 76110}, {Type: plumbing.BlobObject, Offset: 78050, Length: 2780}, {Type: plumbing.BlobObject, Offset: 78882, Length: 217848}, 
{Type: plumbing.BlobObject, Offset: 80725, Length: 706}, {Type: plumbing.BlobObject, Offset: 80998, Length: 11488}, {Type: plumbing.BlobObject, Offset: 84032, Length: 78}, {Type: plumbing.TreeObject, Offset: 84115, Length: 272}, {Type: plumbing.OFSDeltaObject, Offset: 84375, Length: 43, OffsetReference: 84115}, {Type: plumbing.TreeObject, Offset: 84430, Length: 38}, {Type: plumbing.TreeObject, Offset: 84479, Length: 75}, {Type: plumbing.TreeObject, Offset: 84559, Length: 38}, {Type: plumbing.TreeObject, Offset: 84608, Length: 34}, {Type: plumbing.BlobObject, Offset: 84653, Length: 9}, {Type: plumbing.OFSDeltaObject, Offset: 84671, Length: 6, OffsetReference: 84375}, {Type: plumbing.OFSDeltaObject, Offset: 84688, Length: 9, OffsetReference: 84375}, {Type: plumbing.OFSDeltaObject, Offset: 84708, Length: 6, OffsetReference: 84375}, {Type: plumbing.OFSDeltaObject, Offset: 84725, Length: 5, OffsetReference: 84115}, {Type: plumbing.OFSDeltaObject, Offset: 84741, Length: 8, OffsetReference: 84375}, {Type: plumbing.OFSDeltaObject, Offset: 84760, Length: 4, OffsetReference: 84741}, } var expectedCRCOFS = []uint32{ 0xaa07ba4b, 0xf706df58, 0x12438846, 0x2905a38c, 0xd9429436, 0xbecfde4e, 0x780e4b3e, 0xdc18344f, 0xcf4e4280, 0x1f08118a, 0xafded7b8, 0xcc1428ed, 0x1631d22f, 0xbfff5850, 0xd108e1d8, 0x8e97ba25, 0x7316ff70, 0xdb4fce56, 0x901cce2c, 0xec4552b0, 0x847905bf, 0x3689459a, 0xe67af94a, 0xc2314a2e, 0xcd987848, 0x8a853a6d, 0x70c6518, 0x4f4108e2, 0xd6fe09e9, 0xf07a2804, 0x1d75d6be, } var expectedHeadersREF = []ObjectHeader{ {Type: plumbing.CommitObject, Offset: 12, Length: 254}, {Type: plumbing.REFDeltaObject, Offset: 186, Length: 93, Reference: plumbing.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881")}, {Type: plumbing.CommitObject, Offset: 304, Length: 242}, {Type: plumbing.CommitObject, Offset: 467, Length: 242}, {Type: plumbing.CommitObject, Offset: 633, Length: 333}, {Type: plumbing.CommitObject, Offset: 856, Length: 332}, {Type: plumbing.CommitObject, Offset: 1081, 
Length: 243}, {Type: plumbing.CommitObject, Offset: 1243, Length: 244}, {Type: plumbing.CommitObject, Offset: 1410, Length: 187}, {Type: plumbing.BlobObject, Offset: 1542, Length: 189}, {Type: plumbing.BlobObject, Offset: 1703, Length: 18}, {Type: plumbing.BlobObject, Offset: 1731, Length: 1072}, {Type: plumbing.BlobObject, Offset: 2369, Length: 76110}, {Type: plumbing.TreeObject, Offset: 78068, Length: 38}, {Type: plumbing.BlobObject, Offset: 78117, Length: 2780}, {Type: plumbing.TreeObject, Offset: 79049, Length: 75}, {Type: plumbing.BlobObject, Offset: 79129, Length: 217848}, {Type: plumbing.BlobObject, Offset: 80972, Length: 706}, {Type: plumbing.TreeObject, Offset: 81265, Length: 38}, {Type: plumbing.BlobObject, Offset: 81314, Length: 11488}, {Type: plumbing.TreeObject, Offset: 84752, Length: 34}, {Type: plumbing.BlobObject, Offset: 84797, Length: 78}, {Type: plumbing.TreeObject, Offset: 84880, Length: 271}, {Type: plumbing.REFDeltaObject, Offset: 85141, Length: 6, Reference: plumbing.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c")}, {Type: plumbing.REFDeltaObject, Offset: 85176, Length: 37, Reference: plumbing.NewHash("fb72698cab7617ac416264415f13224dfd7a165e")}, {Type: plumbing.BlobObject, Offset: 85244, Length: 9}, {Type: plumbing.REFDeltaObject, Offset: 85262, Length: 9, Reference: plumbing.NewHash("fb72698cab7617ac416264415f13224dfd7a165e")}, {Type: plumbing.REFDeltaObject, Offset: 85300, Length: 6, Reference: plumbing.NewHash("fb72698cab7617ac416264415f13224dfd7a165e")}, {Type: plumbing.TreeObject, Offset: 85335, Length: 110}, {Type: plumbing.REFDeltaObject, Offset: 85448, Length: 8, Reference: plumbing.NewHash("eba74343e2f15d62adedfd8c883ee0262b5c8021")}, {Type: plumbing.TreeObject, Offset: 85485, Length: 73}, } var expectedCRCREF = []uint32{ 0xaa07ba4b, 0xfb4725a4, 0x12438846, 0x2905a38c, 0xd9429436, 0xbecfde4e, 0xdc18344f, 0x780e4b3e, 0xcf4e4280, 0x1f08118a, 0xafded7b8, 0xcc1428ed, 0x1631d22f, 0x847905bf, 0x3e20f31d, 0x3689459a, 0xd108e1d8, 
0x71143d4a, 0xe67af94a, 0x739fb89f, 0xc2314a2e, 0x87864926, 0x415d752f, 0xf72fb182, 0x3ffa37d4, 0xcd987848, 0x2f20ac8f, 0xf2f0575, 0x7d8726e1, 0x740bf39, 0x26af4735, } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/pktline/000077500000000000000000000000001345605224300234525ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/pktline/encoder.go000066400000000000000000000057611345605224300254310ustar00rootroot00000000000000// Package pktline implements reading payloads form pkt-lines and encoding // pkt-lines from payloads. package pktline import ( "bytes" "errors" "fmt" "io" ) // An Encoder writes pkt-lines to an output stream. type Encoder struct { w io.Writer } const ( // MaxPayloadSize is the maximum payload size of a pkt-line in bytes. MaxPayloadSize = 65516 // For compatibility with canonical Git implementation, accept longer pkt-lines OversizePayloadMax = 65520 ) var ( // FlushPkt are the contents of a flush-pkt pkt-line. FlushPkt = []byte{'0', '0', '0', '0'} // Flush is the payload to use with the Encode method to encode a flush-pkt. Flush = []byte{} // FlushString is the payload to use with the EncodeString method to encode a flush-pkt. FlushString = "" // ErrPayloadTooLong is returned by the Encode methods when any of the // provided payloads is bigger than MaxPayloadSize. ErrPayloadTooLong = errors.New("payload is too long") ) // NewEncoder returns a new encoder that writes to w. func NewEncoder(w io.Writer) *Encoder { return &Encoder{ w: w, } } // Flush encodes a flush-pkt to the output stream. func (e *Encoder) Flush() error { _, err := e.w.Write(FlushPkt) return err } // Encode encodes a pkt-line with the payload specified and write it to // the output stream. If several payloads are specified, each of them // will get streamed in their own pkt-lines. 
func (e *Encoder) Encode(payloads ...[]byte) error { for _, p := range payloads { if err := e.encodeLine(p); err != nil { return err } } return nil } func (e *Encoder) encodeLine(p []byte) error { if len(p) > MaxPayloadSize { return ErrPayloadTooLong } if bytes.Equal(p, Flush) { return e.Flush() } n := len(p) + 4 if _, err := e.w.Write(asciiHex16(n)); err != nil { return err } _, err := e.w.Write(p) return err } // Returns the hexadecimal ascii representation of the 16 less // significant bits of n. The length of the returned slice will always // be 4. Example: if n is 1234 (0x4d2), the return value will be // []byte{'0', '4', 'd', '2'}. func asciiHex16(n int) []byte { var ret [4]byte ret[0] = byteToASCIIHex(byte(n & 0xf000 >> 12)) ret[1] = byteToASCIIHex(byte(n & 0x0f00 >> 8)) ret[2] = byteToASCIIHex(byte(n & 0x00f0 >> 4)) ret[3] = byteToASCIIHex(byte(n & 0x000f)) return ret[:] } // turns a byte into its hexadecimal ascii representation. Example: // from 11 (0xb) to 'b'. func byteToASCIIHex(n byte) byte { if n < 10 { return '0' + n } return 'a' - 10 + n } // EncodeString works similarly as Encode but payloads are specified as strings. func (e *Encoder) EncodeString(payloads ...string) error { for _, p := range payloads { if err := e.Encode([]byte(p)); err != nil { return err } } return nil } // Encodef encodes a single pkt-line with the payload formatted as // the format specifier. The rest of the arguments will be used in // the format string. func (e *Encoder) Encodef(format string, a ...interface{}) error { return e.EncodeString( fmt.Sprintf(format, a...), ) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/pktline/encoder_test.go000066400000000000000000000123441345605224300264630ustar00rootroot00000000000000package pktline_test import ( "bytes" "os" "strings" "testing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" . 
"gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type SuiteEncoder struct{} var _ = Suite(&SuiteEncoder{}) func (s *SuiteEncoder) TestFlush(c *C) { var buf bytes.Buffer e := pktline.NewEncoder(&buf) err := e.Flush() c.Assert(err, IsNil) obtained := buf.Bytes() c.Assert(obtained, DeepEquals, pktline.FlushPkt) } func (s *SuiteEncoder) TestEncode(c *C) { for i, test := range [...]struct { input [][]byte expected []byte }{ { input: [][]byte{ []byte("hello\n"), }, expected: []byte("000ahello\n"), }, { input: [][]byte{ []byte("hello\n"), pktline.Flush, }, expected: []byte("000ahello\n0000"), }, { input: [][]byte{ []byte("hello\n"), []byte("world!\n"), []byte("foo"), }, expected: []byte("000ahello\n000bworld!\n0007foo"), }, { input: [][]byte{ []byte("hello\n"), pktline.Flush, []byte("world!\n"), []byte("foo"), pktline.Flush, }, expected: []byte("000ahello\n0000000bworld!\n0007foo0000"), }, { input: [][]byte{ []byte(strings.Repeat("a", pktline.MaxPayloadSize)), }, expected: []byte( "fff0" + strings.Repeat("a", pktline.MaxPayloadSize)), }, { input: [][]byte{ []byte(strings.Repeat("a", pktline.MaxPayloadSize)), []byte(strings.Repeat("b", pktline.MaxPayloadSize)), }, expected: []byte( "fff0" + strings.Repeat("a", pktline.MaxPayloadSize) + "fff0" + strings.Repeat("b", pktline.MaxPayloadSize)), }, } { comment := Commentf("input %d = %v\n", i, test.input) var buf bytes.Buffer e := pktline.NewEncoder(&buf) err := e.Encode(test.input...) 
c.Assert(err, IsNil, comment) c.Assert(buf.Bytes(), DeepEquals, test.expected, comment) } } func (s *SuiteEncoder) TestEncodeErrPayloadTooLong(c *C) { for i, input := range [...][][]byte{ { []byte(strings.Repeat("a", pktline.MaxPayloadSize+1)), }, { []byte("hello world!"), []byte(strings.Repeat("a", pktline.MaxPayloadSize+1)), }, { []byte("hello world!"), []byte(strings.Repeat("a", pktline.MaxPayloadSize+1)), []byte("foo"), }, } { comment := Commentf("input %d = %v\n", i, input) var buf bytes.Buffer e := pktline.NewEncoder(&buf) err := e.Encode(input...) c.Assert(err, Equals, pktline.ErrPayloadTooLong, comment) } } func (s *SuiteEncoder) TestEncodeStrings(c *C) { for i, test := range [...]struct { input []string expected []byte }{ { input: []string{ "hello\n", }, expected: []byte("000ahello\n"), }, { input: []string{ "hello\n", pktline.FlushString, }, expected: []byte("000ahello\n0000"), }, { input: []string{ "hello\n", "world!\n", "foo", }, expected: []byte("000ahello\n000bworld!\n0007foo"), }, { input: []string{ "hello\n", pktline.FlushString, "world!\n", "foo", pktline.FlushString, }, expected: []byte("000ahello\n0000000bworld!\n0007foo0000"), }, { input: []string{ strings.Repeat("a", pktline.MaxPayloadSize), }, expected: []byte( "fff0" + strings.Repeat("a", pktline.MaxPayloadSize)), }, { input: []string{ strings.Repeat("a", pktline.MaxPayloadSize), strings.Repeat("b", pktline.MaxPayloadSize), }, expected: []byte( "fff0" + strings.Repeat("a", pktline.MaxPayloadSize) + "fff0" + strings.Repeat("b", pktline.MaxPayloadSize)), }, } { comment := Commentf("input %d = %v\n", i, test.input) var buf bytes.Buffer e := pktline.NewEncoder(&buf) err := e.EncodeString(test.input...) 
c.Assert(err, IsNil, comment) c.Assert(buf.Bytes(), DeepEquals, test.expected, comment) } } func (s *SuiteEncoder) TestEncodeStringErrPayloadTooLong(c *C) { for i, input := range [...][]string{ { strings.Repeat("a", pktline.MaxPayloadSize+1), }, { "hello world!", strings.Repeat("a", pktline.MaxPayloadSize+1), }, { "hello world!", strings.Repeat("a", pktline.MaxPayloadSize+1), "foo", }, } { comment := Commentf("input %d = %v\n", i, input) var buf bytes.Buffer e := pktline.NewEncoder(&buf) err := e.EncodeString(input...) c.Assert(err, Equals, pktline.ErrPayloadTooLong, comment) } } func (s *SuiteEncoder) TestEncodef(c *C) { format := " %s %d\n" str := "foo" d := 42 var buf bytes.Buffer e := pktline.NewEncoder(&buf) err := e.Encodef(format, str, d) c.Assert(err, IsNil) expected := []byte("000c foo 42\n") c.Assert(buf.Bytes(), DeepEquals, expected) } func ExampleEncoder() { // Create an encoder that writes pktlines to stdout. e := pktline.NewEncoder(os.Stdout) // Encode some data as a new pkt-line. _ = e.Encode([]byte("data\n")) // error checks removed for brevity // Encode a flush-pkt. _ = e.Flush() // Encode a couple of byte slices and a flush in one go. Each of // them will end up as payloads of their own pktlines. _ = e.Encode( []byte("hello\n"), []byte("world!\n"), pktline.Flush, ) // You can also encode strings: _ = e.EncodeString( "foo\n", "bar\n", pktline.FlushString, ) // You can also format and encode a payload: _ = e.Encodef(" %s %d\n", "foo", 42) // Output: // 0009data // 0000000ahello // 000bworld! // 00000008foo // 0008bar // 0000000c foo 42 } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/pktline/scanner.go000066400000000000000000000065061345605224300254410ustar00rootroot00000000000000package pktline import ( "errors" "io" ) const ( lenSize = 4 ) // ErrInvalidPktLen is returned by Err() when an invalid pkt-len is found. 
var ErrInvalidPktLen = errors.New("invalid pkt-len found") // Scanner provides a convenient interface for reading the payloads of a // series of pkt-lines. It takes an io.Reader providing the source, // which then can be tokenized through repeated calls to the Scan // method. // // After each Scan call, the Bytes method will return the payload of the // corresponding pkt-line on a shared buffer, which will be 65516 bytes // or smaller. Flush pkt-lines are represented by empty byte slices. // // Scanning stops at EOF or the first I/O error. type Scanner struct { r io.Reader // The reader provided by the client err error // Sticky error payload []byte // Last pkt-payload len [lenSize]byte // Last pkt-len } // NewScanner returns a new Scanner to read from r. func NewScanner(r io.Reader) *Scanner { return &Scanner{ r: r, } } // Err returns the first error encountered by the Scanner. func (s *Scanner) Err() error { return s.err } // Scan advances the Scanner to the next pkt-line, whose payload will // then be available through the Bytes method. Scanning stops at EOF // or the first I/O error. After Scan returns false, the Err method // will return any error that occurred during scanning, except that if // it was io.EOF, Err will return nil. func (s *Scanner) Scan() bool { var l int l, s.err = s.readPayloadLen() if s.err == io.EOF { s.err = nil return false } if s.err != nil { return false } if cap(s.payload) < l { s.payload = make([]byte, 0, l) } if _, s.err = io.ReadFull(s.r, s.payload[:l]); s.err != nil { return false } s.payload = s.payload[:l] return true } // Bytes returns the most recent payload generated by a call to Scan. // The underlying array may point to data that will be overwritten by a // subsequent call to Scan. It does no allocation. func (s *Scanner) Bytes() []byte { return s.payload } // Method readPayloadLen returns the payload length by reading the // pkt-len and subtracting the pkt-len size. 
func (s *Scanner) readPayloadLen() (int, error) { if _, err := io.ReadFull(s.r, s.len[:]); err != nil { if err == io.ErrUnexpectedEOF { return 0, ErrInvalidPktLen } return 0, err } n, err := hexDecode(s.len) if err != nil { return 0, err } switch { case n == 0: return 0, nil case n <= lenSize: return 0, ErrInvalidPktLen case n > OversizePayloadMax+lenSize: return 0, ErrInvalidPktLen default: return n - lenSize, nil } } // Turns the hexadecimal representation of a number in a byte slice into // a number. This function substitute strconv.ParseUint(string(buf), 16, // 16) and/or hex.Decode, to avoid generating new strings, thus helping the // GC. func hexDecode(buf [lenSize]byte) (int, error) { var ret int for i := 0; i < lenSize; i++ { n, err := asciiHexToByte(buf[i]) if err != nil { return 0, ErrInvalidPktLen } ret = 16*ret + int(n) } return ret, nil } // turns the hexadecimal ascii representation of a byte into its // numerical value. Example: from 'b' to 11 (0xb). func asciiHexToByte(b byte) (byte, error) { switch { case b >= '0' && b <= '9': return b - '0', nil case b >= 'a' && b <= 'f': return b - 'a' + 10, nil default: return 0, ErrInvalidPktLen } } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/format/pktline/scanner_test.go000066400000000000000000000130711345605224300264730ustar00rootroot00000000000000package pktline_test import ( "bytes" "errors" "fmt" "io" "strings" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" . 
"gopkg.in/check.v1" ) type SuiteScanner struct{} var _ = Suite(&SuiteScanner{}) func (s *SuiteScanner) TestInvalid(c *C) { for _, test := range [...]string{ "0001", "0002", "0003", "0004", "0001asdfsadf", "0004foo", "fff5", "ffff", "gorka", "0", "003", " 5a", "5 a", "5 \n", "-001", "-000", } { r := strings.NewReader(test) sc := pktline.NewScanner(r) _ = sc.Scan() c.Assert(sc.Err(), ErrorMatches, pktline.ErrInvalidPktLen.Error(), Commentf("data = %q", test)) } } func (s *SuiteScanner) TestDecodeOversizePktLines(c *C) { for _, test := range [...]string{ "fff1" + strings.Repeat("a", 0xfff1), "fff2" + strings.Repeat("a", 0xfff2), "fff3" + strings.Repeat("a", 0xfff3), "fff4" + strings.Repeat("a", 0xfff4), } { r := strings.NewReader(test) sc := pktline.NewScanner(r) _ = sc.Scan() c.Assert(sc.Err(), IsNil) } } func (s *SuiteScanner) TestEmptyReader(c *C) { r := strings.NewReader("") sc := pktline.NewScanner(r) hasPayload := sc.Scan() c.Assert(hasPayload, Equals, false) c.Assert(sc.Err(), Equals, nil) } func (s *SuiteScanner) TestFlush(c *C) { var buf bytes.Buffer e := pktline.NewEncoder(&buf) err := e.Flush() c.Assert(err, IsNil) sc := pktline.NewScanner(&buf) c.Assert(sc.Scan(), Equals, true) payload := sc.Bytes() c.Assert(len(payload), Equals, 0) } func (s *SuiteScanner) TestPktLineTooShort(c *C) { r := strings.NewReader("010cfoobar") sc := pktline.NewScanner(r) c.Assert(sc.Scan(), Equals, false) c.Assert(sc.Err(), ErrorMatches, "unexpected EOF") } func (s *SuiteScanner) TestScanAndPayload(c *C) { for _, test := range [...]string{ "a", "a\n", strings.Repeat("a", 100), strings.Repeat("a", 100) + "\n", strings.Repeat("\x00", 100), strings.Repeat("\x00", 100) + "\n", strings.Repeat("a", pktline.MaxPayloadSize), strings.Repeat("a", pktline.MaxPayloadSize-1) + "\n", } { var buf bytes.Buffer e := pktline.NewEncoder(&buf) err := e.EncodeString(test) c.Assert(err, IsNil, Commentf("input len=%x, contents=%.10q\n", len(test), test)) sc := pktline.NewScanner(&buf) 
c.Assert(sc.Scan(), Equals, true, Commentf("test = %.20q...", test)) obtained := sc.Bytes() c.Assert(obtained, DeepEquals, []byte(test), Commentf("in = %.20q out = %.20q", test, string(obtained))) } } func (s *SuiteScanner) TestSkip(c *C) { for _, test := range [...]struct { input []string n int expected []byte }{ { input: []string{ "first", "second", "third"}, n: 1, expected: []byte("second"), }, { input: []string{ "first", "second", "third"}, n: 2, expected: []byte("third"), }, } { var buf bytes.Buffer e := pktline.NewEncoder(&buf) err := e.EncodeString(test.input...) c.Assert(err, IsNil) sc := pktline.NewScanner(&buf) for i := 0; i < test.n; i++ { c.Assert(sc.Scan(), Equals, true, Commentf("scan error = %s", sc.Err())) } c.Assert(sc.Scan(), Equals, true, Commentf("scan error = %s", sc.Err())) obtained := sc.Bytes() c.Assert(obtained, DeepEquals, test.expected, Commentf("\nin = %.20q\nout = %.20q\nexp = %.20q", test.input, obtained, test.expected)) } } func (s *SuiteScanner) TestEOF(c *C) { var buf bytes.Buffer e := pktline.NewEncoder(&buf) err := e.EncodeString("first", "second") c.Assert(err, IsNil) sc := pktline.NewScanner(&buf) for sc.Scan() { } c.Assert(sc.Err(), IsNil) } type mockReader struct{} func (r *mockReader) Read([]byte) (int, error) { return 0, errors.New("foo") } func (s *SuiteScanner) TestInternalReadError(c *C) { sc := pktline.NewScanner(&mockReader{}) c.Assert(sc.Scan(), Equals, false) c.Assert(sc.Err(), ErrorMatches, "foo") } // A section are several non flush-pkt lines followed by a flush-pkt, which // how the git protocol sends long messages. 
func (s *SuiteScanner) TestReadSomeSections(c *C) { nSections := 2 nLines := 4 data := sectionsExample(c, nSections, nLines) sc := pktline.NewScanner(data) sectionCounter := 0 lineCounter := 0 for sc.Scan() { if len(sc.Bytes()) == 0 { sectionCounter++ } lineCounter++ } c.Assert(sc.Err(), IsNil) c.Assert(sectionCounter, Equals, nSections) c.Assert(lineCounter, Equals, (1+nLines)*nSections) } // returns nSection sections, each of them with nLines pkt-lines (not // counting the flush-pkt: // // 0009 0.0\n // 0009 0.1\n // ... // 0000 // and so on func sectionsExample(c *C, nSections, nLines int) io.Reader { var buf bytes.Buffer e := pktline.NewEncoder(&buf) for section := 0; section < nSections; section++ { ss := []string{} for line := 0; line < nLines; line++ { line := fmt.Sprintf(" %d.%d\n", section, line) ss = append(ss, line) } err := e.EncodeString(ss...) c.Assert(err, IsNil) err = e.Flush() c.Assert(err, IsNil) } return &buf } func ExampleScanner() { // A reader is needed as input. input := strings.NewReader("000ahello\n" + "000bworld!\n" + "0000", ) // Create the scanner... s := pktline.NewScanner(input) // and scan every pkt-line found in the input. for s.Scan() { payload := s.Bytes() if len(payload) == 0 { // zero sized payloads correspond to flush-pkts. fmt.Println("FLUSH-PKT DETECTED") } else { // otherwise, you will be able to access the full payload. fmt.Printf("PAYLOAD = %q\n", string(payload)) } } // this will catch any error when reading from the input, if any. 
if s.Err() != nil { fmt.Println(s.Err()) } // Output: // PAYLOAD = "hello\n" // PAYLOAD = "world!\n" // FLUSH-PKT DETECTED } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/hash.go000066400000000000000000000026571345605224300220000ustar00rootroot00000000000000package plumbing import ( "bytes" "crypto/sha1" "encoding/hex" "hash" "sort" "strconv" ) // Hash SHA1 hased content type Hash [20]byte // ZeroHash is Hash with value zero var ZeroHash Hash // ComputeHash compute the hash for a given ObjectType and content func ComputeHash(t ObjectType, content []byte) Hash { h := NewHasher(t, int64(len(content))) h.Write(content) return h.Sum() } // NewHash return a new Hash from a hexadecimal hash representation func NewHash(s string) Hash { b, _ := hex.DecodeString(s) var h Hash copy(h[:], b) return h } func (h Hash) IsZero() bool { var empty Hash return h == empty } func (h Hash) String() string { return hex.EncodeToString(h[:]) } type Hasher struct { hash.Hash } func NewHasher(t ObjectType, size int64) Hasher { h := Hasher{sha1.New()} h.Write(t.Bytes()) h.Write([]byte(" ")) h.Write([]byte(strconv.FormatInt(size, 10))) h.Write([]byte{0}) return h } func (h Hasher) Sum() (hash Hash) { copy(hash[:], h.Hash.Sum(nil)) return } // HashesSort sorts a slice of Hashes in increasing order. func HashesSort(a []Hash) { sort.Sort(HashSlice(a)) } // HashSlice attaches the methods of sort.Interface to []Hash, sorting in // increasing order. type HashSlice []Hash func (p HashSlice) Len() int { return len(p) } func (p HashSlice) Less(i, j int) bool { return bytes.Compare(p[i][:], p[j][:]) < 0 } func (p HashSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/hash_test.go000066400000000000000000000026071345605224300230320ustar00rootroot00000000000000package plumbing import ( "testing" . 
"gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type HashSuite struct{} var _ = Suite(&HashSuite{}) func (s *HashSuite) TestComputeHash(c *C) { hash := ComputeHash(BlobObject, []byte("")) c.Assert(hash.String(), Equals, "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391") hash = ComputeHash(BlobObject, []byte("Hello, World!\n")) c.Assert(hash.String(), Equals, "8ab686eafeb1f44702738c8b0f24f2567c36da6d") } func (s *HashSuite) TestNewHash(c *C) { hash := ComputeHash(BlobObject, []byte("Hello, World!\n")) c.Assert(hash, Equals, NewHash(hash.String())) } func (s *HashSuite) TestIsZero(c *C) { hash := NewHash("foo") c.Assert(hash.IsZero(), Equals, true) hash = NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d") c.Assert(hash.IsZero(), Equals, false) } func (s *HashSuite) TestNewHasher(c *C) { content := "hasher test sample" hasher := NewHasher(BlobObject, int64(len(content))) hasher.Write([]byte(content)) c.Assert(hasher.Sum().String(), Equals, "dc42c3cc80028d0ec61f0a6b24cadd1c195c4dfc") } func (s *HashSuite) TestHashesSort(c *C) { i := []Hash{ NewHash("2222222222222222222222222222222222222222"), NewHash("1111111111111111111111111111111111111111"), } HashesSort(i) c.Assert(i[0], Equals, NewHash("1111111111111111111111111111111111111111")) c.Assert(i[1], Equals, NewHash("2222222222222222222222222222222222222222")) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/memory.go000066400000000000000000000031521345605224300223540ustar00rootroot00000000000000package plumbing import ( "bytes" "io" "io/ioutil" ) // MemoryObject on memory Object implementation type MemoryObject struct { t ObjectType h Hash cont []byte sz int64 } // Hash returns the object Hash, the hash is calculated on-the-fly the first // time it's called, in all subsequent calls the same Hash is returned even // if the type or the content have changed. The Hash is only generated if the // size of the content is exactly the object size. 
func (o *MemoryObject) Hash() Hash { if o.h == ZeroHash && int64(len(o.cont)) == o.sz { o.h = ComputeHash(o.t, o.cont) } return o.h } // Type return the ObjectType func (o *MemoryObject) Type() ObjectType { return o.t } // SetType sets the ObjectType func (o *MemoryObject) SetType(t ObjectType) { o.t = t } // Size return the size of the object func (o *MemoryObject) Size() int64 { return o.sz } // SetSize set the object size, a content of the given size should be written // afterwards func (o *MemoryObject) SetSize(s int64) { o.sz = s } // Reader returns a ObjectReader used to read the object's content. func (o *MemoryObject) Reader() (io.ReadCloser, error) { return ioutil.NopCloser(bytes.NewBuffer(o.cont)), nil } // Writer returns a ObjectWriter used to write the object's content. func (o *MemoryObject) Writer() (io.WriteCloser, error) { return o, nil } func (o *MemoryObject) Write(p []byte) (n int, err error) { o.cont = append(o.cont, p...) o.sz = int64(len(o.cont)) return len(p), nil } // Close releases any resources consumed by the object when it is acting as a // ObjectWriter. func (o *MemoryObject) Close() error { return nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/memory_test.go000066400000000000000000000027471345605224300234240ustar00rootroot00000000000000package plumbing import ( "io/ioutil" . 
"gopkg.in/check.v1" ) type MemoryObjectSuite struct{} var _ = Suite(&MemoryObjectSuite{}) func (s *MemoryObjectSuite) TestHash(c *C) { o := &MemoryObject{} o.SetType(BlobObject) o.SetSize(14) _, err := o.Write([]byte("Hello, World!\n")) c.Assert(err, IsNil) c.Assert(o.Hash().String(), Equals, "8ab686eafeb1f44702738c8b0f24f2567c36da6d") o.SetType(CommitObject) c.Assert(o.Hash().String(), Equals, "8ab686eafeb1f44702738c8b0f24f2567c36da6d") } func (s *MemoryObjectSuite) TestHashNotFilled(c *C) { o := &MemoryObject{} o.SetType(BlobObject) o.SetSize(14) c.Assert(o.Hash(), Equals, ZeroHash) } func (s *MemoryObjectSuite) TestType(c *C) { o := &MemoryObject{} o.SetType(BlobObject) c.Assert(o.Type(), Equals, BlobObject) } func (s *MemoryObjectSuite) TestSize(c *C) { o := &MemoryObject{} o.SetSize(42) c.Assert(o.Size(), Equals, int64(42)) } func (s *MemoryObjectSuite) TestReader(c *C) { o := &MemoryObject{cont: []byte("foo")} reader, err := o.Reader() c.Assert(err, IsNil) defer func() { c.Assert(reader.Close(), IsNil) }() b, err := ioutil.ReadAll(reader) c.Assert(err, IsNil) c.Assert(b, DeepEquals, []byte("foo")) } func (s *MemoryObjectSuite) TestWriter(c *C) { o := &MemoryObject{} writer, err := o.Writer() c.Assert(err, IsNil) defer func() { c.Assert(writer.Close(), IsNil) }() n, err := writer.Write([]byte("foo")) c.Assert(err, IsNil) c.Assert(n, Equals, 3) c.Assert(o.cont, DeepEquals, []byte("foo")) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object.go000066400000000000000000000050151345605224300223120ustar00rootroot00000000000000// package plumbing implement the core interfaces and structs used by go-git package plumbing import ( "errors" "io" ) var ( ErrObjectNotFound = errors.New("object not found") // ErrInvalidType is returned when an invalid object type is provided. 
ErrInvalidType = errors.New("invalid object type") ) // Object is a generic representation of any git object type EncodedObject interface { Hash() Hash Type() ObjectType SetType(ObjectType) Size() int64 SetSize(int64) Reader() (io.ReadCloser, error) Writer() (io.WriteCloser, error) } // DeltaObject is an EncodedObject representing a delta. type DeltaObject interface { EncodedObject // BaseHash returns the hash of the object used as base for this delta. BaseHash() Hash // ActualHash returns the hash of the object after applying the delta. ActualHash() Hash // Size returns the size of the object after applying the delta. ActualSize() int64 } // ObjectType internal object type // Integer values from 0 to 7 map to those exposed by git. // AnyObject is used to represent any from 0 to 7. type ObjectType int8 const ( InvalidObject ObjectType = 0 CommitObject ObjectType = 1 TreeObject ObjectType = 2 BlobObject ObjectType = 3 TagObject ObjectType = 4 // 5 reserved for future expansion OFSDeltaObject ObjectType = 6 REFDeltaObject ObjectType = 7 AnyObject ObjectType = -127 ) func (t ObjectType) String() string { switch t { case CommitObject: return "commit" case TreeObject: return "tree" case BlobObject: return "blob" case TagObject: return "tag" case OFSDeltaObject: return "ofs-delta" case REFDeltaObject: return "ref-delta" case AnyObject: return "any" default: return "unknown" } } func (t ObjectType) Bytes() []byte { return []byte(t.String()) } // Valid returns true if t is a valid ObjectType. func (t ObjectType) Valid() bool { return t >= CommitObject && t <= REFDeltaObject } // IsDelta returns true for any ObjectTyoe that represents a delta (i.e. // REFDeltaObject or OFSDeltaObject). func (t ObjectType) IsDelta() bool { return t == REFDeltaObject || t == OFSDeltaObject } // ParseObjectType parses a string representation of ObjectType. It returns an // error on parse failure. 
func ParseObjectType(value string) (typ ObjectType, err error) { switch value { case "commit": typ = CommitObject case "tree": typ = TreeObject case "blob": typ = BlobObject case "tag": typ = TagObject case "ofs-delta": typ = OFSDeltaObject case "ref-delta": typ = REFDeltaObject default: err = ErrInvalidType } return } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/000077500000000000000000000000001345605224300217625ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/blob.go000066400000000000000000000065171345605224300232400ustar00rootroot00000000000000package object import ( "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) // Blob is used to store arbitrary data - it is generally a file. type Blob struct { // Hash of the blob. Hash plumbing.Hash // Size of the (uncompressed) blob. Size int64 obj plumbing.EncodedObject } // GetBlob gets a blob from an object storer and decodes it. func GetBlob(s storer.EncodedObjectStorer, h plumbing.Hash) (*Blob, error) { o, err := s.EncodedObject(plumbing.BlobObject, h) if err != nil { return nil, err } return DecodeBlob(o) } // DecodeObject decodes an encoded object into a *Blob. func DecodeBlob(o plumbing.EncodedObject) (*Blob, error) { b := &Blob{} if err := b.Decode(o); err != nil { return nil, err } return b, nil } // ID returns the object ID of the blob. The returned value will always match // the current value of Blob.Hash. // // ID is present to fulfill the Object interface. func (b *Blob) ID() plumbing.Hash { return b.Hash } // Type returns the type of object. It always returns plumbing.BlobObject. // // Type is present to fulfill the Object interface. func (b *Blob) Type() plumbing.ObjectType { return plumbing.BlobObject } // Decode transforms a plumbing.EncodedObject into a Blob struct. 
func (b *Blob) Decode(o plumbing.EncodedObject) error { if o.Type() != plumbing.BlobObject { return ErrUnsupportedObject } b.Hash = o.Hash() b.Size = o.Size() b.obj = o return nil } // Encode transforms a Blob into a plumbing.EncodedObject. func (b *Blob) Encode(o plumbing.EncodedObject) (err error) { o.SetType(plumbing.BlobObject) w, err := o.Writer() if err != nil { return err } defer ioutil.CheckClose(w, &err) r, err := b.Reader() if err != nil { return err } defer ioutil.CheckClose(r, &err) _, err = io.Copy(w, r) return err } // Reader returns a reader allow the access to the content of the blob func (b *Blob) Reader() (io.ReadCloser, error) { return b.obj.Reader() } // BlobIter provides an iterator for a set of blobs. type BlobIter struct { storer.EncodedObjectIter s storer.EncodedObjectStorer } // NewBlobIter takes a storer.EncodedObjectStorer and a // storer.EncodedObjectIter and returns a *BlobIter that iterates over all // blobs contained in the storer.EncodedObjectIter. // // Any non-blob object returned by the storer.EncodedObjectIter is skipped. func NewBlobIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *BlobIter { return &BlobIter{iter, s} } // Next moves the iterator to the next blob and returns a pointer to it. If // there are no more blobs, it returns io.EOF. func (iter *BlobIter) Next() (*Blob, error) { for { obj, err := iter.EncodedObjectIter.Next() if err != nil { return nil, err } if obj.Type() != plumbing.BlobObject { continue } return DecodeBlob(obj) } } // ForEach call the cb function for each blob contained on this iter until // an error happens or the end of the iter is reached. If ErrStop is sent // the iteration is stop but no error is returned. The iterator is closed. 
func (iter *BlobIter) ForEach(cb func(*Blob) error) error { return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { if obj.Type() != plumbing.BlobObject { return nil } b, err := DecodeBlob(obj) if err != nil { return err } return cb(b) }) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/blob_test.go000066400000000000000000000046451345605224300242770ustar00rootroot00000000000000package object import ( "bytes" "io" "io/ioutil" "gopkg.in/src-d/go-git.v4/plumbing" . "gopkg.in/check.v1" ) type BlobsSuite struct { BaseObjectsSuite } var _ = Suite(&BlobsSuite{}) func (s *BlobsSuite) TestBlobHash(c *C) { o := &plumbing.MemoryObject{} o.SetType(plumbing.BlobObject) o.SetSize(3) writer, err := o.Writer() c.Assert(err, IsNil) defer func() { c.Assert(writer.Close(), IsNil) }() writer.Write([]byte{'F', 'O', 'O'}) blob := &Blob{} c.Assert(blob.Decode(o), IsNil) c.Assert(blob.Size, Equals, int64(3)) c.Assert(blob.Hash.String(), Equals, "d96c7efbfec2814ae0301ad054dc8d9fc416c9b5") reader, err := blob.Reader() c.Assert(err, IsNil) defer func() { c.Assert(reader.Close(), IsNil) }() data, err := ioutil.ReadAll(reader) c.Assert(err, IsNil) c.Assert(string(data), Equals, "FOO") } func (s *BlobsSuite) TestBlobDecodeEncodeIdempotent(c *C) { var objects []*plumbing.MemoryObject for _, str := range []string{"foo", "foo\n"} { obj := &plumbing.MemoryObject{} obj.Write([]byte(str)) obj.SetType(plumbing.BlobObject) obj.Hash() objects = append(objects, obj) } for _, object := range objects { blob := &Blob{} err := blob.Decode(object) c.Assert(err, IsNil) newObject := &plumbing.MemoryObject{} err = blob.Encode(newObject) c.Assert(err, IsNil) newObject.Hash() // Ensure Hash is pre-computed before deep comparison c.Assert(newObject, DeepEquals, object) } } func (s *BlobsSuite) TestBlobIter(c *C) { encIter, err := s.Storer.IterEncodedObjects(plumbing.BlobObject) c.Assert(err, IsNil) iter := NewBlobIter(s.Storer, encIter) blobs := []*Blob{} iter.ForEach(func(b *Blob) error 
{ blobs = append(blobs, b) return nil }) c.Assert(len(blobs) > 0, Equals, true) iter.Close() encIter, err = s.Storer.IterEncodedObjects(plumbing.BlobObject) c.Assert(err, IsNil) iter = NewBlobIter(s.Storer, encIter) i := 0 for { b, err := iter.Next() if err == io.EOF { break } c.Assert(err, IsNil) c.Assert(b.ID(), Equals, blobs[i].ID()) c.Assert(b.Size, Equals, blobs[i].Size) c.Assert(b.Type(), Equals, blobs[i].Type()) r1, err := b.Reader() c.Assert(err, IsNil) b1, err := ioutil.ReadAll(r1) c.Assert(err, IsNil) c.Assert(r1.Close(), IsNil) r2, err := blobs[i].Reader() c.Assert(err, IsNil) b2, err := ioutil.ReadAll(r2) c.Assert(err, IsNil) c.Assert(r2.Close(), IsNil) c.Assert(bytes.Compare(b1, b2), Equals, 0) i++ } iter.Close() } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/change.go000066400000000000000000000074231345605224300235440ustar00rootroot00000000000000package object import ( "bytes" "context" "fmt" "strings" "gopkg.in/src-d/go-git.v4/utils/merkletrie" ) // Change values represent a detected change between two git trees. For // modifications, From is the original status of the node and To is its // final status. For insertions, From is the zero value and for // deletions To is the zero value. type Change struct { From ChangeEntry To ChangeEntry } var empty = ChangeEntry{} // Action returns the kind of action represented by the change, an // insertion, a deletion or a modification. func (c *Change) Action() (merkletrie.Action, error) { if c.From == empty && c.To == empty { return merkletrie.Action(0), fmt.Errorf("malformed change: empty from and to") } if c.From == empty { return merkletrie.Insert, nil } if c.To == empty { return merkletrie.Delete, nil } return merkletrie.Modify, nil } // Files return the files before and after a change. // For insertions from will be nil. For deletions to will be nil. 
func (c *Change) Files() (from, to *File, err error) { action, err := c.Action() if err != nil { return } if action == merkletrie.Insert || action == merkletrie.Modify { to, err = c.To.Tree.TreeEntryFile(&c.To.TreeEntry) if !c.To.TreeEntry.Mode.IsFile() { return nil, nil, nil } if err != nil { return } } if action == merkletrie.Delete || action == merkletrie.Modify { from, err = c.From.Tree.TreeEntryFile(&c.From.TreeEntry) if !c.From.TreeEntry.Mode.IsFile() { return nil, nil, nil } if err != nil { return } } return } func (c *Change) String() string { action, err := c.Action() if err != nil { return fmt.Sprintf("malformed change") } return fmt.Sprintf("", action, c.name()) } // Patch returns a Patch with all the file changes in chunks. This // representation can be used to create several diff outputs. func (c *Change) Patch() (*Patch, error) { return c.PatchContext(context.Background()) } // Patch returns a Patch with all the file changes in chunks. This // representation can be used to create several diff outputs. // If context expires, an non-nil error will be returned // Provided context must be non-nil func (c *Change) PatchContext(ctx context.Context) (*Patch, error) { return getPatchContext(ctx, "", c) } func (c *Change) name() string { if c.From != empty { return c.From.Name } return c.To.Name } // ChangeEntry values represent a node that has suffered a change. type ChangeEntry struct { // Full path of the node using "/" as separator. Name string // Parent tree of the node that has changed. Tree *Tree // The entry of the node. TreeEntry TreeEntry } // Changes represents a collection of changes between two git trees. // Implements sort.Interface lexicographically over the path of the // changed files. 
type Changes []*Change func (c Changes) Len() int { return len(c) } func (c Changes) Swap(i, j int) { c[i], c[j] = c[j], c[i] } func (c Changes) Less(i, j int) bool { return strings.Compare(c[i].name(), c[j].name()) < 0 } func (c Changes) String() string { var buffer bytes.Buffer buffer.WriteString("[") comma := "" for _, v := range c { buffer.WriteString(comma) buffer.WriteString(v.String()) comma = ", " } buffer.WriteString("]") return buffer.String() } // Patch returns a Patch with all the changes in chunks. This // representation can be used to create several diff outputs. func (c Changes) Patch() (*Patch, error) { return c.PatchContext(context.Background()) } // Patch returns a Patch with all the changes in chunks. This // representation can be used to create several diff outputs. // If context expires, an non-nil error will be returned // Provided context must be non-nil func (c Changes) PatchContext(ctx context.Context) (*Patch, error) { return getPatchContext(ctx, "", c...) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/change_adaptor.go000066400000000000000000000023431345605224300252520ustar00rootroot00000000000000package object import ( "errors" "fmt" "gopkg.in/src-d/go-git.v4/utils/merkletrie" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) // The following functions transform changes types form the merkletrie // package to changes types from this package. 
func newChange(c merkletrie.Change) (*Change, error) { ret := &Change{} var err error if ret.From, err = newChangeEntry(c.From); err != nil { return nil, fmt.Errorf("From field: %s", err) } if ret.To, err = newChangeEntry(c.To); err != nil { return nil, fmt.Errorf("To field: %s", err) } return ret, nil } func newChangeEntry(p noder.Path) (ChangeEntry, error) { if p == nil { return empty, nil } asTreeNoder, ok := p.Last().(*treeNoder) if !ok { return ChangeEntry{}, errors.New("cannot transform non-TreeNoders") } return ChangeEntry{ Name: p.String(), Tree: asTreeNoder.parent, TreeEntry: TreeEntry{ Name: asTreeNoder.name, Mode: asTreeNoder.mode, Hash: asTreeNoder.hash, }, }, nil } func newChanges(src merkletrie.Changes) (Changes, error) { ret := make(Changes, len(src)) var err error for i, e := range src { ret[i], err = newChange(e) if err != nil { return nil, fmt.Errorf("change #%d: %s", i, err) } } return ret, nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/change_adaptor_test.go000066400000000000000000000247471345605224300263250ustar00rootroot00000000000000package object import ( "sort" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/filesystem" "gopkg.in/src-d/go-git.v4/utils/merkletrie" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type ChangeAdaptorSuite struct { fixtures.Suite Storer storer.EncodedObjectStorer Fixture *fixtures.Fixture } func (s *ChangeAdaptorSuite) SetUpSuite(c *C) { s.Suite.SetUpSuite(c) s.Fixture = fixtures.Basic().One() sto := filesystem.NewStorage(s.Fixture.DotGit(), cache.NewObjectLRUDefault()) s.Storer = sto } func (s *ChangeAdaptorSuite) tree(c *C, h plumbing.Hash) *Tree { t, err := GetTree(s.Storer, h) c.Assert(err, IsNil) return t } var _ = Suite(&ChangeAdaptorSuite{}) // utility function to build Noders from a tree and an tree entry. func newNoder(t *Tree, e TreeEntry) noder.Noder { return &treeNoder{ parent: t, name: e.Name, mode: e.Mode, hash: e.Hash, } } // utility function to build Paths func newPath(nn ...noder.Noder) noder.Path { return noder.Path(nn) } func (s *ChangeAdaptorSuite) TestTreeNoderHashHasMode(c *C) { hash := plumbing.NewHash("aaaa") mode := filemode.Regular treeNoder := &treeNoder{ hash: hash, mode: mode, } expected := []byte{ 0xaa, 0xaa, 0x00, 0x00, // original hash is aaaa and 16 zeros 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, } expected = append(expected, filemode.Regular.Bytes()...) 
c.Assert(treeNoder.Hash(), DeepEquals, expected) } func (s *ChangeAdaptorSuite) TestNewChangeInsert(c *C) { tree := &Tree{} entry := TreeEntry{ Name: "name", Mode: filemode.FileMode(42), Hash: plumbing.NewHash("aaaaa"), } path := newPath(newNoder(tree, entry)) expectedTo, err := newChangeEntry(path) c.Assert(err, IsNil) src := merkletrie.Change{ From: nil, To: path, } obtained, err := newChange(src) c.Assert(err, IsNil) action, err := obtained.Action() c.Assert(err, IsNil) c.Assert(action, Equals, merkletrie.Insert) c.Assert(obtained.From, Equals, ChangeEntry{}) c.Assert(obtained.To, Equals, expectedTo) } func (s *ChangeAdaptorSuite) TestNewChangeDelete(c *C) { tree := &Tree{} entry := TreeEntry{ Name: "name", Mode: filemode.FileMode(42), Hash: plumbing.NewHash("aaaaa"), } path := newPath(newNoder(tree, entry)) expectedFrom, err := newChangeEntry(path) c.Assert(err, IsNil) src := merkletrie.Change{ From: path, To: nil, } obtained, err := newChange(src) c.Assert(err, IsNil) action, err := obtained.Action() c.Assert(err, IsNil) c.Assert(action, Equals, merkletrie.Delete) c.Assert(obtained.From, Equals, expectedFrom) c.Assert(obtained.To, Equals, ChangeEntry{}) } func (s *ChangeAdaptorSuite) TestNewChangeModify(c *C) { treeA := &Tree{} entryA := TreeEntry{ Name: "name", Mode: filemode.FileMode(42), Hash: plumbing.NewHash("aaaaa"), } pathA := newPath(newNoder(treeA, entryA)) expectedFrom, err := newChangeEntry(pathA) c.Assert(err, IsNil) treeB := &Tree{} entryB := TreeEntry{ Name: "name", Mode: filemode.FileMode(42), Hash: plumbing.NewHash("bbbb"), } pathB := newPath(newNoder(treeB, entryB)) expectedTo, err := newChangeEntry(pathB) c.Assert(err, IsNil) src := merkletrie.Change{ From: pathA, To: pathB, } obtained, err := newChange(src) c.Assert(err, IsNil) action, err := obtained.Action() c.Assert(err, IsNil) c.Assert(action, Equals, merkletrie.Modify) c.Assert(obtained.From, Equals, expectedFrom) c.Assert(obtained.To, Equals, expectedTo) } func (s *ChangeAdaptorSuite) 
TestEmptyChangeFails(c *C) { change := &Change{ From: empty, To: empty, } _, err := change.Action() c.Assert(err, ErrorMatches, "malformed change.*") _, _, err = change.Files() c.Assert(err, ErrorMatches, "malformed change.*") str := change.String() c.Assert(str, Equals, "malformed change") } type noderMock struct{ noder.Noder } func (s *ChangeAdaptorSuite) TestNewChangeFailsWithChangesFromOtherNoders(c *C) { src := merkletrie.Change{ From: newPath(noderMock{}), To: nil, } _, err := newChange(src) c.Assert(err, Not(IsNil)) src = merkletrie.Change{ From: nil, To: newPath(noderMock{}), } _, err = newChange(src) c.Assert(err, Not(IsNil)) } func (s *ChangeAdaptorSuite) TestChangeStringFrom(c *C) { expected := "" change := Change{} change.From.Name = "foo" obtained := change.String() c.Assert(obtained, Equals, expected) } func (s *ChangeAdaptorSuite) TestChangeStringTo(c *C) { expected := "" change := Change{} change.To.Name = "foo" obtained := change.String() c.Assert(obtained, Equals, expected) } func (s *ChangeAdaptorSuite) TestChangeFilesInsert(c *C) { tree := s.tree(c, plumbing.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c")) change := Change{} change.To.Name = "json/long.json" change.To.Tree = tree change.To.TreeEntry.Mode = filemode.Regular change.To.TreeEntry.Hash = plumbing.NewHash("49c6bb89b17060d7b4deacb7b338fcc6ea2352a9") from, to, err := change.Files() c.Assert(err, IsNil) c.Assert(from, IsNil) c.Assert(to.ID(), Equals, change.To.TreeEntry.Hash) } func (s *ChangeAdaptorSuite) TestChangeFilesInsertNotFound(c *C) { tree := s.tree(c, plumbing.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c")) change := Change{} change.To.Name = "json/long.json" change.To.Tree = tree change.To.TreeEntry.Mode = filemode.Regular // there is no object for this hash change.To.TreeEntry.Hash = plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") _, _, err := change.Files() c.Assert(err, Not(IsNil)) } func (s *ChangeAdaptorSuite) TestChangeFilesDelete(c *C) { tree := 
s.tree(c, plumbing.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c")) change := Change{} change.From.Name = "json/long.json" change.From.Tree = tree change.From.TreeEntry.Mode = filemode.Regular change.From.TreeEntry.Hash = plumbing.NewHash("49c6bb89b17060d7b4deacb7b338fcc6ea2352a9") from, to, err := change.Files() c.Assert(err, IsNil) c.Assert(to, IsNil) c.Assert(from.ID(), Equals, change.From.TreeEntry.Hash) } func (s *ChangeAdaptorSuite) TestChangeFilesDeleteNotFound(c *C) { tree := s.tree(c, plumbing.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c")) change := Change{} change.From.Name = "json/long.json" change.From.Tree = tree change.From.TreeEntry.Mode = filemode.Regular // there is no object for this hash change.From.TreeEntry.Hash = plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") _, _, err := change.Files() c.Assert(err, Not(IsNil)) } func (s *ChangeAdaptorSuite) TestChangeFilesModify(c *C) { tree := s.tree(c, plumbing.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c")) change := Change{} change.To.Name = "json/long.json" change.To.Tree = tree change.To.TreeEntry.Mode = filemode.Regular change.To.TreeEntry.Hash = plumbing.NewHash("49c6bb89b17060d7b4deacb7b338fcc6ea2352a9") change.From.Name = "json/long.json" change.From.Tree = tree change.From.TreeEntry.Mode = filemode.Regular change.From.TreeEntry.Hash = plumbing.NewHash("9a48f23120e880dfbe41f7c9b7b708e9ee62a492") from, to, err := change.Files() c.Assert(err, IsNil) c.Assert(to.ID(), Equals, change.To.TreeEntry.Hash) c.Assert(from.ID(), Equals, change.From.TreeEntry.Hash) } func (s *ChangeAdaptorSuite) TestChangeEntryFailsWithOtherNoders(c *C) { path := noder.Path{noderMock{}} _, err := newChangeEntry(path) c.Assert(err, Not(IsNil)) } func (s *ChangeAdaptorSuite) TestChangeEntryFromNilIsZero(c *C) { obtained, err := newChangeEntry(nil) c.Assert(err, IsNil) c.Assert(obtained, Equals, ChangeEntry{}) } func (s *ChangeAdaptorSuite) TestChangeEntryFromSortPath(c *C) { tree := &Tree{} entry := 
TreeEntry{ Name: "name", Mode: filemode.FileMode(42), Hash: plumbing.NewHash("aaaaa"), } path := newPath(newNoder(tree, entry)) obtained, err := newChangeEntry(path) c.Assert(err, IsNil) c.Assert(obtained.Name, Equals, entry.Name) c.Assert(obtained.Tree, Equals, tree) c.Assert(obtained.TreeEntry, DeepEquals, entry) } func (s *ChangeAdaptorSuite) TestChangeEntryFromLongPath(c *C) { treeA := &Tree{} entryA := TreeEntry{ Name: "nameA", Mode: filemode.FileMode(42), Hash: plumbing.NewHash("aaaa"), } treeB := &Tree{} entryB := TreeEntry{ Name: "nameB", Mode: filemode.FileMode(24), Hash: plumbing.NewHash("bbbb"), } path := newPath( newNoder(treeA, entryA), newNoder(treeB, entryB), ) obtained, err := newChangeEntry(path) c.Assert(err, IsNil) c.Assert(obtained.Name, Equals, entryA.Name+"/"+entryB.Name) c.Assert(obtained.Tree, Equals, treeB) c.Assert(obtained.TreeEntry, Equals, entryB) } func (s *ChangeAdaptorSuite) TestNewChangesEmpty(c *C) { expected := "[]" changes, err := newChanges(nil) c.Assert(err, IsNil) obtained := changes.String() c.Assert(obtained, Equals, expected) expected = "[]" changes, err = newChanges(merkletrie.Changes{}) c.Assert(err, IsNil) obtained = changes.String() c.Assert(obtained, Equals, expected) } func (s *ChangeAdaptorSuite) TestNewChanges(c *C) { treeA := &Tree{} entryA := TreeEntry{Name: "nameA"} pathA := newPath(newNoder(treeA, entryA)) changeA := merkletrie.Change{ From: nil, To: pathA, } treeB := &Tree{} entryB := TreeEntry{Name: "nameB"} pathB := newPath(newNoder(treeB, entryB)) changeB := merkletrie.Change{ From: pathB, To: nil, } src := merkletrie.Changes{changeA, changeB} changes, err := newChanges(src) c.Assert(err, IsNil) c.Assert(len(changes), Equals, 2) action, err := changes[0].Action() c.Assert(err, IsNil) c.Assert(action, Equals, merkletrie.Insert) c.Assert(changes[0].To.Name, Equals, "nameA") action, err = changes[1].Action() c.Assert(err, IsNil) c.Assert(action, Equals, merkletrie.Delete) c.Assert(changes[1].From.Name, Equals, 
"nameB") } func (s *ChangeAdaptorSuite) TestNewChangesFailsWithOtherNoders(c *C) { change := merkletrie.Change{ From: nil, To: newPath(noderMock{}), } src := merkletrie.Changes{change} _, err := newChanges(src) c.Assert(err, Not(IsNil)) } func (s *ChangeAdaptorSuite) TestSortChanges(c *C) { c1 := &Change{} c1.To.Name = "1" c2 := &Change{} c2.From.Name = "2" c2.To.Name = "2" c3 := &Change{} c3.From.Name = "3" changes := Changes{c3, c1, c2} sort.Sort(changes) c.Assert(changes[0].String(), Equals, "") c.Assert(changes[1].String(), Equals, "") c.Assert(changes[2].String(), Equals, "") } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/change_test.go000066400000000000000000000274371345605224300246120ustar00rootroot00000000000000package object import ( "context" "sort" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/plumbing/format/diff" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/filesystem" "gopkg.in/src-d/go-git.v4/utils/merkletrie" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type ChangeSuite struct { fixtures.Suite Storer storer.EncodedObjectStorer Fixture *fixtures.Fixture } func (s *ChangeSuite) SetUpSuite(c *C) { s.Suite.SetUpSuite(c) s.Fixture = fixtures.ByURL("https://github.com/src-d/go-git.git"). ByTag(".git").One() sto := filesystem.NewStorage(s.Fixture.DotGit(), cache.NewObjectLRUDefault()) s.Storer = sto } func (s *ChangeSuite) tree(c *C, h plumbing.Hash) *Tree { t, err := GetTree(s.Storer, h) c.Assert(err, IsNil) return t } var _ = Suite(&ChangeSuite{}) func (s *ChangeSuite) TestInsert(c *C) { // Commit a5078b19f08f63e7948abd0a5e2fb7d319d3a565 of the go-git // fixture inserted "examples/clone/main.go". 
// // On that commit, the "examples/clone" tree is // 6efca3ff41cab651332f9ebc0c96bb26be809615 // // and the "examples/colone/main.go" is // f95dc8f7923add1a8b9f72ecb1e8db1402de601a path := "examples/clone/main.go" name := "main.go" mode := filemode.Regular blob := plumbing.NewHash("f95dc8f7923add1a8b9f72ecb1e8db1402de601a") tree := plumbing.NewHash("6efca3ff41cab651332f9ebc0c96bb26be809615") change := &Change{ From: empty, To: ChangeEntry{ Name: path, Tree: s.tree(c, tree), TreeEntry: TreeEntry{ Name: name, Mode: mode, Hash: blob, }, }, } action, err := change.Action() c.Assert(err, IsNil) c.Assert(action, Equals, merkletrie.Insert) from, to, err := change.Files() c.Assert(err, IsNil) c.Assert(from, IsNil) c.Assert(to.Name, Equals, name) c.Assert(to.Blob.Hash, Equals, blob) p, err := change.Patch() c.Assert(err, IsNil) c.Assert(len(p.FilePatches()), Equals, 1) c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 1) c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Add) p, err = change.PatchContext(context.Background()) c.Assert(err, IsNil) c.Assert(len(p.FilePatches()), Equals, 1) c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 1) c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Add) str := change.String() c.Assert(str, Equals, "") } func (s *ChangeSuite) TestDelete(c *C) { // Commit f6011d65d57c2a866e231fc21a39cb618f86f9ea of the go-git // fixture deleted "utils/difftree/difftree.go". // // The parent of that commit is // 9b4a386db3d98a4362516a00ef3d04d4698c9bcd. // // On that parent commit, the "utils/difftree" tree is // f3d11566401ce4b0808aab9dd6fad3d5abf1481a. // // and the "utils/difftree/difftree.go" is // e2cb9a5719daf634d45a063112b4044ee81da13ea. 
path := "utils/difftree/difftree.go" name := "difftree.go" mode := filemode.Regular blob := plumbing.NewHash("e2cb9a5719daf634d45a063112b4044ee81da13e") tree := plumbing.NewHash("f3d11566401ce4b0808aab9dd6fad3d5abf1481a") change := &Change{ From: ChangeEntry{ Name: path, Tree: s.tree(c, tree), TreeEntry: TreeEntry{ Name: name, Mode: mode, Hash: blob, }, }, To: empty, } action, err := change.Action() c.Assert(err, IsNil) c.Assert(action, Equals, merkletrie.Delete) from, to, err := change.Files() c.Assert(err, IsNil) c.Assert(to, IsNil) c.Assert(from.Name, Equals, name) c.Assert(from.Blob.Hash, Equals, blob) p, err := change.Patch() c.Assert(err, IsNil) c.Assert(len(p.FilePatches()), Equals, 1) c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 1) c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Delete) p, err = change.PatchContext(context.Background()) c.Assert(err, IsNil) c.Assert(len(p.FilePatches()), Equals, 1) c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 1) c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Delete) str := change.String() c.Assert(str, Equals, "") } func (s *ChangeSuite) TestModify(c *C) { // Commit 7beaad711378a4daafccc2c04bc46d36df2a0fd1 of the go-git // fixture modified "examples/latest/latest.go". // the "examples/latest" tree is // b1f01b730b855c82431918cb338ad47ed558999b. // and "examples/latest/latest.go" is blob // 05f583ace3a9a078d8150905a53a4d82567f125f. // // The parent of that commit is // 337148ef6d751477796922ac127b416b8478fcc4. // the "examples/latest" tree is // 8b0af31d2544acb5c4f3816a602f11418cbd126e. // and "examples/latest/latest.go" is blob // de927fad935d172929aacf20e71f3bf0b91dd6f9. 
path := "utils/difftree/difftree.go" name := "difftree.go" mode := filemode.Regular fromBlob := plumbing.NewHash("05f583ace3a9a078d8150905a53a4d82567f125f") fromTree := plumbing.NewHash("b1f01b730b855c82431918cb338ad47ed558999b") toBlob := plumbing.NewHash("de927fad935d172929aacf20e71f3bf0b91dd6f9") toTree := plumbing.NewHash("8b0af31d2544acb5c4f3816a602f11418cbd126e") change := &Change{ From: ChangeEntry{ Name: path, Tree: s.tree(c, fromTree), TreeEntry: TreeEntry{ Name: name, Mode: mode, Hash: fromBlob, }, }, To: ChangeEntry{ Name: path, Tree: s.tree(c, toTree), TreeEntry: TreeEntry{ Name: name, Mode: mode, Hash: toBlob, }, }, } action, err := change.Action() c.Assert(err, IsNil) c.Assert(action, Equals, merkletrie.Modify) from, to, err := change.Files() c.Assert(err, IsNil) c.Assert(from.Name, Equals, name) c.Assert(from.Blob.Hash, Equals, fromBlob) c.Assert(to.Name, Equals, name) c.Assert(to.Blob.Hash, Equals, toBlob) p, err := change.Patch() c.Assert(err, IsNil) c.Assert(len(p.FilePatches()), Equals, 1) c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 7) c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Equal) c.Assert(p.FilePatches()[0].Chunks()[1].Type(), Equals, diff.Delete) c.Assert(p.FilePatches()[0].Chunks()[2].Type(), Equals, diff.Add) c.Assert(p.FilePatches()[0].Chunks()[3].Type(), Equals, diff.Equal) c.Assert(p.FilePatches()[0].Chunks()[4].Type(), Equals, diff.Delete) c.Assert(p.FilePatches()[0].Chunks()[5].Type(), Equals, diff.Add) c.Assert(p.FilePatches()[0].Chunks()[6].Type(), Equals, diff.Equal) p, err = change.PatchContext(context.Background()) c.Assert(err, IsNil) c.Assert(len(p.FilePatches()), Equals, 1) c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 7) c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Equal) c.Assert(p.FilePatches()[0].Chunks()[1].Type(), Equals, diff.Delete) c.Assert(p.FilePatches()[0].Chunks()[2].Type(), Equals, diff.Add) c.Assert(p.FilePatches()[0].Chunks()[3].Type(), Equals, diff.Equal) 
c.Assert(p.FilePatches()[0].Chunks()[4].Type(), Equals, diff.Delete) c.Assert(p.FilePatches()[0].Chunks()[5].Type(), Equals, diff.Add) c.Assert(p.FilePatches()[0].Chunks()[6].Type(), Equals, diff.Equal) str := change.String() c.Assert(str, Equals, "") } func (s *ChangeSuite) TestEmptyChangeFails(c *C) { change := &Change{} _, err := change.Action() c.Assert(err, ErrorMatches, "malformed.*") _, _, err = change.Files() c.Assert(err, ErrorMatches, "malformed.*") str := change.String() c.Assert(str, Equals, "malformed change") } // test reproducing bug #317 func (s *ChangeSuite) TestNoFileFilemodes(c *C) { s.Suite.SetUpSuite(c) f := fixtures.ByURL("https://github.com/git-fixtures/submodule.git").One() sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault()) iter, err := sto.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) var commits []*Commit iter.ForEach(func(o plumbing.EncodedObject) error { if o.Type() == plumbing.CommitObject { commit, err := GetCommit(sto, o.Hash()) c.Assert(err, IsNil) commits = append(commits, commit) } return nil }) c.Assert(len(commits), Not(Equals), 0) var prev *Commit for _, commit := range commits { if prev == nil { prev = commit continue } tree, err := commit.Tree() c.Assert(err, IsNil) prevTree, err := prev.Tree() c.Assert(err, IsNil) changes, err := DiffTree(tree, prevTree) c.Assert(err, IsNil) for _, change := range changes { _, _, err := change.Files() c.Assert(err, IsNil) } prev = commit } } func (s *ChangeSuite) TestErrorsFindingChildsAreDetected(c *C) { // Commit 7beaad711378a4daafccc2c04bc46d36df2a0fd1 of the go-git // fixture modified "examples/latest/latest.go". // the "examples/latest" tree is // b1f01b730b855c82431918cb338ad47ed558999b. // and "examples/latest/latest.go" is blob // 05f583ace3a9a078d8150905a53a4d82567f125f. // // The parent of that commit is // 337148ef6d751477796922ac127b416b8478fcc4. // the "examples/latest" tree is // 8b0af31d2544acb5c4f3816a602f11418cbd126e. 
// and "examples/latest/latest.go" is blob // de927fad935d172929aacf20e71f3bf0b91dd6f9. path := "utils/difftree/difftree.go" name := "difftree.go" mode := filemode.Regular fromBlob := plumbing.NewHash("aaaa") // does not exists fromTree := plumbing.NewHash("b1f01b730b855c82431918cb338ad47ed558999b") toBlob := plumbing.NewHash("bbbb") // does not exists toTree := plumbing.NewHash("8b0af31d2544acb5c4f3816a602f11418cbd126e") change := &Change{ From: ChangeEntry{ Name: path, Tree: s.tree(c, fromTree), TreeEntry: TreeEntry{ Name: name, Mode: mode, Hash: fromBlob, }, }, To: ChangeEntry{}, } _, _, err := change.Files() c.Assert(err, ErrorMatches, "object not found") change = &Change{ From: empty, To: ChangeEntry{ Name: path, Tree: s.tree(c, toTree), TreeEntry: TreeEntry{ Name: name, Mode: mode, Hash: toBlob, }, }, } _, _, err = change.Files() c.Assert(err, ErrorMatches, "object not found") } func (s *ChangeSuite) TestChangesString(c *C) { expected := "[]" changes := Changes{} obtained := changes.String() c.Assert(obtained, Equals, expected) expected = "[]" changes = make([]*Change, 1) changes[0] = &Change{} changes[0].From.Name = "bla" changes[0].To.Name = "bla" obtained = changes.String() c.Assert(obtained, Equals, expected) expected = "[, ]" changes = make([]*Change, 2) changes[0] = &Change{} changes[0].From.Name = "bla" changes[0].To.Name = "bla" changes[1] = &Change{} changes[1].From.Name = "foo/bar" obtained = changes.String() c.Assert(obtained, Equals, expected) } func (s *ChangeSuite) TestChangesSort(c *C) { changes := make(Changes, 3) changes[0] = &Change{} changes[0].From.Name = "z" changes[0].To.Name = "z" changes[1] = &Change{} changes[1].From.Name = "b/b" changes[2] = &Change{} changes[2].To.Name = "b/a" expected := "[, " + ", " + "]" sort.Sort(changes) c.Assert(changes.String(), Equals, expected) } func (s *ChangeSuite) TestCancel(c *C) { // Commit a5078b19f08f63e7948abd0a5e2fb7d319d3a565 of the go-git // fixture inserted "examples/clone/main.go". 
// // On that commit, the "examples/clone" tree is // 6efca3ff41cab651332f9ebc0c96bb26be809615 // // and the "examples/colone/main.go" is // f95dc8f7923add1a8b9f72ecb1e8db1402de601a path := "examples/clone/main.go" name := "main.go" mode := filemode.Regular blob := plumbing.NewHash("f95dc8f7923add1a8b9f72ecb1e8db1402de601a") tree := plumbing.NewHash("6efca3ff41cab651332f9ebc0c96bb26be809615") change := &Change{ From: empty, To: ChangeEntry{ Name: path, Tree: s.tree(c, tree), TreeEntry: TreeEntry{ Name: name, Mode: mode, Hash: blob, }, }, } ctx, cancel := context.WithCancel(context.Background()) cancel() p, err := change.PatchContext(ctx) c.Assert(p, IsNil) c.Assert(err, ErrorMatches, "operation canceled") } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/commit.go000066400000000000000000000247001345605224300236040ustar00rootroot00000000000000package object import ( "bufio" "bytes" "context" "errors" "fmt" "io" "strings" "golang.org/x/crypto/openpgp" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) const ( beginpgp string = "-----BEGIN PGP SIGNATURE-----" endpgp string = "-----END PGP SIGNATURE-----" headerpgp string = "gpgsig" ) // Hash represents the hash of an object type Hash plumbing.Hash // Commit points to a single tree, marking it as what the project looked like // at a certain point in time. It contains meta-information about that point // in time, such as a timestamp, the author of the changes since the last // commit, a pointer to the previous commit(s), etc. // http://shafiulazam.com/gitbook/1_the_git_object_model.html type Commit struct { // Hash of the commit object. Hash plumbing.Hash // Author is the original author of the commit. Author Signature // Committer is the one performing the commit, might be different from // Author. Committer Signature // PGPSignature is the PGP signature of the commit. PGPSignature string // Message is the commit message, contains arbitrary text. 
Message string // TreeHash is the hash of the root tree of the commit. TreeHash plumbing.Hash // ParentHashes are the hashes of the parent commits of the commit. ParentHashes []plumbing.Hash s storer.EncodedObjectStorer } // GetCommit gets a commit from an object storer and decodes it. func GetCommit(s storer.EncodedObjectStorer, h plumbing.Hash) (*Commit, error) { o, err := s.EncodedObject(plumbing.CommitObject, h) if err != nil { return nil, err } return DecodeCommit(s, o) } // DecodeCommit decodes an encoded object into a *Commit and associates it to // the given object storer. func DecodeCommit(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Commit, error) { c := &Commit{s: s} if err := c.Decode(o); err != nil { return nil, err } return c, nil } // Tree returns the Tree from the commit. func (c *Commit) Tree() (*Tree, error) { return GetTree(c.s, c.TreeHash) } // PatchContext returns the Patch between the actual commit and the provided one. // Error will be return if context expires. Provided context must be non-nil. func (c *Commit) PatchContext(ctx context.Context, to *Commit) (*Patch, error) { fromTree, err := c.Tree() if err != nil { return nil, err } toTree, err := to.Tree() if err != nil { return nil, err } return fromTree.PatchContext(ctx, toTree) } // Patch returns the Patch between the actual commit and the provided one. func (c *Commit) Patch(to *Commit) (*Patch, error) { return c.PatchContext(context.Background(), to) } // Parents return a CommitIter to the parent Commits. func (c *Commit) Parents() CommitIter { return NewCommitIter(c.s, storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, c.ParentHashes), ) } // NumParents returns the number of parents in a commit. func (c *Commit) NumParents() int { return len(c.ParentHashes) } var ErrParentNotFound = errors.New("commit parent not found") // Parent returns the ith parent of a commit. 
func (c *Commit) Parent(i int) (*Commit, error) { if len(c.ParentHashes) == 0 || i > len(c.ParentHashes)-1 { return nil, ErrParentNotFound } return GetCommit(c.s, c.ParentHashes[i]) } // File returns the file with the specified "path" in the commit and a // nil error if the file exists. If the file does not exist, it returns // a nil file and the ErrFileNotFound error. func (c *Commit) File(path string) (*File, error) { tree, err := c.Tree() if err != nil { return nil, err } return tree.File(path) } // Files returns a FileIter allowing to iterate over the Tree func (c *Commit) Files() (*FileIter, error) { tree, err := c.Tree() if err != nil { return nil, err } return tree.Files(), nil } // ID returns the object ID of the commit. The returned value will always match // the current value of Commit.Hash. // // ID is present to fulfill the Object interface. func (c *Commit) ID() plumbing.Hash { return c.Hash } // Type returns the type of object. It always returns plumbing.CommitObject. // // Type is present to fulfill the Object interface. func (c *Commit) Type() plumbing.ObjectType { return plumbing.CommitObject } // Decode transforms a plumbing.EncodedObject into a Commit struct. 
func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { if o.Type() != plumbing.CommitObject { return ErrUnsupportedObject } c.Hash = o.Hash() reader, err := o.Reader() if err != nil { return err } defer ioutil.CheckClose(reader, &err) r := bufio.NewReader(reader) var message bool var pgpsig bool for { line, err := r.ReadBytes('\n') if err != nil && err != io.EOF { return err } if pgpsig { if len(line) > 0 && line[0] == ' ' { line = bytes.TrimLeft(line, " ") c.PGPSignature += string(line) continue } else { pgpsig = false } } if !message { line = bytes.TrimSpace(line) if len(line) == 0 { message = true continue } split := bytes.SplitN(line, []byte{' '}, 2) var data []byte if len(split) == 2 { data = split[1] } switch string(split[0]) { case "tree": c.TreeHash = plumbing.NewHash(string(data)) case "parent": c.ParentHashes = append(c.ParentHashes, plumbing.NewHash(string(data))) case "author": c.Author.Decode(data) case "committer": c.Committer.Decode(data) case headerpgp: c.PGPSignature += string(data) + "\n" pgpsig = true } } else { c.Message += string(line) } if err == io.EOF { return nil } } } // Encode transforms a Commit into a plumbing.EncodedObject. 
func (b *Commit) Encode(o plumbing.EncodedObject) error { return b.encode(o, true) } func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) { o.SetType(plumbing.CommitObject) w, err := o.Writer() if err != nil { return err } defer ioutil.CheckClose(w, &err) if _, err = fmt.Fprintf(w, "tree %s\n", b.TreeHash.String()); err != nil { return err } for _, parent := range b.ParentHashes { if _, err = fmt.Fprintf(w, "parent %s\n", parent.String()); err != nil { return err } } if _, err = fmt.Fprint(w, "author "); err != nil { return err } if err = b.Author.Encode(w); err != nil { return err } if _, err = fmt.Fprint(w, "\ncommitter "); err != nil { return err } if err = b.Committer.Encode(w); err != nil { return err } if b.PGPSignature != "" && includeSig { if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil { return err } // Split all the signature lines and re-write with a left padding and // newline. Use join for this so it's clear that a newline should not be // added after this section, as it will be added when the message is // printed. signature := strings.TrimSuffix(b.PGPSignature, "\n") lines := strings.Split(signature, "\n") if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil { return err } } if _, err = fmt.Fprintf(w, "\n\n%s", b.Message); err != nil { return err } return err } // Stats returns the stats of a commit. func (c *Commit) Stats() (FileStats, error) { return c.StatsContext(context.Background()) } // StatsContext returns the stats of a commit. Error will be return if context // expires. Provided context must be non-nil. 
func (c *Commit) StatsContext(ctx context.Context) (FileStats, error) { fromTree, err := c.Tree() if err != nil { return nil, err } toTree := &Tree{} if c.NumParents() != 0 { firstParent, err := c.Parents().Next() if err != nil { return nil, err } toTree, err = firstParent.Tree() if err != nil { return nil, err } } patch, err := toTree.PatchContext(ctx, fromTree) if err != nil { return nil, err } return getFileStatsFromFilePatches(patch.FilePatches()), nil } func (c *Commit) String() string { return fmt.Sprintf( "%s %s\nAuthor: %s\nDate: %s\n\n%s\n", plumbing.CommitObject, c.Hash, c.Author.String(), c.Author.When.Format(DateFormat), indent(c.Message), ) } // Verify performs PGP verification of the commit with a provided armored // keyring and returns openpgp.Entity associated with verifying key on success. func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) { keyRingReader := strings.NewReader(armoredKeyRing) keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader) if err != nil { return nil, err } // Extract signature. signature := strings.NewReader(c.PGPSignature) encoded := &plumbing.MemoryObject{} // Encode commit components, excluding signature and get a reader object. if err := c.encode(encoded, false); err != nil { return nil, err } er, err := encoded.Reader() if err != nil { return nil, err } return openpgp.CheckArmoredDetachedSignature(keyring, er, signature) } func indent(t string) string { var output []string for _, line := range strings.Split(t, "\n") { if len(line) != 0 { line = " " + line } output = append(output, line) } return strings.Join(output, "\n") } // CommitIter is a generic closable interface for iterating over commits. type CommitIter interface { Next() (*Commit, error) ForEach(func(*Commit) error) error Close() } // storerCommitIter provides an iterator from commits in an EncodedObjectStorer. 
type storerCommitIter struct { storer.EncodedObjectIter s storer.EncodedObjectStorer } // NewCommitIter takes a storer.EncodedObjectStorer and a // storer.EncodedObjectIter and returns a CommitIter that iterates over all // commits contained in the storer.EncodedObjectIter. // // Any non-commit object returned by the storer.EncodedObjectIter is skipped. func NewCommitIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) CommitIter { return &storerCommitIter{iter, s} } // Next moves the iterator to the next commit and returns a pointer to it. If // there are no more commits, it returns io.EOF. func (iter *storerCommitIter) Next() (*Commit, error) { obj, err := iter.EncodedObjectIter.Next() if err != nil { return nil, err } return DecodeCommit(iter.s, obj) } // ForEach call the cb function for each commit contained on this iter until // an error appends or the end of the iter is reached. If ErrStop is sent // the iteration is stopped but no error is returned. The iterator is closed. func (iter *storerCommitIter) ForEach(cb func(*Commit) error) error { return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { c, err := DecodeCommit(iter.s, obj) if err != nil { return err } return cb(c) }) } func (iter *storerCommitIter) Close() { iter.EncodedObjectIter.Close() } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/commit_stats_test.go000066400000000000000000000044561345605224300260670ustar00rootroot00000000000000package object_test import ( "context" "time" "gopkg.in/src-d/go-git.v4" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/object" "gopkg.in/src-d/go-git.v4/storage/memory" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4/memfs" "gopkg.in/src-d/go-billy.v4/util" "gopkg.in/src-d/go-git-fixtures.v3" ) type CommitStatsSuite struct { fixtures.Suite } var _ = Suite(&CommitStatsSuite{}) func (s *CommitStatsSuite) TestStats(c *C) { r, hash := s.writeHisotry(c, []byte("foo\n"), []byte("foo\nbar\n")) aCommit, err := r.CommitObject(hash) c.Assert(err, IsNil) fileStats, err := aCommit.StatsContext(context.Background()) c.Assert(err, IsNil) c.Assert(fileStats[0].Name, Equals, "foo") c.Assert(fileStats[0].Addition, Equals, 1) c.Assert(fileStats[0].Deletion, Equals, 0) c.Assert(fileStats[0].String(), Equals, " foo | 1 +\n") } func (s *CommitStatsSuite) TestStats_RootCommit(c *C) { r, hash := s.writeHisotry(c, []byte("foo\n")) aCommit, err := r.CommitObject(hash) c.Assert(err, IsNil) fileStats, err := aCommit.Stats() c.Assert(err, IsNil) c.Assert(fileStats, HasLen, 1) c.Assert(fileStats[0].Name, Equals, "foo") c.Assert(fileStats[0].Addition, Equals, 1) c.Assert(fileStats[0].Deletion, Equals, 0) c.Assert(fileStats[0].String(), Equals, " foo | 1 +\n") } func (s *CommitStatsSuite) TestStats_WithoutNewLine(c *C) { r, hash := s.writeHisotry(c, []byte("foo\nbar"), []byte("foo\nbar\n")) aCommit, err := r.CommitObject(hash) c.Assert(err, IsNil) fileStats, err := aCommit.Stats() c.Assert(err, IsNil) c.Assert(fileStats[0].Name, Equals, "foo") c.Assert(fileStats[0].Addition, Equals, 1) c.Assert(fileStats[0].Deletion, Equals, 1) c.Assert(fileStats[0].String(), Equals, " foo | 2 +-\n") } func (s *CommitStatsSuite) writeHisotry(c *C, files ...[]byte) (*git.Repository, plumbing.Hash) { cm := &git.CommitOptions{ Author: &object.Signature{Name: "Foo", Email: "foo@example.local", When: time.Now()}, } fs := memfs.New() r, err := git.Init(memory.NewStorage(), fs) c.Assert(err, IsNil) w, err := r.Worktree() c.Assert(err, IsNil) var hash plumbing.Hash for _, content := range files { util.WriteFile(fs, "foo", content, 0644) _, err = w.Add("foo") c.Assert(err, IsNil) hash, 
err = w.Commit("foo\n", cm) c.Assert(err, IsNil) } return r, hash } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/commit_test.go000066400000000000000000000360501345605224300246440ustar00rootroot00000000000000package object import ( "bytes" "context" "io" "strings" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" "gopkg.in/src-d/go-git.v4/storage/filesystem" ) type SuiteCommit struct { BaseObjectsSuite Commit *Commit } var _ = Suite(&SuiteCommit{}) func (s *SuiteCommit) SetUpSuite(c *C) { s.BaseObjectsSuite.SetUpSuite(c) hash := plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea") s.Commit = s.commit(c, hash) } func (s *SuiteCommit) TestDecodeNonCommit(c *C) { hash := plumbing.NewHash("9a48f23120e880dfbe41f7c9b7b708e9ee62a492") blob, err := s.Storer.EncodedObject(plumbing.AnyObject, hash) c.Assert(err, IsNil) commit := &Commit{} err = commit.Decode(blob) c.Assert(err, Equals, ErrUnsupportedObject) } func (s *SuiteCommit) TestType(c *C) { c.Assert(s.Commit.Type(), Equals, plumbing.CommitObject) } func (s *SuiteCommit) TestTree(c *C) { tree, err := s.Commit.Tree() c.Assert(err, IsNil) c.Assert(tree.ID().String(), Equals, "eba74343e2f15d62adedfd8c883ee0262b5c8021") } func (s *SuiteCommit) TestParents(c *C) { expected := []string{ "35e85108805c84807bc66a02d91535e1e24b38b9", "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69", } var output []string i := s.Commit.Parents() err := i.ForEach(func(commit *Commit) error { output = append(output, commit.ID().String()) return nil }) c.Assert(err, IsNil) c.Assert(output, DeepEquals, expected) i.Close() } func (s *SuiteCommit) TestParent(c *C) { commit, err := s.Commit.Parent(1) c.Assert(err, IsNil) c.Assert(commit.Hash.String(), Equals, "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69") } func (s *SuiteCommit) TestParentNotFound(c *C) { commit, err := s.Commit.Parent(42) c.Assert(err, Equals, ErrParentNotFound) c.Assert(commit, IsNil) } 
func (s *SuiteCommit) TestPatch(c *C) { from := s.commit(c, plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294")) to := s.commit(c, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) patch, err := from.Patch(to) c.Assert(err, IsNil) buf := bytes.NewBuffer(nil) err = patch.Encode(buf) c.Assert(err, IsNil) c.Assert(buf.String(), Equals, `diff --git a/vendor/foo.go b/vendor/foo.go new file mode 100644 index 0000000000000000000000000000000000000000..9dea2395f5403188298c1dabe8bdafe562c491e3 --- /dev/null +++ b/vendor/foo.go @@ -0,0 +1,7 @@ +package main + +import "fmt" + +func main() { + fmt.Println("Hello, playground") +} `) c.Assert(buf.String(), Equals, patch.String()) from = s.commit(c, plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47")) to = s.commit(c, plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9")) patch, err = from.Patch(to) c.Assert(err, IsNil) buf.Reset() err = patch.Encode(buf) c.Assert(err, IsNil) c.Assert(buf.String(), Equals, `diff --git a/CHANGELOG b/CHANGELOG deleted file mode 100644 index d3ff53e0564a9f87d8e84b6e28e5060e517008aa..0000000000000000000000000000000000000000 --- a/CHANGELOG +++ /dev/null @@ -1 +0,0 @@ -Initial changelog diff --git a/binary.jpg b/binary.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d5c0f4ab811897cadf03aec358ae60d21f91c50d Binary files /dev/null and b/binary.jpg differ `) c.Assert(buf.String(), Equals, patch.String()) } func (s *SuiteCommit) TestPatchContext(c *C) { from := s.commit(c, plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294")) to := s.commit(c, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) patch, err := from.PatchContext(context.Background(), to) c.Assert(err, IsNil) buf := bytes.NewBuffer(nil) err = patch.Encode(buf) c.Assert(err, IsNil) c.Assert(buf.String(), Equals, `diff --git a/vendor/foo.go b/vendor/foo.go new file mode 100644 index 0000000000000000000000000000000000000000..9dea2395f5403188298c1dabe8bdafe562c491e3 
--- /dev/null +++ b/vendor/foo.go @@ -0,0 +1,7 @@ +package main + +import "fmt" + +func main() { + fmt.Println("Hello, playground") +} `) c.Assert(buf.String(), Equals, patch.String()) from = s.commit(c, plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47")) to = s.commit(c, plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9")) patch, err = from.PatchContext(context.Background(), to) c.Assert(err, IsNil) buf.Reset() err = patch.Encode(buf) c.Assert(err, IsNil) c.Assert(buf.String(), Equals, `diff --git a/CHANGELOG b/CHANGELOG deleted file mode 100644 index d3ff53e0564a9f87d8e84b6e28e5060e517008aa..0000000000000000000000000000000000000000 --- a/CHANGELOG +++ /dev/null @@ -1 +0,0 @@ -Initial changelog diff --git a/binary.jpg b/binary.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d5c0f4ab811897cadf03aec358ae60d21f91c50d Binary files /dev/null and b/binary.jpg differ `) c.Assert(buf.String(), Equals, patch.String()) } func (s *SuiteCommit) TestCommitEncodeDecodeIdempotent(c *C) { ts, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05-07:00") c.Assert(err, IsNil) commits := []*Commit{ { Author: Signature{Name: "Foo", Email: "foo@example.local", When: ts}, Committer: Signature{Name: "Bar", Email: "bar@example.local", When: ts}, Message: "Message\n\nFoo\nBar\nWith trailing blank lines\n\n", TreeHash: plumbing.NewHash("f000000000000000000000000000000000000001"), ParentHashes: []plumbing.Hash{plumbing.NewHash("f000000000000000000000000000000000000002")}, }, { Author: Signature{Name: "Foo", Email: "foo@example.local", When: ts}, Committer: Signature{Name: "Bar", Email: "bar@example.local", When: ts}, Message: "Message\n\nFoo\nBar\nWith no trailing blank lines", TreeHash: plumbing.NewHash("0000000000000000000000000000000000000003"), ParentHashes: []plumbing.Hash{ plumbing.NewHash("f000000000000000000000000000000000000004"), plumbing.NewHash("f000000000000000000000000000000000000005"), 
plumbing.NewHash("f000000000000000000000000000000000000006"), plumbing.NewHash("f000000000000000000000000000000000000007"), }, }, } for _, commit := range commits { obj := &plumbing.MemoryObject{} err = commit.Encode(obj) c.Assert(err, IsNil) newCommit := &Commit{} err = newCommit.Decode(obj) c.Assert(err, IsNil) commit.Hash = obj.Hash() c.Assert(newCommit, DeepEquals, commit) } } func (s *SuiteCommit) TestFile(c *C) { file, err := s.Commit.File("CHANGELOG") c.Assert(err, IsNil) c.Assert(file.Name, Equals, "CHANGELOG") } func (s *SuiteCommit) TestNumParents(c *C) { c.Assert(s.Commit.NumParents(), Equals, 2) } func (s *SuiteCommit) TestString(c *C) { c.Assert(s.Commit.String(), Equals, ""+ "commit 1669dce138d9b841a518c64b10914d88f5e488ea\n"+ "Author: Máximo Cuadros Ortiz \n"+ "Date: Tue Mar 31 13:48:14 2015 +0200\n"+ "\n"+ " Merge branch 'master' of github.com:tyba/git-fixture\n"+ "\n", ) } func (s *SuiteCommit) TestStringMultiLine(c *C) { hash := plumbing.NewHash("e7d896db87294e33ca3202e536d4d9bb16023db3") f := fixtures.ByURL("https://github.com/src-d/go-git.git").One() sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault()) o, err := sto.EncodedObject(plumbing.CommitObject, hash) c.Assert(err, IsNil) commit, err := DecodeCommit(sto, o) c.Assert(err, IsNil) c.Assert(commit.String(), Equals, ""+ "commit e7d896db87294e33ca3202e536d4d9bb16023db3\n"+ "Author: Alberto Cortés \n"+ "Date: Wed Jan 27 11:13:49 2016 +0100\n"+ "\n"+ " fix zlib invalid header error\n"+ "\n"+ " The return value of reads to the packfile were being ignored, so zlib\n"+ " was getting invalid data on it read buffers.\n"+ "\n", ) } func (s *SuiteCommit) TestCommitIterNext(c *C) { i := s.Commit.Parents() commit, err := i.Next() c.Assert(err, IsNil) c.Assert(commit.ID().String(), Equals, "35e85108805c84807bc66a02d91535e1e24b38b9") commit, err = i.Next() c.Assert(err, IsNil) c.Assert(commit.ID().String(), Equals, "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69") commit, err = i.Next() 
c.Assert(err, Equals, io.EOF) c.Assert(commit, IsNil) } func (s *SuiteCommit) TestLongCommitMessageSerialization(c *C) { encoded := &plumbing.MemoryObject{} decoded := &Commit{} commit := *s.Commit longMessage := "my message: message\n\n" + strings.Repeat("test", 4096) + "\nOK" commit.Message = longMessage err := commit.Encode(encoded) c.Assert(err, IsNil) err = decoded.Decode(encoded) c.Assert(err, IsNil) c.Assert(decoded.Message, Equals, longMessage) } func (s *SuiteCommit) TestPGPSignatureSerialization(c *C) { encoded := &plumbing.MemoryObject{} decoded := &Commit{} commit := *s.Commit pgpsignature := `-----BEGIN PGP SIGNATURE----- iQEcBAABAgAGBQJTZbQlAAoJEF0+sviABDDrZbQH/09PfE51KPVPlanr6q1v4/Ut LQxfojUWiLQdg2ESJItkcuweYg+kc3HCyFejeDIBw9dpXt00rY26p05qrpnG+85b hM1/PswpPLuBSr+oCIDj5GMC2r2iEKsfv2fJbNW8iWAXVLoWZRF8B0MfqX/YTMbm ecorc4iXzQu7tupRihslbNkfvfciMnSDeSvzCpWAHl7h8Wj6hhqePmLm9lAYqnKp 8S5B/1SSQuEAjRZgI4IexpZoeKGVDptPHxLLS38fozsyi0QyDyzEgJxcJQVMXxVi RUysgqjcpT8+iQM1PblGfHR4XAhuOqN5Fx06PSaFZhqvWFezJ28/CLyX5q+oIVk= =EFTF -----END PGP SIGNATURE----- ` commit.PGPSignature = pgpsignature err := commit.Encode(encoded) c.Assert(err, IsNil) err = decoded.Decode(encoded) c.Assert(err, IsNil) c.Assert(decoded.PGPSignature, Equals, pgpsignature) // signature with extra empty line, it caused "index out of range" when // parsing it pgpsignature2 := "\n" + pgpsignature commit.PGPSignature = pgpsignature2 encoded = &plumbing.MemoryObject{} decoded = &Commit{} err = commit.Encode(encoded) c.Assert(err, IsNil) err = decoded.Decode(encoded) c.Assert(err, IsNil) c.Assert(decoded.PGPSignature, Equals, pgpsignature2) // signature in author name commit.PGPSignature = "" commit.Author.Name = beginpgp encoded = &plumbing.MemoryObject{} decoded = &Commit{} err = commit.Encode(encoded) c.Assert(err, IsNil) err = decoded.Decode(encoded) c.Assert(err, IsNil) c.Assert(decoded.PGPSignature, Equals, "") c.Assert(decoded.Author.Name, Equals, beginpgp) // broken signature commit.PGPSignature = 
beginpgp + "\n" + "some\n" + "trash\n" + endpgp + "text\n" encoded = &plumbing.MemoryObject{} decoded = &Commit{} err = commit.Encode(encoded) c.Assert(err, IsNil) err = decoded.Decode(encoded) c.Assert(err, IsNil) c.Assert(decoded.PGPSignature, Equals, commit.PGPSignature) } func (s *SuiteCommit) TestStat(c *C) { aCommit := s.commit(c, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) fileStats, err := aCommit.Stats() c.Assert(err, IsNil) c.Assert(fileStats[0].Name, Equals, "vendor/foo.go") c.Assert(fileStats[0].Addition, Equals, 7) c.Assert(fileStats[0].Deletion, Equals, 0) c.Assert(fileStats[0].String(), Equals, " vendor/foo.go | 7 +++++++\n") // Stats for another commit. aCommit = s.commit(c, plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294")) fileStats, err = aCommit.Stats() c.Assert(err, IsNil) c.Assert(fileStats[0].Name, Equals, "go/example.go") c.Assert(fileStats[0].Addition, Equals, 142) c.Assert(fileStats[0].Deletion, Equals, 0) c.Assert(fileStats[0].String(), Equals, " go/example.go | 142 +++++++++++++++++++++++++++++++++++++++++++++++++++++\n") c.Assert(fileStats[1].Name, Equals, "php/crappy.php") c.Assert(fileStats[1].Addition, Equals, 259) c.Assert(fileStats[1].Deletion, Equals, 0) c.Assert(fileStats[1].String(), Equals, " php/crappy.php | 259 ++++++++++++++++++++++++++++++++++++++++++++++++++++\n") } func (s *SuiteCommit) TestVerify(c *C) { ts := time.Unix(1511197315, 0) loc, _ := time.LoadLocation("Asia/Kolkata") commit := &Commit{ Hash: plumbing.NewHash("8a9cea36fe052711fbc42b86e1f99a4fa0065deb"), Author: Signature{Name: "Sunny", Email: "me@darkowlzz.space", When: ts.In(loc)}, Committer: Signature{Name: "Sunny", Email: "me@darkowlzz.space", When: ts.In(loc)}, Message: `status: simplify template command selection `, TreeHash: plumbing.NewHash("6572ba6df4f1fb323c8aaa24ce07bca0648b161e"), ParentHashes: []plumbing.Hash{plumbing.NewHash("ede5f57ea1280a0065beec96d3e1a3453d010dbd")}, PGPSignature: ` -----BEGIN PGP SIGNATURE----- 
iQFHBAABCAAxFiEEoRt6IzxHaZkkUslhQyLeMqcmyU4FAloTCrsTHG1lQGRhcmtv d2x6ei5zcGFjZQAKCRBDIt4ypybJTul5CADmVxB4kqlqRZ9fAcSU5LKva3GRXx0+ leX6vbzoyQztSWYgl7zALh4kB3a3t2C9EnnM6uehlgaORNigyMArCSY1ivWVviCT BvldSVi8f8OvnqwbWX0I/5a8KmItthDf5WqZRFjhcRlY1AK5Bo2hUGVRq71euf8F rE6wNhDoyBCEpftXuXbq8duD7D6qJ7QiOS4m5+ej1UCssS2WQ60yta7q57odduHY +txqTKI8MQUpBgoTqh+V4lOkwQQxLiz7hIQ/ZYLUcnp6fan7/kY/G7YoLt9pOG1Y vLzAWdidLH2P+EUOqlNMuVScHYWD1FZB0/L5LJ8no5pTowQd2Z+Nggxl =0uC8 -----END PGP SIGNATURE----- `, } armoredKeyRing := ` -----BEGIN PGP PUBLIC KEY BLOCK----- mQENBFmtHgABCADnfThM7q8D4pgUub9jMppSpgFh3ev84g3Csc3yQUlszEOVgXmu YiSWP1oAiWFQ8ahCydh3LT8TnEB2QvoRNiExUI5XlXFwVfKW3cpDu8gdhtufs90Q NvpaHOgTqRf/texGEKwXi6fvS47fpyaQ9BKNdN52LeaaHzDDZkVsAFmroE+7MMvj P4Mq8qDn2WcWnX9zheQKYrX6Cs48Tx80eehHor4f/XnuaP8DLmPQx7URdJ0Igckh N+i91Qv2ujin8zxUwhkfus66EZS9lQ4qR9iVHs4WHOs3j7whsejd4VhajonilVHj uqTtqHmpN/4njbIKb8q8uQkS26VQYoSYm2UvABEBAAG0GlN1bm55IDxtZUBkYXJr b3dsenouc3BhY2U+iQFUBBMBCAA+FiEEoRt6IzxHaZkkUslhQyLeMqcmyU4FAlmt HgACGwMFCQPCZwAFCwkIBwIGFQgJCgsCBBYCAwECHgECF4AACgkQQyLeMqcmyU7V nAf+J5BYu26B2i+iwctOzDRFcPwCLka9cBwe5wcDvoF2qL8QRo8NPWBBH4zWHa/k BthtGo1b89a53I2hnTwTQ0NOtAUNV+Vvu6nOHJd9Segsx3E1nM43bd2bUfGJ1eeO jDOlOvtP4ozuV6Ej+0Ln2ouMOc87yAwbAzTfQ9axU6CKUbqy0/t2dW1jdKntGH+t VPeFxJHL2gXjP89skCSPYA7yKqqyJRPFvC+7rde1OLdCmZi4VwghUiNbh3s1+xM3 gfr2ahsRDTN2SQzwuHu4y1EgZgPtuWfRxzHqduoRoSgfOfFr9H9Il3UMHf2Etleu rif40YZJhge6STwsIycGh4wOiLkBDQRZrR4AAQgArpUvPdGC/W9X4AuZXrXEShvx TqM4K2Jk9n0j+ABx87k9fm48qgtae7+TayMbb0i7kcbgnjltKbauTbyRbju/EJvN CdIw76IPpjy6jUM37wG2QGLFo6Ku3x8/ZpNGGOZ8KMU258/EBqDlJQ/4g4kJ8D+m 9yOH0r6/Xpe/jOY2V8Jo9pdFTm+8eAsSyZF0Cl7drz603Pymq1IS2wrwQbdxQA/w B75pQ5es7X34Ac7/9UZCwCPmZDAldnjHyw5dZgZe8XLrG84BIfbG0Hj8PjrFdF1D Czt9bk+PbYAnLORW2oX1oedxVrNFo5UrbWgBSjA1ppbGFjwSDHFlyjuEuxqyFwAR AQABiQE8BBgBCAAmFiEEoRt6IzxHaZkkUslhQyLeMqcmyU4FAlmtHgACGwwFCQPC ZwAACgkQQyLeMqcmyU7ZBggArzc8UUVSjde987Vqnu/S5Cv8Qhz+UB7gAFyTW2iF VYvB86r30H/NnfjvjCVkBE6FHCNHoxWVyDWmuxKviB7nkReHuwqniQHPgdJDcTKC 
tBboeX2IYBLJbEvEJuz5NSvnvFuYkIpZHqySFaqdl/qu9XcmoPL5AmIzIFOeiNty qT0ldkf3ru6yQQDDqBDpkfz4AzkpFnLYL59z6IbJDK2Hz7aKeSEeVOGiZLCjIZZV uISZThYqh5zUkvF346OHLDqfDdgQ4RZriqd/DTtRJPlz2uL0QcEIjJuYCkG0UWgl sYyf9RfOnw/KUFAQbdtvLx3ikODQC+D3KBtuKI9ISHQfgw== =FPev -----END PGP PUBLIC KEY BLOCK----- ` e, err := commit.Verify(armoredKeyRing) c.Assert(err, IsNil) _, ok := e.Identities["Sunny "] c.Assert(ok, Equals, true) } func (s *SuiteCommit) TestPatchCancel(c *C) { from := s.commit(c, plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294")) to := s.commit(c, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) ctx, cancel := context.WithCancel(context.Background()) cancel() patch, err := from.PatchContext(ctx, to) c.Assert(patch, IsNil) c.Assert(err, ErrorMatches, "operation canceled") } func (s *SuiteCommit) TestMalformedHeader(c *C) { encoded := &plumbing.MemoryObject{} decoded := &Commit{} commit := *s.Commit commit.PGPSignature = "\n" commit.Author.Name = "\n" commit.Author.Email = "\n" commit.Committer.Name = "\n" commit.Committer.Email = "\n" err := commit.Encode(encoded) c.Assert(err, IsNil) err = decoded.Decode(encoded) c.Assert(err, IsNil) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/commit_walker.go000066400000000000000000000154741345605224300251610ustar00rootroot00000000000000package object import ( "container/list" "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage" ) type commitPreIterator struct { seenExternal map[plumbing.Hash]bool seen map[plumbing.Hash]bool stack []CommitIter start *Commit } // NewCommitPreorderIter returns a CommitIter that walks the commit history, // starting at the given commit and visiting its parents in pre-order. // The given callback will be called for each visited commit. Each commit will // be visited only once. If the callback returns an error, walking will stop // and will return the error. 
Other errors might be returned if the history // cannot be traversed (e.g. missing objects). Ignore allows to skip some // commits from being iterated. func NewCommitPreorderIter( c *Commit, seenExternal map[plumbing.Hash]bool, ignore []plumbing.Hash, ) CommitIter { seen := make(map[plumbing.Hash]bool) for _, h := range ignore { seen[h] = true } return &commitPreIterator{ seenExternal: seenExternal, seen: seen, stack: make([]CommitIter, 0), start: c, } } func (w *commitPreIterator) Next() (*Commit, error) { var c *Commit for { if w.start != nil { c = w.start w.start = nil } else { current := len(w.stack) - 1 if current < 0 { return nil, io.EOF } var err error c, err = w.stack[current].Next() if err == io.EOF { w.stack = w.stack[:current] continue } if err != nil { return nil, err } } if w.seen[c.Hash] || w.seenExternal[c.Hash] { continue } w.seen[c.Hash] = true if c.NumParents() > 0 { w.stack = append(w.stack, filteredParentIter(c, w.seen)) } return c, nil } } func filteredParentIter(c *Commit, seen map[plumbing.Hash]bool) CommitIter { var hashes []plumbing.Hash for _, h := range c.ParentHashes { if !seen[h] { hashes = append(hashes, h) } } return NewCommitIter(c.s, storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, hashes), ) } func (w *commitPreIterator) ForEach(cb func(*Commit) error) error { for { c, err := w.Next() if err == io.EOF { break } if err != nil { return err } err = cb(c) if err == storer.ErrStop { break } if err != nil { return err } } return nil } func (w *commitPreIterator) Close() {} type commitPostIterator struct { stack []*Commit seen map[plumbing.Hash]bool } // NewCommitPostorderIter returns a CommitIter that walks the commit // history like WalkCommitHistory but in post-order. This means that after // walking a merge commit, the merged commit will be walked before the base // it was merged on. This can be useful if you wish to see the history in // chronological order. Ignore allows to skip some commits from being iterated. 
func NewCommitPostorderIter(c *Commit, ignore []plumbing.Hash) CommitIter { seen := make(map[plumbing.Hash]bool) for _, h := range ignore { seen[h] = true } return &commitPostIterator{ stack: []*Commit{c}, seen: seen, } } func (w *commitPostIterator) Next() (*Commit, error) { for { if len(w.stack) == 0 { return nil, io.EOF } c := w.stack[len(w.stack)-1] w.stack = w.stack[:len(w.stack)-1] if w.seen[c.Hash] { continue } w.seen[c.Hash] = true return c, c.Parents().ForEach(func(p *Commit) error { w.stack = append(w.stack, p) return nil }) } } func (w *commitPostIterator) ForEach(cb func(*Commit) error) error { for { c, err := w.Next() if err == io.EOF { break } if err != nil { return err } err = cb(c) if err == storer.ErrStop { break } if err != nil { return err } } return nil } func (w *commitPostIterator) Close() {} // commitAllIterator stands for commit iterator for all refs. type commitAllIterator struct { // currCommit points to the current commit. currCommit *list.Element } // NewCommitAllIter returns a new commit iterator for all refs. // repoStorer is a repo Storer used to get commits and references. 
// commitIterFunc is a commit iterator function, used to iterate through ref commits in chosen order func NewCommitAllIter(repoStorer storage.Storer, commitIterFunc func(*Commit) CommitIter) (CommitIter, error) { commitsPath := list.New() commitsLookup := make(map[plumbing.Hash]*list.Element) head, err := storer.ResolveReference(repoStorer, plumbing.HEAD) if err == nil { err = addReference(repoStorer, commitIterFunc, head, commitsPath, commitsLookup) } if err != nil && err != plumbing.ErrReferenceNotFound { return nil, err } // add all references along with the HEAD refIter, err := repoStorer.IterReferences() if err != nil { return nil, err } defer refIter.Close() for { ref, err := refIter.Next() if err == io.EOF { break } if err == plumbing.ErrReferenceNotFound { continue } if err != nil { return nil, err } if err = addReference(repoStorer, commitIterFunc, ref, commitsPath, commitsLookup); err != nil { return nil, err } } return &commitAllIterator{commitsPath.Front()}, nil } func addReference( repoStorer storage.Storer, commitIterFunc func(*Commit) CommitIter, ref *plumbing.Reference, commitsPath *list.List, commitsLookup map[plumbing.Hash]*list.Element) error { _, exists := commitsLookup[ref.Hash()] if exists { // we already have it - skip the reference. return nil } refCommit, _ := GetCommit(repoStorer, ref.Hash()) if refCommit == nil { // if it's not a commit - skip it. 
return nil } var ( refCommits []*Commit parent *list.Element ) // collect all ref commits to add commitIter := commitIterFunc(refCommit) for c, e := commitIter.Next(); e == nil; { parent, exists = commitsLookup[c.Hash] if exists { break } refCommits = append(refCommits, c) c, e = commitIter.Next() } commitIter.Close() if parent == nil { // common parent - not found // add all commits to the path from this ref (maybe it's a HEAD and we don't have anything, yet) for _, c := range refCommits { parent = commitsPath.PushBack(c) commitsLookup[c.Hash] = parent } } else { // add ref's commits to the path in reverse order (from the latest) for i := len(refCommits) - 1; i >= 0; i-- { c := refCommits[i] // insert before found common parent parent = commitsPath.InsertBefore(c, parent) commitsLookup[c.Hash] = parent } } return nil } func (it *commitAllIterator) Next() (*Commit, error) { if it.currCommit == nil { return nil, io.EOF } c := it.currCommit.Value.(*Commit) it.currCommit = it.currCommit.Next() return c, nil } func (it *commitAllIterator) ForEach(cb func(*Commit) error) error { for { c, err := it.Next() if err == io.EOF { break } if err != nil { return err } err = cb(c) if err == storer.ErrStop { break } if err != nil { return err } } return nil } func (it *commitAllIterator) Close() { it.currCommit = nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/commit_walker_bfs.go000066400000000000000000000037451345605224300260110ustar00rootroot00000000000000package object import ( "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" ) type bfsCommitIterator struct { seenExternal map[plumbing.Hash]bool seen map[plumbing.Hash]bool queue []*Commit } // NewCommitIterBSF returns a CommitIter that walks the commit history, // starting at the given commit and visiting its parents in pre-order. // The given callback will be called for each visited commit. Each commit will // be visited only once. 
If the callback returns an error, walking will stop // and will return the error. Other errors might be returned if the history // cannot be traversed (e.g. missing objects). Ignore allows to skip some // commits from being iterated. func NewCommitIterBSF( c *Commit, seenExternal map[plumbing.Hash]bool, ignore []plumbing.Hash, ) CommitIter { seen := make(map[plumbing.Hash]bool) for _, h := range ignore { seen[h] = true } return &bfsCommitIterator{ seenExternal: seenExternal, seen: seen, queue: []*Commit{c}, } } func (w *bfsCommitIterator) appendHash(store storer.EncodedObjectStorer, h plumbing.Hash) error { if w.seen[h] || w.seenExternal[h] { return nil } c, err := GetCommit(store, h) if err != nil { return err } w.queue = append(w.queue, c) return nil } func (w *bfsCommitIterator) Next() (*Commit, error) { var c *Commit for { if len(w.queue) == 0 { return nil, io.EOF } c = w.queue[0] w.queue = w.queue[1:] if w.seen[c.Hash] || w.seenExternal[c.Hash] { continue } w.seen[c.Hash] = true for _, h := range c.ParentHashes { err := w.appendHash(c.s, h) if err != nil { return nil, err } } return c, nil } } func (w *bfsCommitIterator) ForEach(cb func(*Commit) error) error { for { c, err := w.Next() if err == io.EOF { break } if err != nil { return err } err = cb(c) if err == storer.ErrStop { break } if err != nil { return err } } return nil } func (w *bfsCommitIterator) Close() {} golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/commit_walker_ctime.go000066400000000000000000000041461345605224300263340ustar00rootroot00000000000000package object import ( "io" "github.com/emirpasic/gods/trees/binaryheap" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" ) type commitIteratorByCTime struct { seenExternal map[plumbing.Hash]bool seen map[plumbing.Hash]bool heap *binaryheap.Heap } // NewCommitIterCTime returns a CommitIter that walks the commit history, // starting at the given commit and visiting its parents while preserving Committer Time order. 
// this appears to be the closest order to `git log` // The given callback will be called for each visited commit. Each commit will // be visited only once. If the callback returns an error, walking will stop // and will return the error. Other errors might be returned if the history // cannot be traversed (e.g. missing objects). Ignore allows to skip some // commits from being iterated. func NewCommitIterCTime( c *Commit, seenExternal map[plumbing.Hash]bool, ignore []plumbing.Hash, ) CommitIter { seen := make(map[plumbing.Hash]bool) for _, h := range ignore { seen[h] = true } heap := binaryheap.NewWith(func(a, b interface{}) int { if a.(*Commit).Committer.When.Before(b.(*Commit).Committer.When) { return 1 } return -1 }) heap.Push(c) return &commitIteratorByCTime{ seenExternal: seenExternal, seen: seen, heap: heap, } } func (w *commitIteratorByCTime) Next() (*Commit, error) { var c *Commit for { cIn, ok := w.heap.Pop() if !ok { return nil, io.EOF } c = cIn.(*Commit) if w.seen[c.Hash] || w.seenExternal[c.Hash] { continue } w.seen[c.Hash] = true for _, h := range c.ParentHashes { if w.seen[h] || w.seenExternal[h] { continue } pc, err := GetCommit(c.s, h) if err != nil { return nil, err } w.heap.Push(pc) } return c, nil } } func (w *commitIteratorByCTime) ForEach(cb func(*Commit) error) error { for { c, err := w.Next() if err == io.EOF { break } if err != nil { return err } err = cb(c) if err == storer.ErrStop { break } if err != nil { return err } } return nil } func (w *commitIteratorByCTime) Close() {} golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/commit_walker_file.go000066400000000000000000000067561345605224300261630ustar00rootroot00000000000000package object import ( "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" ) type commitFileIter struct { fileName string sourceIter CommitIter currentCommit *Commit checkParent bool } // NewCommitFileIterFromIter returns a commit iterator which performs diffTree between // successive 
trees returned from the commit iterator from the argument. The purpose of this is // to find the commits that explain how the files that match the path came to be. // If checkParent is true then the function double checks if potential parent (next commit in a path) // is one of the parents in the tree (it's used by `git log --all`). func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter { iterator := new(commitFileIter) iterator.sourceIter = commitIter iterator.fileName = fileName iterator.checkParent = checkParent return iterator } func (c *commitFileIter) Next() (*Commit, error) { if c.currentCommit == nil { var err error c.currentCommit, err = c.sourceIter.Next() if err != nil { return nil, err } } commit, commitErr := c.getNextFileCommit() // Setting current-commit to nil to prevent unwanted states when errors are raised if commitErr != nil { c.currentCommit = nil } return commit, commitErr } func (c *commitFileIter) getNextFileCommit() (*Commit, error) { for { // Parent-commit can be nil if the current-commit is the initial commit parentCommit, parentCommitErr := c.sourceIter.Next() if parentCommitErr != nil { // If the parent-commit is beyond the initial commit, keep it nil if parentCommitErr != io.EOF { return nil, parentCommitErr } parentCommit = nil } // Fetch the trees of the current and parent commits currentTree, currTreeErr := c.currentCommit.Tree() if currTreeErr != nil { return nil, currTreeErr } var parentTree *Tree if parentCommit != nil { var parentTreeErr error parentTree, parentTreeErr = parentCommit.Tree() if parentTreeErr != nil { return nil, parentTreeErr } } // Find diff between current and parent trees changes, diffErr := DiffTree(currentTree, parentTree) if diffErr != nil { return nil, diffErr } found := c.hasFileChange(changes, parentCommit) // Storing the current-commit in-case a change is found, and // Updating the current-commit for the next-iteration prevCommit := c.currentCommit 
c.currentCommit = parentCommit if found { return prevCommit, nil } // If not matches found and if parent-commit is beyond the initial commit, then return with EOF if parentCommit == nil { return nil, io.EOF } } } func (c *commitFileIter) hasFileChange(changes Changes, parent *Commit) bool { for _, change := range changes { if change.name() != c.fileName { continue } // filename matches, now check if source iterator contains all commits (from all refs) if c.checkParent { if parent != nil && isParentHash(parent.Hash, c.currentCommit) { return true } continue } return true } return false } func isParentHash(hash plumbing.Hash, commit *Commit) bool { for _, h := range commit.ParentHashes { if h == hash { return true } } return false } func (c *commitFileIter) ForEach(cb func(*Commit) error) error { for { commit, nextErr := c.Next() if nextErr != nil { return nextErr } err := cb(commit) if err == storer.ErrStop { return nil } else if err != nil { return err } } } func (c *commitFileIter) Close() { c.sourceIter.Close() } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/commit_walker_test.go000066400000000000000000000143501345605224300262100ustar00rootroot00000000000000package object import ( "gopkg.in/src-d/go-git.v4/plumbing" . 
"gopkg.in/check.v1" ) type CommitWalkerSuite struct { BaseObjectsSuite } var _ = Suite(&CommitWalkerSuite{}) func (s *CommitWalkerSuite) TestCommitPreIterator(c *C) { commit := s.commit(c, s.Fixture.Head) var commits []*Commit NewCommitPreorderIter(commit, nil, nil).ForEach(func(c *Commit) error { commits = append(commits, c) return nil }) c.Assert(commits, HasLen, 8) expected := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "918c48b83bd081e863dbe1b80f8998f058cd8294", "af2d6a6954d532f8ffb47615169c8fdf9d383a1a", "1669dce138d9b841a518c64b10914d88f5e488ea", "35e85108805c84807bc66a02d91535e1e24b38b9", "b029517f6300c2da0f4b651b8642506cd6aaf45d", "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69", "b8e471f58bcbca63b07bda20e428190409c2db47", } for i, commit := range commits { c.Assert(commit.Hash.String(), Equals, expected[i]) } } func (s *CommitWalkerSuite) TestCommitPreIteratorWithIgnore(c *C) { commit := s.commit(c, s.Fixture.Head) var commits []*Commit NewCommitPreorderIter(commit, nil, []plumbing.Hash{ plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"), }).ForEach(func(c *Commit) error { commits = append(commits, c) return nil }) c.Assert(commits, HasLen, 2) expected := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "918c48b83bd081e863dbe1b80f8998f058cd8294", } for i, commit := range commits { c.Assert(commit.Hash.String(), Equals, expected[i]) } } func (s *CommitWalkerSuite) TestCommitPreIteratorWithSeenExternal(c *C) { commit := s.commit(c, s.Fixture.Head) var commits []*Commit seenExternal := map[plumbing.Hash]bool{ plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"): true, } NewCommitPreorderIter(commit, seenExternal, nil). 
ForEach(func(c *Commit) error { commits = append(commits, c) return nil }) c.Assert(commits, HasLen, 2) expected := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "918c48b83bd081e863dbe1b80f8998f058cd8294", } for i, commit := range commits { c.Assert(commit.Hash.String(), Equals, expected[i]) } } func (s *CommitWalkerSuite) TestCommitPostIterator(c *C) { commit := s.commit(c, s.Fixture.Head) var commits []*Commit NewCommitPostorderIter(commit, nil).ForEach(func(c *Commit) error { commits = append(commits, c) return nil }) c.Assert(commits, HasLen, 8) expected := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "918c48b83bd081e863dbe1b80f8998f058cd8294", "af2d6a6954d532f8ffb47615169c8fdf9d383a1a", "1669dce138d9b841a518c64b10914d88f5e488ea", "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69", "b8e471f58bcbca63b07bda20e428190409c2db47", "b029517f6300c2da0f4b651b8642506cd6aaf45d", "35e85108805c84807bc66a02d91535e1e24b38b9", } for i, commit := range commits { c.Assert(commit.Hash.String(), Equals, expected[i]) } } func (s *CommitWalkerSuite) TestCommitPostIteratorWithIgnore(c *C) { commit := s.commit(c, s.Fixture.Head) var commits []*Commit NewCommitPostorderIter(commit, []plumbing.Hash{ plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"), }).ForEach(func(c *Commit) error { commits = append(commits, c) return nil }) c.Assert(commits, HasLen, 2) expected := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "918c48b83bd081e863dbe1b80f8998f058cd8294", } for i, commit := range commits { c.Assert(commit.Hash.String(), Equals, expected[i]) } } func (s *CommitWalkerSuite) TestCommitCTimeIterator(c *C) { commit := s.commit(c, s.Fixture.Head) var commits []*Commit NewCommitIterCTime(commit, nil, nil).ForEach(func(c *Commit) error { commits = append(commits, c) return nil }) c.Assert(commits, HasLen, 8) expected := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", // 2015-04-05T23:30:47+02:00 "918c48b83bd081e863dbe1b80f8998f058cd8294", // 
2015-03-31T13:56:18+02:00 "af2d6a6954d532f8ffb47615169c8fdf9d383a1a", // 2015-03-31T13:51:51+02:00 "1669dce138d9b841a518c64b10914d88f5e488ea", // 2015-03-31T13:48:14+02:00 "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69", // 2015-03-31T13:47:14+02:00 "35e85108805c84807bc66a02d91535e1e24b38b9", // 2015-03-31T13:46:24+02:00 "b8e471f58bcbca63b07bda20e428190409c2db47", // 2015-03-31T13:44:52+02:00 "b029517f6300c2da0f4b651b8642506cd6aaf45d", // 2015-03-31T13:42:21+02:00 } for i, commit := range commits { c.Assert(commit.Hash.String(), Equals, expected[i]) } } func (s *CommitWalkerSuite) TestCommitCTimeIteratorWithIgnore(c *C) { commit := s.commit(c, s.Fixture.Head) var commits []*Commit NewCommitIterCTime(commit, nil, []plumbing.Hash{ plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"), }).ForEach(func(c *Commit) error { commits = append(commits, c) return nil }) c.Assert(commits, HasLen, 2) expected := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "918c48b83bd081e863dbe1b80f8998f058cd8294", } for i, commit := range commits { c.Assert(commit.Hash.String(), Equals, expected[i]) } } func (s *CommitWalkerSuite) TestCommitBSFIterator(c *C) { commit := s.commit(c, s.Fixture.Head) var commits []*Commit NewCommitIterBSF(commit, nil, nil).ForEach(func(c *Commit) error { commits = append(commits, c) return nil }) c.Assert(commits, HasLen, 8) expected := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "918c48b83bd081e863dbe1b80f8998f058cd8294", "af2d6a6954d532f8ffb47615169c8fdf9d383a1a", "1669dce138d9b841a518c64b10914d88f5e488ea", "35e85108805c84807bc66a02d91535e1e24b38b9", "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69", "b029517f6300c2da0f4b651b8642506cd6aaf45d", "b8e471f58bcbca63b07bda20e428190409c2db47", } for i, commit := range commits { c.Assert(commit.Hash.String(), Equals, expected[i]) } } func (s *CommitWalkerSuite) TestCommitBSFIteratorWithIgnore(c *C) { commit := s.commit(c, s.Fixture.Head) var commits []*Commit NewCommitIterBSF(commit, nil, 
[]plumbing.Hash{ plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"), }).ForEach(func(c *Commit) error { commits = append(commits, c) return nil }) c.Assert(commits, HasLen, 2) expected := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "918c48b83bd081e863dbe1b80f8998f058cd8294", } for i, commit := range commits { c.Assert(commit.Hash.String(), Equals, expected[i]) } } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/difftree.go000066400000000000000000000016561345605224300241110ustar00rootroot00000000000000package object import ( "bytes" "context" "gopkg.in/src-d/go-git.v4/utils/merkletrie" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) // DiffTree compares the content and mode of the blobs found via two // tree objects. func DiffTree(a, b *Tree) (Changes, error) { return DiffTreeContext(context.Background(), a, b) } // DiffTree compares the content and mode of the blobs found via two // tree objects. Provided context must be non-nil. // An error will be return if context expires func DiffTreeContext(ctx context.Context, a, b *Tree) (Changes, error) { from := NewTreeRootNode(a) to := NewTreeRootNode(b) hashEqual := func(a, b noder.Hasher) bool { return bytes.Equal(a.Hash(), b.Hash()) } merkletrieChanges, err := merkletrie.DiffTreeContext(ctx, from, to, hashEqual) if err != nil { if err == merkletrie.ErrCanceled { return nil, ErrCanceled } return nil, err } return newChanges(merkletrieChanges) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/difftree_test.go000066400000000000000000000255261345605224300251520ustar00rootroot00000000000000package object import ( "sort" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/filesystem" "gopkg.in/src-d/go-git.v4/storage/memory" "gopkg.in/src-d/go-git.v4/utils/merkletrie" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type DiffTreeSuite struct { fixtures.Suite Storer storer.EncodedObjectStorer Fixture *fixtures.Fixture cache map[string]storer.EncodedObjectStorer } func (s *DiffTreeSuite) SetUpSuite(c *C) { s.Suite.SetUpSuite(c) s.Fixture = fixtures.Basic().One() sto := filesystem.NewStorage(s.Fixture.DotGit(), cache.NewObjectLRUDefault()) s.Storer = sto s.cache = make(map[string]storer.EncodedObjectStorer) } func (s *DiffTreeSuite) commitFromStorer(c *C, sto storer.EncodedObjectStorer, h plumbing.Hash) *Commit { commit, err := GetCommit(sto, h) c.Assert(err, IsNil) return commit } func (s *DiffTreeSuite) storageFromPackfile(f *fixtures.Fixture) storer.EncodedObjectStorer { sto, ok := s.cache[f.URL] if ok { return sto } storer := memory.NewStorage() pf := f.Packfile() defer pf.Close() if err := packfile.UpdateObjectStorage(storer, pf); err != nil { panic(err) } s.cache[f.URL] = storer return storer } var _ = Suite(&DiffTreeSuite{}) type expectChange struct { Action merkletrie.Action Name string } func assertChanges(a Changes, c *C) { for _, changes := range a { action, err := changes.Action() c.Assert(err, IsNil) switch action { case merkletrie.Insert: c.Assert(changes.From.Tree, IsNil) c.Assert(changes.To.Tree, NotNil) case merkletrie.Delete: c.Assert(changes.From.Tree, NotNil) c.Assert(changes.To.Tree, IsNil) case merkletrie.Modify: c.Assert(changes.From.Tree, NotNil) c.Assert(changes.To.Tree, NotNil) default: c.Fatalf("unknown action: %d", action) } } } func equalChanges(a Changes, b []expectChange, c *C) bool { if len(a) != len(b) { return false } sort.Sort(a) for i, va := range a { vb := b[i] action, err := va.Action() c.Assert(err, IsNil) if action != vb.Action || va.name() != vb.Name { return false } } return true } func (s *DiffTreeSuite) TestDiffTree(c *C) { for i, t := range []struct { repository string // the repo name as in localRepos commit1 string // the commit of the first tree commit2 string // the commit 
of the second tree expected []expectChange // the expected list of []changeExpect }{ { "https://github.com/dezfowler/LiteMock.git", "", "", []expectChange{}, }, { "https://github.com/dezfowler/LiteMock.git", "b7965eaa2c4f245d07191fe0bcfe86da032d672a", "b7965eaa2c4f245d07191fe0bcfe86da032d672a", []expectChange{}, }, { "https://github.com/dezfowler/LiteMock.git", "", "b7965eaa2c4f245d07191fe0bcfe86da032d672a", []expectChange{ {Action: merkletrie.Insert, Name: "README"}, }, }, { "https://github.com/dezfowler/LiteMock.git", "b7965eaa2c4f245d07191fe0bcfe86da032d672a", "", []expectChange{ {Action: merkletrie.Delete, Name: "README"}, }, }, { "https://github.com/githubtraining/example-branches.git", "", "f0eb272cc8f77803478c6748103a1450aa1abd37", []expectChange{ {Action: merkletrie.Insert, Name: "README.md"}, }, }, { "https://github.com/githubtraining/example-branches.git", "f0eb272cc8f77803478c6748103a1450aa1abd37", "", []expectChange{ {Action: merkletrie.Delete, Name: "README.md"}, }, }, { "https://github.com/githubtraining/example-branches.git", "f0eb272cc8f77803478c6748103a1450aa1abd37", "f0eb272cc8f77803478c6748103a1450aa1abd37", []expectChange{}, }, { "https://github.com/github/gem-builder.git", "", "9608eed92b3839b06ebf72d5043da547de10ce85", []expectChange{ {Action: merkletrie.Insert, Name: "README"}, {Action: merkletrie.Insert, Name: "gem_builder.rb"}, {Action: merkletrie.Insert, Name: "gem_eval.rb"}, }, }, { "https://github.com/github/gem-builder.git", "9608eed92b3839b06ebf72d5043da547de10ce85", "", []expectChange{ {Action: merkletrie.Delete, Name: "README"}, {Action: merkletrie.Delete, Name: "gem_builder.rb"}, {Action: merkletrie.Delete, Name: "gem_eval.rb"}, }, }, { "https://github.com/github/gem-builder.git", "9608eed92b3839b06ebf72d5043da547de10ce85", "9608eed92b3839b06ebf72d5043da547de10ce85", []expectChange{}, }, { "https://github.com/toqueteos/ts3.git", "", "764e914b75d6d6df1fc5d832aa9840f590abf1bb", []expectChange{ {Action: merkletrie.Insert, Name: 
"README.markdown"}, {Action: merkletrie.Insert, Name: "examples/bot.go"}, {Action: merkletrie.Insert, Name: "examples/raw_shell.go"}, {Action: merkletrie.Insert, Name: "helpers.go"}, {Action: merkletrie.Insert, Name: "ts3.go"}, }, }, { "https://github.com/toqueteos/ts3.git", "764e914b75d6d6df1fc5d832aa9840f590abf1bb", "", []expectChange{ {Action: merkletrie.Delete, Name: "README.markdown"}, {Action: merkletrie.Delete, Name: "examples/bot.go"}, {Action: merkletrie.Delete, Name: "examples/raw_shell.go"}, {Action: merkletrie.Delete, Name: "helpers.go"}, {Action: merkletrie.Delete, Name: "ts3.go"}, }, }, { "https://github.com/toqueteos/ts3.git", "764e914b75d6d6df1fc5d832aa9840f590abf1bb", "764e914b75d6d6df1fc5d832aa9840f590abf1bb", []expectChange{}, }, { "https://github.com/github/gem-builder.git", "9608eed92b3839b06ebf72d5043da547de10ce85", "6c41e05a17e19805879689414026eb4e279f7de0", []expectChange{ {Action: merkletrie.Modify, Name: "gem_eval.rb"}, }, }, { "https://github.com/github/gem-builder.git", "6c41e05a17e19805879689414026eb4e279f7de0", "89be3aac2f178719c12953cc9eaa23441f8d9371", []expectChange{ {Action: merkletrie.Modify, Name: "gem_eval.rb"}, {Action: merkletrie.Insert, Name: "gem_eval_test.rb"}, {Action: merkletrie.Insert, Name: "security.rb"}, {Action: merkletrie.Insert, Name: "security_test.rb"}, }, }, { "https://github.com/github/gem-builder.git", "89be3aac2f178719c12953cc9eaa23441f8d9371", "597240b7da22d03ad555328f15abc480b820acc0", []expectChange{ {Action: merkletrie.Modify, Name: "gem_eval.rb"}, }, }, { "https://github.com/github/gem-builder.git", "597240b7da22d03ad555328f15abc480b820acc0", "0260380e375d2dd0e1a8fcab15f91ce56dbe778e", []expectChange{ {Action: merkletrie.Modify, Name: "gem_eval.rb"}, {Action: merkletrie.Modify, Name: "gem_eval_test.rb"}, {Action: merkletrie.Insert, Name: "lazy_dir.rb"}, {Action: merkletrie.Insert, Name: "lazy_dir_test.rb"}, {Action: merkletrie.Modify, Name: "security.rb"}, {Action: merkletrie.Modify, Name: 
"security_test.rb"}, }, }, { "https://github.com/github/gem-builder.git", "0260380e375d2dd0e1a8fcab15f91ce56dbe778e", "597240b7da22d03ad555328f15abc480b820acc0", []expectChange{ {Action: merkletrie.Modify, Name: "gem_eval.rb"}, {Action: merkletrie.Modify, Name: "gem_eval_test.rb"}, {Action: merkletrie.Delete, Name: "lazy_dir.rb"}, {Action: merkletrie.Delete, Name: "lazy_dir_test.rb"}, {Action: merkletrie.Modify, Name: "security.rb"}, {Action: merkletrie.Modify, Name: "security_test.rb"}, }, }, { "https://github.com/github/gem-builder.git", "0260380e375d2dd0e1a8fcab15f91ce56dbe778e", "ca9fd470bacb6262eb4ca23ee48bb2f43711c1ff", []expectChange{ {Action: merkletrie.Modify, Name: "gem_eval.rb"}, {Action: merkletrie.Modify, Name: "security.rb"}, {Action: merkletrie.Modify, Name: "security_test.rb"}, }, }, { "https://github.com/github/gem-builder.git", "fe3c86745f887c23a0d38c85cfd87ca957312f86", "b7e3f636febf7a0cd3ab473b6d30081786d2c5b6", []expectChange{ {Action: merkletrie.Modify, Name: "gem_eval.rb"}, {Action: merkletrie.Modify, Name: "gem_eval_test.rb"}, {Action: merkletrie.Insert, Name: "git_mock"}, {Action: merkletrie.Modify, Name: "lazy_dir.rb"}, {Action: merkletrie.Modify, Name: "lazy_dir_test.rb"}, {Action: merkletrie.Modify, Name: "security.rb"}, }, }, { "https://github.com/rumpkernel/rumprun-xen.git", "1831e47b0c6db750714cd0e4be97b5af17fb1eb0", "51d8515578ea0c88cc8fc1a057903675cf1fc16c", []expectChange{ {Action: merkletrie.Modify, Name: "Makefile"}, {Action: merkletrie.Modify, Name: "netbsd_init.c"}, {Action: merkletrie.Modify, Name: "rumphyper_stubs.c"}, {Action: merkletrie.Delete, Name: "sysproxy.c"}, }, }, { "https://github.com/rumpkernel/rumprun-xen.git", "1831e47b0c6db750714cd0e4be97b5af17fb1eb0", "e13e678f7ee9badd01b120889e0ec5fdc8ae3802", []expectChange{ {Action: merkletrie.Modify, Name: "app-tools/rumprun"}, }, }, } { f := fixtures.ByURL(t.repository).One() sto := s.storageFromPackfile(f) var tree1, tree2 *Tree var err error if t.commit1 != "" { tree1, 
err = s.commitFromStorer(c, sto, plumbing.NewHash(t.commit1)).Tree() c.Assert(err, IsNil, Commentf("subtest %d: unable to retrieve tree from commit %s and repo %s: %s", i, t.commit1, t.repository, err)) } if t.commit2 != "" { tree2, err = s.commitFromStorer(c, sto, plumbing.NewHash(t.commit2)).Tree() c.Assert(err, IsNil, Commentf("subtest %d: unable to retrieve tree from commit %s and repo %s", i, t.commit2, t.repository, err)) } obtained, err := DiffTree(tree1, tree2) c.Assert(err, IsNil, Commentf("subtest %d: unable to calculate difftree: %s", i, err)) obtainedFromMethod, err := tree1.Diff(tree2) c.Assert(err, IsNil, Commentf("subtest %d: unable to calculate difftree: %s. Result calling Diff method from Tree object returns an error", i, err)) c.Assert(obtained, DeepEquals, obtainedFromMethod) c.Assert(equalChanges(obtained, t.expected, c), Equals, true, Commentf("subtest:%d\nrepo=%s\ncommit1=%s\ncommit2=%s\nexpected=%s\nobtained=%s", i, t.repository, t.commit1, t.commit2, t.expected, obtained)) assertChanges(obtained, c) } } func (s *DiffTreeSuite) TestIssue279(c *C) { // treeNoders should have the same hash when their mode is // filemode.Deprecated and filemode.Regular. a := &treeNoder{ hash: plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), mode: filemode.Regular, } b := &treeNoder{ hash: plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), mode: filemode.Deprecated, } c.Assert(a.Hash(), DeepEquals, b.Hash()) // yet, they should have different hashes if their contents change. 
aa := &treeNoder{ hash: plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), mode: filemode.Regular, } c.Assert(a.Hash(), Not(DeepEquals), aa.Hash()) bb := &treeNoder{ hash: plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), mode: filemode.Deprecated, } c.Assert(b.Hash(), Not(DeepEquals), bb.Hash()) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/file.go000066400000000000000000000062111345605224300232300ustar00rootroot00000000000000package object import ( "bytes" "io" "strings" "gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/utils/binary" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) // File represents git file objects. type File struct { // Name is the path of the file. It might be relative to a tree, // depending of the function that generates it. Name string // Mode is the file mode. Mode filemode.FileMode // Blob with the contents of the file. Blob } // NewFile returns a File based on the given blob object func NewFile(name string, m filemode.FileMode, b *Blob) *File { return &File{Name: name, Mode: m, Blob: *b} } // Contents returns the contents of a file as a string. func (f *File) Contents() (content string, err error) { reader, err := f.Reader() if err != nil { return "", err } defer ioutil.CheckClose(reader, &err) buf := new(bytes.Buffer) if _, err := buf.ReadFrom(reader); err != nil { return "", err } return buf.String(), nil } // IsBinary returns if the file is binary or not func (f *File) IsBinary() (bin bool, err error) { reader, err := f.Reader() if err != nil { return false, err } defer ioutil.CheckClose(reader, &err) return binary.IsBinary(reader) } // Lines returns a slice of lines from the contents of a file, stripping // all end of line characters. If the last line is empty (does not end // in an end of line), it is also stripped. 
func (f *File) Lines() ([]string, error) { content, err := f.Contents() if err != nil { return nil, err } splits := strings.Split(content, "\n") // remove the last line if it is empty if splits[len(splits)-1] == "" { return splits[:len(splits)-1], nil } return splits, nil } // FileIter provides an iterator for the files in a tree. type FileIter struct { s storer.EncodedObjectStorer w TreeWalker } // NewFileIter takes a storer.EncodedObjectStorer and a Tree and returns a // *FileIter that iterates over all files contained in the tree, recursively. func NewFileIter(s storer.EncodedObjectStorer, t *Tree) *FileIter { return &FileIter{s: s, w: *NewTreeWalker(t, true, nil)} } // Next moves the iterator to the next file and returns a pointer to it. If // there are no more files, it returns io.EOF. func (iter *FileIter) Next() (*File, error) { for { name, entry, err := iter.w.Next() if err != nil { return nil, err } if entry.Mode == filemode.Dir || entry.Mode == filemode.Submodule { continue } blob, err := GetBlob(iter.s, entry.Hash) if err != nil { return nil, err } return NewFile(name, entry.Mode, blob), nil } } // ForEach call the cb function for each file contained in this iter until // an error happens or the end of the iter is reached. If plumbing.ErrStop is sent // the iteration is stop but no error is returned. The iterator is closed. 
func (iter *FileIter) ForEach(cb func(*File) error) error { defer iter.Close() for { f, err := iter.Next() if err != nil { if err == io.EOF { return nil } return err } if err := cb(f); err != nil { if err == storer.ErrStop { return nil } return err } } } func (iter *FileIter) Close() { iter.w.Close() } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/file_test.go000066400000000000000000000162011345605224300242670ustar00rootroot00000000000000package object import ( "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/filesystem" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type FileSuite struct { BaseObjectsSuite } var _ = Suite(&FileSuite{}) type fileIterExpectedEntry struct { Name string Hash string } var fileIterTests = []struct { repo string // the repo name as in localRepos commit string // the commit to search for the file files []fileIterExpectedEntry }{ {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", []fileIterExpectedEntry{ {".gitignore", "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88"}, {"CHANGELOG", "d3ff53e0564a9f87d8e84b6e28e5060e517008aa"}, {"LICENSE", "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f"}, {"binary.jpg", "d5c0f4ab811897cadf03aec358ae60d21f91c50d"}, {"go/example.go", "880cd14280f4b9b6ed3986d6671f907d7cc2a198"}, {"json/long.json", "49c6bb89b17060d7b4deacb7b338fcc6ea2352a9"}, {"json/short.json", "c8f1d8c61f9da76f4cb49fd86322b6e685dba956"}, {"php/crappy.php", "9a48f23120e880dfbe41f7c9b7b708e9ee62a492"}, {"vendor/foo.go", "9dea2395f5403188298c1dabe8bdafe562c491e3"}, }}, } func (s *FileSuite) TestIter(c *C) { for i, t := range fileIterTests { f := fixtures.ByURL(t.repo).One() sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault()) h := plumbing.NewHash(t.commit) commit, err := GetCommit(sto, h) c.Assert(err, IsNil, Commentf("subtest 
%d: %v (%s)", i, err, t.commit)) tree, err := commit.Tree() c.Assert(err, IsNil) iter := NewFileIter(sto, tree) for k := 0; k < len(t.files); k++ { exp := t.files[k] file, err := iter.Next() c.Assert(err, IsNil, Commentf("subtest %d, iter %d, err=%v", i, k, err)) c.Assert(file.Mode, Equals, filemode.Regular) c.Assert(file.Hash.IsZero(), Equals, false) c.Assert(file.Hash, Equals, file.ID()) c.Assert(file.Name, Equals, exp.Name, Commentf("subtest %d, iter %d, name=%s, expected=%s", i, k, file.Name, exp.Hash)) c.Assert(file.Hash.String(), Equals, exp.Hash, Commentf("subtest %d, iter %d, hash=%v, expected=%s", i, k, file.Hash.String(), exp.Hash)) } _, err = iter.Next() c.Assert(err, Equals, io.EOF) } } var contentsTests = []struct { repo string // the repo name as in localRepos commit string // the commit to search for the file path string // the path of the file to find contents string // expected contents of the file }{ { "https://github.com/git-fixtures/basic.git", "b029517f6300c2da0f4b651b8642506cd6aaf45d", ".gitignore", `*.class # Mobile Tools for Java (J2ME) .mtj.tmp/ # Package Files # *.jar *.war *.ear # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml hs_err_pid* `, }, { "https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "CHANGELOG", `Initial changelog `, }, } func (s *FileSuite) TestContents(c *C) { for i, t := range contentsTests { f := fixtures.ByURL(t.repo).One() sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault()) h := plumbing.NewHash(t.commit) commit, err := GetCommit(sto, h) c.Assert(err, IsNil, Commentf("subtest %d: %v (%s)", i, err, t.commit)) file, err := commit.File(t.path) c.Assert(err, IsNil) content, err := file.Contents() c.Assert(err, IsNil) c.Assert(content, Equals, t.contents, Commentf( "subtest %d: commit=%s, path=%s", i, t.commit, t.path)) } } var linesTests = []struct { repo string // the repo name as in localRepos commit string // the commit to 
search for the file path string // the path of the file to find lines []string // expected lines in the file }{ { "https://github.com/git-fixtures/basic.git", "b029517f6300c2da0f4b651b8642506cd6aaf45d", ".gitignore", []string{ "*.class", "", "# Mobile Tools for Java (J2ME)", ".mtj.tmp/", "", "# Package Files #", "*.jar", "*.war", "*.ear", "", "# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml", "hs_err_pid*", }, }, { "https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "CHANGELOG", []string{ "Initial changelog", }, }, } func (s *FileSuite) TestLines(c *C) { for i, t := range linesTests { f := fixtures.ByURL(t.repo).One() sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault()) h := plumbing.NewHash(t.commit) commit, err := GetCommit(sto, h) c.Assert(err, IsNil, Commentf("subtest %d: %v (%s)", i, err, t.commit)) file, err := commit.File(t.path) c.Assert(err, IsNil) lines, err := file.Lines() c.Assert(err, IsNil) c.Assert(lines, DeepEquals, t.lines, Commentf( "subtest %d: commit=%s, path=%s", i, t.commit, t.path)) } } var ignoreEmptyDirEntriesTests = []struct { repo string // the repo name as in localRepos commit string // the commit to search for the file }{ { "https://github.com/cpcs499/Final_Pres_P.git", "70bade703ce556c2c7391a8065c45c943e8b6bc3", // the Final dir in this commit is empty }, } // It is difficult to assert that we are ignoring an (empty) dir as even // if we don't, no files will be found in it. // // At least this test has a high chance of panicking if // we don't ignore empty dirs. 
func (s *FileSuite) TestIgnoreEmptyDirEntries(c *C) { for i, t := range ignoreEmptyDirEntriesTests { f := fixtures.ByURL(t.repo).One() sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault()) h := plumbing.NewHash(t.commit) commit, err := GetCommit(sto, h) c.Assert(err, IsNil, Commentf("subtest %d: %v (%s)", i, err, t.commit)) tree, err := commit.Tree() c.Assert(err, IsNil) iter := tree.Files() defer iter.Close() for file, err := iter.Next(); err == nil; file, err = iter.Next() { _, _ = file.Contents() // this would probably panic if we are not ignoring empty dirs } } } func (s *FileSuite) TestFileIter(c *C) { hash := plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea") commit, err := GetCommit(s.Storer, hash) c.Assert(err, IsNil) tree, err := commit.Tree() c.Assert(err, IsNil) expected := []string{ ".gitignore", "CHANGELOG", "LICENSE", "binary.jpg", } var count int i := tree.Files() i.ForEach(func(f *File) error { c.Assert(f.Name, Equals, expected[count]) count++ return nil }) c.Assert(count, Equals, 4) count = 0 i = tree.Files() i.ForEach(func(f *File) error { count++ return storer.ErrStop }) c.Assert(count, Equals, 1) } func (s *FileSuite) TestFileIterSubmodule(c *C) { dotgit := fixtures.ByURL("https://github.com/git-fixtures/submodule.git").One().DotGit() st := filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault()) hash := plumbing.NewHash("b685400c1f9316f350965a5993d350bc746b0bf4") commit, err := GetCommit(st, hash) c.Assert(err, IsNil) tree, err := commit.Tree() c.Assert(err, IsNil) expected := []string{ ".gitmodules", "README.md", } var count int i := tree.Files() i.ForEach(func(f *File) error { c.Assert(f.Name, Equals, expected[count]) count++ return nil }) c.Assert(count, Equals, 2) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/object.go000066400000000000000000000137351345605224300235700ustar00rootroot00000000000000// Package object contains implementations of all Git objects and utility // functions to work with them. 
package object import ( "bytes" "errors" "fmt" "io" "strconv" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" ) // ErrUnsupportedObject trigger when a non-supported object is being decoded. var ErrUnsupportedObject = errors.New("unsupported object type") // Object is a generic representation of any git object. It is implemented by // Commit, Tree, Blob, and Tag, and includes the functions that are common to // them. // // Object is returned when an object can be of any type. It is frequently used // with a type cast to acquire the specific type of object: // // func process(obj Object) { // switch o := obj.(type) { // case *Commit: // // o is a Commit // case *Tree: // // o is a Tree // case *Blob: // // o is a Blob // case *Tag: // // o is a Tag // } // } // // This interface is intentionally different from plumbing.EncodedObject, which // is a lower level interface used by storage implementations to read and write // objects in its encoded form. type Object interface { ID() plumbing.Hash Type() plumbing.ObjectType Decode(plumbing.EncodedObject) error Encode(plumbing.EncodedObject) error } // GetObject gets an object from an object storer and decodes it. func GetObject(s storer.EncodedObjectStorer, h plumbing.Hash) (Object, error) { o, err := s.EncodedObject(plumbing.AnyObject, h) if err != nil { return nil, err } return DecodeObject(s, o) } // DecodeObject decodes an encoded object into an Object and associates it to // the given object storer. 
func DecodeObject(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (Object, error) { switch o.Type() { case plumbing.CommitObject: return DecodeCommit(s, o) case plumbing.TreeObject: return DecodeTree(s, o) case plumbing.BlobObject: return DecodeBlob(o) case plumbing.TagObject: return DecodeTag(s, o) default: return nil, plumbing.ErrInvalidType } } // DateFormat is the format being used in the original git implementation const DateFormat = "Mon Jan 02 15:04:05 2006 -0700" // Signature is used to identify who and when created a commit or tag. type Signature struct { // Name represents a person name. It is an arbitrary string. Name string // Email is an email, but it cannot be assumed to be well-formed. Email string // When is the timestamp of the signature. When time.Time } // Decode decodes a byte slice into a signature func (s *Signature) Decode(b []byte) { open := bytes.LastIndexByte(b, '<') close := bytes.LastIndexByte(b, '>') if open == -1 || close == -1 { return } if close < open { return } s.Name = string(bytes.Trim(b[:open], " ")) s.Email = string(b[open+1 : close]) hasTime := close+2 < len(b) if hasTime { s.decodeTimeAndTimeZone(b[close+2:]) } } // Encode encodes a Signature into a writer. 
func (s *Signature) Encode(w io.Writer) error { if _, err := fmt.Fprintf(w, "%s <%s> ", s.Name, s.Email); err != nil { return err } if err := s.encodeTimeAndTimeZone(w); err != nil { return err } return nil } var timeZoneLength = 5 func (s *Signature) decodeTimeAndTimeZone(b []byte) { space := bytes.IndexByte(b, ' ') if space == -1 { space = len(b) } ts, err := strconv.ParseInt(string(b[:space]), 10, 64) if err != nil { return } s.When = time.Unix(ts, 0).In(time.UTC) var tzStart = space + 1 if tzStart >= len(b) || tzStart+timeZoneLength > len(b) { return } // Include a dummy year in this time.Parse() call to avoid a bug in Go: // https://github.com/golang/go/issues/19750 // // Parsing the timezone with no other details causes the tl.Location() call // below to return time.Local instead of the parsed zone in some cases tl, err := time.Parse("2006 -0700", "1970 "+string(b[tzStart:tzStart+timeZoneLength])) if err != nil { return } s.When = s.When.In(tl.Location()) } func (s *Signature) encodeTimeAndTimeZone(w io.Writer) error { u := s.When.Unix() if u < 0 { u = 0 } _, err := fmt.Fprintf(w, "%d %s", u, s.When.Format("-0700")) return err } func (s *Signature) String() string { return fmt.Sprintf("%s <%s>", s.Name, s.Email) } // ObjectIter provides an iterator for a set of objects. type ObjectIter struct { storer.EncodedObjectIter s storer.EncodedObjectStorer } // NewObjectIter takes a storer.EncodedObjectStorer and a // storer.EncodedObjectIter and returns an *ObjectIter that iterates over all // objects contained in the storer.EncodedObjectIter. func NewObjectIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *ObjectIter { return &ObjectIter{iter, s} } // Next moves the iterator to the next object and returns a pointer to it. If // there are no more objects, it returns io.EOF. 
func (iter *ObjectIter) Next() (Object, error) { for { obj, err := iter.EncodedObjectIter.Next() if err != nil { return nil, err } o, err := iter.toObject(obj) if err == plumbing.ErrInvalidType { continue } if err != nil { return nil, err } return o, nil } } // ForEach call the cb function for each object contained on this iter until // an error happens or the end of the iter is reached. If ErrStop is sent // the iteration is stop but no error is returned. The iterator is closed. func (iter *ObjectIter) ForEach(cb func(Object) error) error { return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { o, err := iter.toObject(obj) if err == plumbing.ErrInvalidType { return nil } if err != nil { return err } return cb(o) }) } func (iter *ObjectIter) toObject(obj plumbing.EncodedObject) (Object, error) { switch obj.Type() { case plumbing.BlobObject: blob := &Blob{} return blob, blob.Decode(obj) case plumbing.TreeObject: tree := &Tree{s: iter.s} return tree, tree.Decode(obj) case plumbing.CommitObject: commit := &Commit{} return commit, commit.Decode(obj) case plumbing.TagObject: tag := &Tag{} return tag, tag.Decode(obj) default: return nil, plumbing.ErrInvalidType } } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/object_test.go000066400000000000000000000122401345605224300246150ustar00rootroot00000000000000package object import ( "io" "io/ioutil" "testing" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/filesystem" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) func Test(t *testing.T) { TestingT(t) } type BaseObjectsSuite struct { fixtures.Suite Storer storer.EncodedObjectStorer Fixture *fixtures.Fixture } func (s *BaseObjectsSuite) SetUpSuite(c *C) { s.Suite.SetUpSuite(c) s.Fixture = fixtures.Basic().One() storer := filesystem.NewStorage(s.Fixture.DotGit(), cache.NewObjectLRUDefault()) s.Storer = storer } func (s *BaseObjectsSuite) tag(c *C, h plumbing.Hash) *Tag { t, err := GetTag(s.Storer, h) c.Assert(err, IsNil) return t } func (s *BaseObjectsSuite) tree(c *C, h plumbing.Hash) *Tree { t, err := GetTree(s.Storer, h) c.Assert(err, IsNil) return t } func (s *BaseObjectsSuite) commit(c *C, h plumbing.Hash) *Commit { commit, err := GetCommit(s.Storer, h) c.Assert(err, IsNil) return commit } type ObjectsSuite struct { BaseObjectsSuite } var _ = Suite(&ObjectsSuite{}) func (s *ObjectsSuite) TestNewCommit(c *C) { hash := plumbing.NewHash("a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69") commit := s.commit(c, hash) c.Assert(commit.Hash, Equals, commit.ID()) c.Assert(commit.Hash.String(), Equals, "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69") tree, err := commit.Tree() c.Assert(err, IsNil) c.Assert(tree.Hash.String(), Equals, "c2d30fa8ef288618f65f6eed6e168e0d514886f4") parents := commit.Parents() parentCommit, err := parents.Next() c.Assert(err, IsNil) c.Assert(parentCommit.Hash.String(), Equals, "b029517f6300c2da0f4b651b8642506cd6aaf45d") parentCommit, err = parents.Next() c.Assert(err, IsNil) c.Assert(parentCommit.Hash.String(), Equals, "b8e471f58bcbca63b07bda20e428190409c2db47") c.Assert(commit.Author.Email, Equals, "mcuadros@gmail.com") c.Assert(commit.Author.Name, Equals, "Máximo Cuadros") c.Assert(commit.Author.When.Format(time.RFC3339), Equals, "2015-03-31T13:47:14+02:00") c.Assert(commit.Committer.Email, Equals, "mcuadros@gmail.com") c.Assert(commit.Message, Equals, "Merge pull request #1 from dripolles/feature\n\nCreating changelog") } func (s *ObjectsSuite) 
TestParseTree(c *C) { hash := plumbing.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c") tree, err := GetTree(s.Storer, hash) c.Assert(err, IsNil) c.Assert(tree.Entries, HasLen, 8) tree.buildMap() c.Assert(tree.m, HasLen, 8) c.Assert(tree.m[".gitignore"].Name, Equals, ".gitignore") c.Assert(tree.m[".gitignore"].Mode, Equals, filemode.Regular) c.Assert(tree.m[".gitignore"].Hash.String(), Equals, "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88") count := 0 iter := tree.Files() defer iter.Close() for f, err := iter.Next(); err == nil; f, err = iter.Next() { count++ if f.Name == "go/example.go" { reader, err := f.Reader() c.Assert(err, IsNil) defer func() { c.Assert(reader.Close(), IsNil) }() content, _ := ioutil.ReadAll(reader) c.Assert(content, HasLen, 2780) } } c.Assert(count, Equals, 9) } func (s *ObjectsSuite) TestParseSignature(c *C) { cases := map[string]Signature{ `Foo Bar 1257894000 +0100`: { Name: "Foo Bar", Email: "foo@bar.com", When: MustParseTime("2009-11-11 00:00:00 +0100"), }, `Foo Bar 1257894000 -0700`: { Name: "Foo Bar", Email: "foo@bar.com", When: MustParseTime("2009-11-10 16:00:00 -0700"), }, `Foo Bar <> 1257894000 +0100`: { Name: "Foo Bar", Email: "", When: MustParseTime("2009-11-11 00:00:00 +0100"), }, ` <> 1257894000`: { Name: "", Email: "", When: MustParseTime("2009-11-10 23:00:00 +0000"), }, `Foo Bar `: { Name: "Foo Bar", Email: "foo@bar.com", When: time.Time{}, }, `crap> 1257894000 +1000`: { Name: "crap>", Email: "foo@bar.com", When: MustParseTime("2009-11-11 09:00:00 +1000"), }, `><`: { Name: "", Email: "", When: time.Time{}, }, ``: { Name: "", Email: "", When: time.Time{}, }, `<`: { Name: "", Email: "", When: time.Time{}, }, } for raw, exp := range cases { got := &Signature{} got.Decode([]byte(raw)) c.Assert(got.Name, Equals, exp.Name) c.Assert(got.Email, Equals, exp.Email) c.Assert(got.When.Format(time.RFC3339), Equals, exp.When.Format(time.RFC3339)) } } func (s *ObjectsSuite) TestObjectIter(c *C) { encIter, err := 
s.Storer.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) iter := NewObjectIter(s.Storer, encIter) objects := []Object{} iter.ForEach(func(o Object) error { objects = append(objects, o) return nil }) c.Assert(len(objects) > 0, Equals, true) iter.Close() encIter, err = s.Storer.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) iter = NewObjectIter(s.Storer, encIter) i := 0 for { o, err := iter.Next() if err == io.EOF { break } c.Assert(err, IsNil) c.Assert(o.ID(), Equals, objects[i].ID()) c.Assert(o.Type(), Equals, objects[i].Type()) i++ } iter.Close() } func MustParseTime(value string) time.Time { t, _ := time.Parse("2006-01-02 15:04:05 -0700", value) return t } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/patch.go000066400000000000000000000162161345605224300234160ustar00rootroot00000000000000package object import ( "bytes" "context" "errors" "fmt" "io" "math" "strings" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/filemode" fdiff "gopkg.in/src-d/go-git.v4/plumbing/format/diff" "gopkg.in/src-d/go-git.v4/utils/diff" dmp "github.com/sergi/go-diff/diffmatchpatch" ) var ( ErrCanceled = errors.New("operation canceled") ) func getPatch(message string, changes ...*Change) (*Patch, error) { ctx := context.Background() return getPatchContext(ctx, message, changes...) 
} func getPatchContext(ctx context.Context, message string, changes ...*Change) (*Patch, error) { var filePatches []fdiff.FilePatch for _, c := range changes { select { case <-ctx.Done(): return nil, ErrCanceled default: } fp, err := filePatchWithContext(ctx, c) if err != nil { return nil, err } filePatches = append(filePatches, fp) } return &Patch{message, filePatches}, nil } func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, error) { from, to, err := c.Files() if err != nil { return nil, err } fromContent, fIsBinary, err := fileContent(from) if err != nil { return nil, err } toContent, tIsBinary, err := fileContent(to) if err != nil { return nil, err } if fIsBinary || tIsBinary { return &textFilePatch{from: c.From, to: c.To}, nil } diffs := diff.Do(fromContent, toContent) var chunks []fdiff.Chunk for _, d := range diffs { select { case <-ctx.Done(): return nil, ErrCanceled default: } var op fdiff.Operation switch d.Type { case dmp.DiffEqual: op = fdiff.Equal case dmp.DiffDelete: op = fdiff.Delete case dmp.DiffInsert: op = fdiff.Add } chunks = append(chunks, &textChunk{d.Text, op}) } return &textFilePatch{ chunks: chunks, from: c.From, to: c.To, }, nil } func filePatch(c *Change) (fdiff.FilePatch, error) { return filePatchWithContext(context.Background(), c) } func fileContent(f *File) (content string, isBinary bool, err error) { if f == nil { return } isBinary, err = f.IsBinary() if err != nil || isBinary { return } content, err = f.Contents() return } // textPatch is an implementation of fdiff.Patch interface type Patch struct { message string filePatches []fdiff.FilePatch } func (t *Patch) FilePatches() []fdiff.FilePatch { return t.filePatches } func (t *Patch) Message() string { return t.message } func (p *Patch) Encode(w io.Writer) error { ue := fdiff.NewUnifiedEncoder(w, fdiff.DefaultContextLines) return ue.Encode(p) } func (p *Patch) Stats() FileStats { return getFileStatsFromFilePatches(p.FilePatches()) } func (p *Patch) String() 
string { buf := bytes.NewBuffer(nil) err := p.Encode(buf) if err != nil { return fmt.Sprintf("malformed patch: %s", err.Error()) } return buf.String() } // changeEntryWrapper is an implementation of fdiff.File interface type changeEntryWrapper struct { ce ChangeEntry } func (f *changeEntryWrapper) Hash() plumbing.Hash { if !f.ce.TreeEntry.Mode.IsFile() { return plumbing.ZeroHash } return f.ce.TreeEntry.Hash } func (f *changeEntryWrapper) Mode() filemode.FileMode { return f.ce.TreeEntry.Mode } func (f *changeEntryWrapper) Path() string { if !f.ce.TreeEntry.Mode.IsFile() { return "" } return f.ce.Name } func (f *changeEntryWrapper) Empty() bool { return !f.ce.TreeEntry.Mode.IsFile() } // textFilePatch is an implementation of fdiff.FilePatch interface type textFilePatch struct { chunks []fdiff.Chunk from, to ChangeEntry } func (tf *textFilePatch) Files() (from fdiff.File, to fdiff.File) { f := &changeEntryWrapper{tf.from} t := &changeEntryWrapper{tf.to} if !f.Empty() { from = f } if !t.Empty() { to = t } return } func (t *textFilePatch) IsBinary() bool { return len(t.chunks) == 0 } func (t *textFilePatch) Chunks() []fdiff.Chunk { return t.chunks } // textChunk is an implementation of fdiff.Chunk interface type textChunk struct { content string op fdiff.Operation } func (t *textChunk) Content() string { return t.content } func (t *textChunk) Type() fdiff.Operation { return t.op } // FileStat stores the status of changes in content of a file. type FileStat struct { Name string Addition int Deletion int } func (fs FileStat) String() string { return printStat([]FileStat{fs}) } // FileStats is a collection of FileStat. type FileStats []FileStat func (fileStats FileStats) String() string { return printStat(fileStats) } func printStat(fileStats []FileStat) string { padLength := float64(len(" ")) newlineLength := float64(len("\n")) separatorLength := float64(len("|")) // Soft line length limit. The text length calculation below excludes // length of the change number. 
Adding that would take it closer to 80, // but probably not more than 80, until it's a huge number. lineLength := 72.0 // Get the longest filename and longest total change. var longestLength float64 var longestTotalChange float64 for _, fs := range fileStats { if int(longestLength) < len(fs.Name) { longestLength = float64(len(fs.Name)) } totalChange := fs.Addition + fs.Deletion if int(longestTotalChange) < totalChange { longestTotalChange = float64(totalChange) } } // Parts of the output: // |<+++/---> // example: " main.go | 10 +++++++--- " // leftTextLength := padLength + longestLength + padLength // <+++++/-----> // Excluding number length here. rightTextLength := padLength + padLength + newlineLength totalTextArea := leftTextLength + separatorLength + rightTextLength heightOfHistogram := lineLength - totalTextArea // Scale the histogram. var scaleFactor float64 if longestTotalChange > heightOfHistogram { // Scale down to heightOfHistogram. scaleFactor = float64(longestTotalChange / heightOfHistogram) } else { scaleFactor = 1.0 } finalOutput := "" for _, fs := range fileStats { addn := float64(fs.Addition) deln := float64(fs.Deletion) adds := strings.Repeat("+", int(math.Floor(addn/scaleFactor))) dels := strings.Repeat("-", int(math.Floor(deln/scaleFactor))) finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels) } return finalOutput } func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats { var fileStats FileStats for _, fp := range filePatches { // ignore empty patches (binary files, submodule refs updates) if len(fp.Chunks()) == 0 { continue } cs := FileStat{} from, to := fp.Files() if from == nil { // New File is created. cs.Name = to.Path() } else if to == nil { // File is deleted. cs.Name = from.Path() } else if from.Path() != to.Path() { // File is renamed. Not supported. 
// cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path()) } else { cs.Name = from.Path() } for _, chunk := range fp.Chunks() { s := chunk.Content() switch chunk.Type() { case fdiff.Add: cs.Addition += strings.Count(s, "\n") if s[len(s)-1] != '\n' { cs.Addition++ } case fdiff.Delete: cs.Deletion += strings.Count(s, "\n") if s[len(s)-1] != '\n' { cs.Deletion++ } } } fileStats = append(fileStats, cs) } return fileStats } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/patch_test.go000066400000000000000000000017061345605224300244530ustar00rootroot00000000000000package object import ( . "gopkg.in/check.v1" fixtures "gopkg.in/src-d/go-git-fixtures.v3" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/storage/filesystem" ) type PatchSuite struct { BaseObjectsSuite } var _ = Suite(&PatchSuite{}) func (s *PatchSuite) TestStatsWithSubmodules(c *C) { storer := filesystem.NewStorage( fixtures.ByURL("https://github.com/git-fixtures/submodule.git").One().DotGit(), cache.NewObjectLRUDefault()) commit, err := GetCommit(storer, plumbing.NewHash("b685400c1f9316f350965a5993d350bc746b0bf4")) tree, err := commit.Tree() c.Assert(err, IsNil) e, err := tree.entry("basic") c.Assert(err, IsNil) ch := &Change{ From: ChangeEntry{ Name: "basic", Tree: tree, TreeEntry: *e, }, To: ChangeEntry{ Name: "basic", Tree: tree, TreeEntry: *e, }, } p, err := getPatch("", ch) c.Assert(err, IsNil) c.Assert(p, NotNil) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/tag.go000066400000000000000000000212701345605224300230660ustar00rootroot00000000000000package object import ( "bufio" "bytes" "fmt" "io" stdioutil "io/ioutil" "strings" "golang.org/x/crypto/openpgp" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) // Tag represents an annotated tag object. It points to a single git object of // any type, but tags typically are applied to commit or blob objects. 
It // provides a reference that associates the target with a tag name. It also // contains meta-information about the tag, including the tagger, tag date and // message. // // Note that this is not used for lightweight tags. // // https://git-scm.com/book/en/v2/Git-Internals-Git-References#Tags type Tag struct { // Hash of the tag. Hash plumbing.Hash // Name of the tag. Name string // Tagger is the one who created the tag. Tagger Signature // Message is an arbitrary text message. Message string // PGPSignature is the PGP signature of the tag. PGPSignature string // TargetType is the object type of the target. TargetType plumbing.ObjectType // Target is the hash of the target object. Target plumbing.Hash s storer.EncodedObjectStorer } // GetTag gets a tag from an object storer and decodes it. func GetTag(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tag, error) { o, err := s.EncodedObject(plumbing.TagObject, h) if err != nil { return nil, err } return DecodeTag(s, o) } // DecodeTag decodes an encoded object into a *Commit and associates it to the // given object storer. func DecodeTag(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tag, error) { t := &Tag{s: s} if err := t.Decode(o); err != nil { return nil, err } return t, nil } // ID returns the object ID of the tag, not the object that the tag references. // The returned value will always match the current value of Tag.Hash. // // ID is present to fulfill the Object interface. func (t *Tag) ID() plumbing.Hash { return t.Hash } // Type returns the type of object. It always returns plumbing.TagObject. // // Type is present to fulfill the Object interface. func (t *Tag) Type() plumbing.ObjectType { return plumbing.TagObject } // Decode transforms a plumbing.EncodedObject into a Tag struct. 
func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
	if o.Type() != plumbing.TagObject {
		return ErrUnsupportedObject
	}

	t.Hash = o.Hash()

	reader, err := o.Reader()
	if err != nil {
		return err
	}
	// CheckClose propagates a Close error through the named return value.
	defer ioutil.CheckClose(reader, &err)

	r := bufio.NewReader(reader)

	// Header section: one "key value" pair per line, terminated by the first
	// blank line, after which the free-form message (and optional PGP
	// signature) begins.
	for {
		var line []byte
		line, err = r.ReadBytes('\n')
		if err != nil && err != io.EOF {
			return err
		}

		line = bytes.TrimSpace(line)
		if len(line) == 0 {
			break // Start of message
		}

		// Split into key and the remainder of the line (the value).
		split := bytes.SplitN(line, []byte{' '}, 2)
		switch string(split[0]) {
		case "object":
			t.Target = plumbing.NewHash(string(split[1]))
		case "type":
			t.TargetType, err = plumbing.ParseObjectType(string(split[1]))
			if err != nil {
				return err
			}
		case "tag":
			t.Name = string(split[1])
		case "tagger":
			// Signature.Decode never fails; malformed taggers are tolerated.
			t.Tagger.Decode(split[1])
		}

		// EOF reached while still in the header: there is no message at all.
		if err == io.EOF {
			return nil
		}
	}

	// Everything after the blank line is message plus optional signature.
	data, err := stdioutil.ReadAll(r)
	if err != nil {
		return err
	}

	var pgpsig bool
	// Check if data contains PGP signature.
	if bytes.Contains(data, []byte(beginpgp)) {
		// Split the lines at newline.
		messageAndSig := bytes.Split(data, []byte("\n"))

		// Route each line into either the message or the signature depending
		// on whether we are inside the BEGIN/END PGP markers.
		for _, l := range messageAndSig {
			if pgpsig {
				if bytes.Contains(l, []byte(endpgp)) {
					t.PGPSignature += endpgp + "\n"
					pgpsig = false
				} else {
					t.PGPSignature += string(l) + "\n"
				}
				continue
			}

			// Check if it's the beginning of a PGP signature.
			if bytes.Contains(l, []byte(beginpgp)) {
				t.PGPSignature += beginpgp + "\n"
				pgpsig = true
				continue
			}

			t.Message += string(l) + "\n"
		}
	} else {
		// No signature: the whole remainder is the message, kept verbatim.
		t.Message = string(data)
	}

	return nil
}

// Encode transforms a Tag into a plumbing.EncodedObject.
func (t *Tag) Encode(o plumbing.EncodedObject) error { return t.encode(o, true) } func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) (err error) { o.SetType(plumbing.TagObject) w, err := o.Writer() if err != nil { return err } defer ioutil.CheckClose(w, &err) if _, err = fmt.Fprintf(w, "object %s\ntype %s\ntag %s\ntagger ", t.Target.String(), t.TargetType.Bytes(), t.Name); err != nil { return err } if err = t.Tagger.Encode(w); err != nil { return err } if _, err = fmt.Fprint(w, "\n\n"); err != nil { return err } if _, err = fmt.Fprint(w, t.Message); err != nil { return err } // Note that this is highly sensitive to what it sent along in the message. // Message *always* needs to end with a newline, or else the message and the // signature will be concatenated into a corrupt object. Since this is a // lower-level method, we assume you know what you are doing and have already // done the needful on the message in the caller. if includeSig { if _, err = fmt.Fprint(w, t.PGPSignature); err != nil { return err } } return err } // Commit returns the commit pointed to by the tag. If the tag points to a // different type of object ErrUnsupportedObject will be returned. func (t *Tag) Commit() (*Commit, error) { if t.TargetType != plumbing.CommitObject { return nil, ErrUnsupportedObject } o, err := t.s.EncodedObject(plumbing.CommitObject, t.Target) if err != nil { return nil, err } return DecodeCommit(t.s, o) } // Tree returns the tree pointed to by the tag. If the tag points to a commit // object the tree of that commit will be returned. If the tag does not point // to a commit or tree object ErrUnsupportedObject will be returned. func (t *Tag) Tree() (*Tree, error) { switch t.TargetType { case plumbing.CommitObject: c, err := t.Commit() if err != nil { return nil, err } return c.Tree() case plumbing.TreeObject: return GetTree(t.s, t.Target) default: return nil, ErrUnsupportedObject } } // Blob returns the blob pointed to by the tag. 
If the tag points to a // different type of object ErrUnsupportedObject will be returned. func (t *Tag) Blob() (*Blob, error) { if t.TargetType != plumbing.BlobObject { return nil, ErrUnsupportedObject } return GetBlob(t.s, t.Target) } // Object returns the object pointed to by the tag. func (t *Tag) Object() (Object, error) { o, err := t.s.EncodedObject(t.TargetType, t.Target) if err != nil { return nil, err } return DecodeObject(t.s, o) } // String returns the meta information contained in the tag as a formatted // string. func (t *Tag) String() string { obj, _ := t.Object() return fmt.Sprintf( "%s %s\nTagger: %s\nDate: %s\n\n%s\n%s", plumbing.TagObject, t.Name, t.Tagger.String(), t.Tagger.When.Format(DateFormat), t.Message, objectAsString(obj), ) } // Verify performs PGP verification of the tag with a provided armored // keyring and returns openpgp.Entity associated with verifying key on success. func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) { keyRingReader := strings.NewReader(armoredKeyRing) keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader) if err != nil { return nil, err } // Extract signature. signature := strings.NewReader(t.PGPSignature) encoded := &plumbing.MemoryObject{} // Encode tag components, excluding signature and get a reader object. if err := t.encode(encoded, false); err != nil { return nil, err } er, err := encoded.Reader() if err != nil { return nil, err } return openpgp.CheckArmoredDetachedSignature(keyring, er, signature) } // TagIter provides an iterator for a set of tags. type TagIter struct { storer.EncodedObjectIter s storer.EncodedObjectStorer } // NewTagIter takes a storer.EncodedObjectStorer and a // storer.EncodedObjectIter and returns a *TagIter that iterates over all // tags contained in the storer.EncodedObjectIter. // // Any non-tag object returned by the storer.EncodedObjectIter is skipped. 
func NewTagIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TagIter { return &TagIter{iter, s} } // Next moves the iterator to the next tag and returns a pointer to it. If // there are no more tags, it returns io.EOF. func (iter *TagIter) Next() (*Tag, error) { obj, err := iter.EncodedObjectIter.Next() if err != nil { return nil, err } return DecodeTag(iter.s, obj) } // ForEach call the cb function for each tag contained on this iter until // an error happens or the end of the iter is reached. If ErrStop is sent // the iteration is stop but no error is returned. The iterator is closed. func (iter *TagIter) ForEach(cb func(*Tag) error) error { return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { t, err := DecodeTag(iter.s, obj) if err != nil { return err } return cb(t) }) } func objectAsString(obj Object) string { switch o := obj.(type) { case *Commit: return o.String() default: return "" } } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/tag_test.go000066400000000000000000000264501345605224300241320ustar00rootroot00000000000000package object import ( "fmt" "io" "strings" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/storage/filesystem" "gopkg.in/src-d/go-git.v4/storage/memory" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type TagSuite struct { BaseObjectsSuite } var _ = Suite(&TagSuite{}) func (s *TagSuite) SetUpSuite(c *C) { s.BaseObjectsSuite.SetUpSuite(c) storer := filesystem.NewStorage(fixtures.ByURL("https://github.com/git-fixtures/tags.git").One().DotGit(), cache.NewObjectLRUDefault()) s.Storer = storer } func (s *TagSuite) TestNameIDAndType(c *C) { h := plumbing.NewHash("b742a2a9fa0afcfa9a6fad080980fbc26b007c69") tag := s.tag(c, h) c.Assert(tag.Name, Equals, "annotated-tag") c.Assert(h, Equals, tag.ID()) c.Assert(plumbing.TagObject, Equals, tag.Type()) } func (s *TagSuite) TestTagger(c *C) { tag := s.tag(c, plumbing.NewHash("b742a2a9fa0afcfa9a6fad080980fbc26b007c69")) c.Assert(tag.Tagger.String(), Equals, "Máximo Cuadros ") } func (s *TagSuite) TestAnnotated(c *C) { tag := s.tag(c, plumbing.NewHash("b742a2a9fa0afcfa9a6fad080980fbc26b007c69")) c.Assert(tag.Message, Equals, "example annotated tag\n") commit, err := tag.Commit() c.Assert(err, IsNil) c.Assert(commit.Type(), Equals, plumbing.CommitObject) c.Assert(commit.ID().String(), Equals, "f7b877701fbf855b44c0a9e86f3fdce2c298b07f") } func (s *TagSuite) TestCommitError(c *C) { tag := s.tag(c, plumbing.NewHash("fe6cb94756faa81e5ed9240f9191b833db5f40ae")) commit, err := tag.Commit() c.Assert(commit, IsNil) c.Assert(err, NotNil) c.Assert(err, Equals, ErrUnsupportedObject) } func (s *TagSuite) TestCommit(c *C) { tag := s.tag(c, plumbing.NewHash("ad7897c0fb8e7d9a9ba41fa66072cf06095a6cfc")) c.Assert(tag.Message, Equals, "a tagged commit\n") commit, err := tag.Commit() c.Assert(err, IsNil) c.Assert(commit.Type(), Equals, plumbing.CommitObject) c.Assert(commit.ID().String(), Equals, "f7b877701fbf855b44c0a9e86f3fdce2c298b07f") } func (s *TagSuite) TestBlobError(c *C) { tag := s.tag(c, plumbing.NewHash("ad7897c0fb8e7d9a9ba41fa66072cf06095a6cfc")) commit, err := tag.Blob() c.Assert(commit, IsNil) c.Assert(err, NotNil) c.Assert(err, Equals, ErrUnsupportedObject) } func (s 
*TagSuite) TestBlob(c *C) { tag := s.tag(c, plumbing.NewHash("fe6cb94756faa81e5ed9240f9191b833db5f40ae")) c.Assert(tag.Message, Equals, "a tagged blob\n") blob, err := tag.Blob() c.Assert(err, IsNil) c.Assert(blob.Type(), Equals, plumbing.BlobObject) c.Assert(blob.ID().String(), Equals, "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391") } func (s *TagSuite) TestTreeError(c *C) { tag := s.tag(c, plumbing.NewHash("fe6cb94756faa81e5ed9240f9191b833db5f40ae")) tree, err := tag.Tree() c.Assert(tree, IsNil) c.Assert(err, NotNil) c.Assert(err, Equals, ErrUnsupportedObject) } func (s *TagSuite) TestTree(c *C) { tag := s.tag(c, plumbing.NewHash("152175bf7e5580299fa1f0ba41ef6474cc043b70")) c.Assert(tag.Message, Equals, "a tagged tree\n") tree, err := tag.Tree() c.Assert(err, IsNil) c.Assert(tree.Type(), Equals, plumbing.TreeObject) c.Assert(tree.ID().String(), Equals, "70846e9a10ef7b41064b40f07713d5b8b9a8fc73") } func (s *TagSuite) TestTreeFromCommit(c *C) { tag := s.tag(c, plumbing.NewHash("ad7897c0fb8e7d9a9ba41fa66072cf06095a6cfc")) c.Assert(tag.Message, Equals, "a tagged commit\n") tree, err := tag.Tree() c.Assert(err, IsNil) c.Assert(tree.Type(), Equals, plumbing.TreeObject) c.Assert(tree.ID().String(), Equals, "70846e9a10ef7b41064b40f07713d5b8b9a8fc73") } func (s *TagSuite) TestObject(c *C) { tag := s.tag(c, plumbing.NewHash("ad7897c0fb8e7d9a9ba41fa66072cf06095a6cfc")) obj, err := tag.Object() c.Assert(err, IsNil) c.Assert(obj.Type(), Equals, plumbing.CommitObject) c.Assert(obj.ID().String(), Equals, "f7b877701fbf855b44c0a9e86f3fdce2c298b07f") } func (s *TagSuite) TestTagItter(c *C) { iter, err := s.Storer.IterEncodedObjects(plumbing.TagObject) c.Assert(err, IsNil) var count int i := NewTagIter(s.Storer, iter) tag, err := i.Next() c.Assert(err, IsNil) c.Assert(tag, NotNil) c.Assert(tag.Type(), Equals, plumbing.TagObject) err = i.ForEach(func(t *Tag) error { c.Assert(t, NotNil) c.Assert(t.Type(), Equals, plumbing.TagObject) count++ return nil }) c.Assert(err, IsNil) 
c.Assert(count, Equals, 3) tag, err = i.Next() c.Assert(err, Equals, io.EOF) c.Assert(tag, IsNil) } func (s *TagSuite) TestTagIterError(c *C) { iter, err := s.Storer.IterEncodedObjects(plumbing.TagObject) c.Assert(err, IsNil) randomErr := fmt.Errorf("a random error") i := NewTagIter(s.Storer, iter) err = i.ForEach(func(t *Tag) error { return randomErr }) c.Assert(err, NotNil) c.Assert(err, Equals, randomErr) } func (s *TagSuite) TestTagDecodeWrongType(c *C) { newTag := &Tag{} obj := &plumbing.MemoryObject{} obj.SetType(plumbing.BlobObject) err := newTag.Decode(obj) c.Assert(err, Equals, ErrUnsupportedObject) } func (s *TagSuite) TestTagEncodeDecodeIdempotent(c *C) { ts, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05-07:00") c.Assert(err, IsNil) tags := []*Tag{ { Name: "foo", Tagger: Signature{Name: "Foo", Email: "foo@example.local", When: ts}, Message: "Message\n\nFoo\nBar\nBaz\n\n", TargetType: plumbing.BlobObject, Target: plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"), }, { Name: "foo", Tagger: Signature{Name: "Foo", Email: "foo@example.local", When: ts}, TargetType: plumbing.BlobObject, Target: plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"), }, } for _, tag := range tags { obj := &plumbing.MemoryObject{} err = tag.Encode(obj) c.Assert(err, IsNil) newTag := &Tag{} err = newTag.Decode(obj) c.Assert(err, IsNil) tag.Hash = obj.Hash() c.Assert(newTag, DeepEquals, tag) } } func (s *TagSuite) TestString(c *C) { tag := s.tag(c, plumbing.NewHash("b742a2a9fa0afcfa9a6fad080980fbc26b007c69")) c.Assert(tag.String(), Equals, ""+ "tag annotated-tag\n"+ "Tagger: Máximo Cuadros \n"+ "Date: Wed Sep 21 21:13:35 2016 +0200\n"+ "\n"+ "example annotated tag\n"+ "\n"+ "commit f7b877701fbf855b44c0a9e86f3fdce2c298b07f\n"+ "Author: Máximo Cuadros \n"+ "Date: Wed Sep 21 21:10:52 2016 +0200\n"+ "\n"+ " initial\n"+ "\n", ) tag = s.tag(c, plumbing.NewHash("152175bf7e5580299fa1f0ba41ef6474cc043b70")) c.Assert(tag.String(), Equals, ""+ "tag tree-tag\n"+ 
"Tagger: Máximo Cuadros \n"+ "Date: Wed Sep 21 21:17:56 2016 +0200\n"+ "\n"+ "a tagged tree\n"+ "\n", ) } func (s *TagSuite) TestStringNonCommit(c *C) { store := memory.NewStorage() target := &Tag{ Target: plumbing.NewHash("TAGONE"), Name: "TAG ONE", Message: "tag one", TargetType: plumbing.TagObject, } targetObj := &plumbing.MemoryObject{} target.Encode(targetObj) store.SetEncodedObject(targetObj) tag := &Tag{ Target: targetObj.Hash(), Name: "TAG TWO", Message: "tag two", TargetType: plumbing.TagObject, } tagObj := &plumbing.MemoryObject{} tag.Encode(tagObj) store.SetEncodedObject(tagObj) tag, err := GetTag(store, tagObj.Hash()) c.Assert(err, IsNil) c.Assert(tag.String(), Equals, "tag TAG TWO\n"+ "Tagger: <>\n"+ "Date: Thu Jan 01 00:00:00 1970 +0000\n"+ "\n"+ "tag two\n") } func (s *TagSuite) TestLongTagNameSerialization(c *C) { encoded := &plumbing.MemoryObject{} decoded := &Tag{} tag := s.tag(c, plumbing.NewHash("b742a2a9fa0afcfa9a6fad080980fbc26b007c69")) longName := "my tag: name " + strings.Repeat("test", 4096) + " OK" tag.Name = longName err := tag.Encode(encoded) c.Assert(err, IsNil) err = decoded.Decode(encoded) c.Assert(err, IsNil) c.Assert(decoded.Name, Equals, longName) } func (s *TagSuite) TestPGPSignatureSerialization(c *C) { encoded := &plumbing.MemoryObject{} decoded := &Tag{} tag := s.tag(c, plumbing.NewHash("b742a2a9fa0afcfa9a6fad080980fbc26b007c69")) pgpsignature := `-----BEGIN PGP SIGNATURE----- iQEcBAABAgAGBQJTZbQlAAoJEF0+sviABDDrZbQH/09PfE51KPVPlanr6q1v4/Ut LQxfojUWiLQdg2ESJItkcuweYg+kc3HCyFejeDIBw9dpXt00rY26p05qrpnG+85b hM1/PswpPLuBSr+oCIDj5GMC2r2iEKsfv2fJbNW8iWAXVLoWZRF8B0MfqX/YTMbm ecorc4iXzQu7tupRihslbNkfvfciMnSDeSvzCpWAHl7h8Wj6hhqePmLm9lAYqnKp 8S5B/1SSQuEAjRZgI4IexpZoeKGVDptPHxLLS38fozsyi0QyDyzEgJxcJQVMXxVi RUysgqjcpT8+iQM1PblGfHR4XAhuOqN5Fx06PSaFZhqvWFezJ28/CLyX5q+oIVk= =EFTF -----END PGP SIGNATURE----- ` tag.PGPSignature = pgpsignature err := tag.Encode(encoded) c.Assert(err, IsNil) err = decoded.Decode(encoded) c.Assert(err, IsNil) 
c.Assert(decoded.PGPSignature, Equals, pgpsignature) } func (s *TagSuite) TestVerify(c *C) { ts := time.Unix(1511524851, 0) loc, _ := time.LoadLocation("Asia/Kolkata") tag := &Tag{ Name: "v0.2", Tagger: Signature{Name: "Sunny", Email: "me@darkowlzz.space", When: ts.In(loc)}, Message: `This is a signed tag `, TargetType: plumbing.CommitObject, Target: plumbing.NewHash("064f92fe00e70e6b64cb358a65039daa4b6ae8d2"), PGPSignature: ` -----BEGIN PGP SIGNATURE----- iQFHBAABCAAxFiEEoRt6IzxHaZkkUslhQyLeMqcmyU4FAloYCg8THG1lQGRhcmtv d2x6ei5zcGFjZQAKCRBDIt4ypybJTs0cCACjQZe2610t3gfbUPbgQiWDL9uvlCeb sNSeTC6hLAFSvHTMqLr/6RpiLlfQXyATD7TZUH0DUSLsERLheG82OgVxkOTzPCpy GL6iGKeZ4eZ1KiV+SBPjqizC9ShhGooPUw9oUSVdj4jsaHDdDHtY63Pjl0KvJmms OVi9SSxjeMbmaC81C8r0ZuOLTXJh/JRKh2BsehdcnK3736BK+16YRD7ugXLpkQ5d nsCFVbuYYoLMoJL5NmEun0pbUrpY+MI8VPK0f9HV5NeaC4NksC+ke/xYMT+P2lRL CN+9zcCIU+mXr2fCl1xOQcnQzwOElObDxpDcPcxVn0X+AhmPc+uj0mqD =l75D -----END PGP SIGNATURE----- `, } armoredKeyRing := ` -----BEGIN PGP PUBLIC KEY BLOCK----- mQENBFmtHgABCADnfThM7q8D4pgUub9jMppSpgFh3ev84g3Csc3yQUlszEOVgXmu YiSWP1oAiWFQ8ahCydh3LT8TnEB2QvoRNiExUI5XlXFwVfKW3cpDu8gdhtufs90Q NvpaHOgTqRf/texGEKwXi6fvS47fpyaQ9BKNdN52LeaaHzDDZkVsAFmroE+7MMvj P4Mq8qDn2WcWnX9zheQKYrX6Cs48Tx80eehHor4f/XnuaP8DLmPQx7URdJ0Igckh N+i91Qv2ujin8zxUwhkfus66EZS9lQ4qR9iVHs4WHOs3j7whsejd4VhajonilVHj uqTtqHmpN/4njbIKb8q8uQkS26VQYoSYm2UvABEBAAG0GlN1bm55IDxtZUBkYXJr b3dsenouc3BhY2U+iQFUBBMBCAA+FiEEoRt6IzxHaZkkUslhQyLeMqcmyU4FAlmt HgACGwMFCQPCZwAFCwkIBwIGFQgJCgsCBBYCAwECHgECF4AACgkQQyLeMqcmyU7V nAf+J5BYu26B2i+iwctOzDRFcPwCLka9cBwe5wcDvoF2qL8QRo8NPWBBH4zWHa/k BthtGo1b89a53I2hnTwTQ0NOtAUNV+Vvu6nOHJd9Segsx3E1nM43bd2bUfGJ1eeO jDOlOvtP4ozuV6Ej+0Ln2ouMOc87yAwbAzTfQ9axU6CKUbqy0/t2dW1jdKntGH+t VPeFxJHL2gXjP89skCSPYA7yKqqyJRPFvC+7rde1OLdCmZi4VwghUiNbh3s1+xM3 gfr2ahsRDTN2SQzwuHu4y1EgZgPtuWfRxzHqduoRoSgfOfFr9H9Il3UMHf2Etleu rif40YZJhge6STwsIycGh4wOiLkBDQRZrR4AAQgArpUvPdGC/W9X4AuZXrXEShvx TqM4K2Jk9n0j+ABx87k9fm48qgtae7+TayMbb0i7kcbgnjltKbauTbyRbju/EJvN 
CdIw76IPpjy6jUM37wG2QGLFo6Ku3x8/ZpNGGOZ8KMU258/EBqDlJQ/4g4kJ8D+m 9yOH0r6/Xpe/jOY2V8Jo9pdFTm+8eAsSyZF0Cl7drz603Pymq1IS2wrwQbdxQA/w B75pQ5es7X34Ac7/9UZCwCPmZDAldnjHyw5dZgZe8XLrG84BIfbG0Hj8PjrFdF1D Czt9bk+PbYAnLORW2oX1oedxVrNFo5UrbWgBSjA1ppbGFjwSDHFlyjuEuxqyFwAR AQABiQE8BBgBCAAmFiEEoRt6IzxHaZkkUslhQyLeMqcmyU4FAlmtHgACGwwFCQPC ZwAACgkQQyLeMqcmyU7ZBggArzc8UUVSjde987Vqnu/S5Cv8Qhz+UB7gAFyTW2iF VYvB86r30H/NnfjvjCVkBE6FHCNHoxWVyDWmuxKviB7nkReHuwqniQHPgdJDcTKC tBboeX2IYBLJbEvEJuz5NSvnvFuYkIpZHqySFaqdl/qu9XcmoPL5AmIzIFOeiNty qT0ldkf3ru6yQQDDqBDpkfz4AzkpFnLYL59z6IbJDK2Hz7aKeSEeVOGiZLCjIZZV uISZThYqh5zUkvF346OHLDqfDdgQ4RZriqd/DTtRJPlz2uL0QcEIjJuYCkG0UWgl sYyf9RfOnw/KUFAQbdtvLx3ikODQC+D3KBtuKI9ISHQfgw== =FPev -----END PGP PUBLIC KEY BLOCK----- ` e, err := tag.Verify(armoredKeyRing) c.Assert(err, IsNil) _, ok := e.Identities["Sunny "] c.Assert(ok, Equals, true) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/tree.go000066400000000000000000000267151345605224300232630ustar00rootroot00000000000000package object import ( "bufio" "context" "errors" "fmt" "io" "path" "path/filepath" "strings" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) const ( maxTreeDepth = 1024 startingStackSize = 8 ) // New errors defined by this package. var ( ErrMaxTreeDepth = errors.New("maximum tree depth exceeded") ErrFileNotFound = errors.New("file not found") ErrDirectoryNotFound = errors.New("directory not found") ErrEntryNotFound = errors.New("entry not found") ) // Tree is basically like a directory - it references a bunch of other trees // and/or blobs (i.e. files and sub-directories) type Tree struct { Entries []TreeEntry Hash plumbing.Hash s storer.EncodedObjectStorer m map[string]*TreeEntry t map[string]*Tree // tree path cache } // GetTree gets a tree from an object storer and decodes it. 
func GetTree(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tree, error) { o, err := s.EncodedObject(plumbing.TreeObject, h) if err != nil { return nil, err } return DecodeTree(s, o) } // DecodeTree decodes an encoded object into a *Tree and associates it to the // given object storer. func DecodeTree(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tree, error) { t := &Tree{s: s} if err := t.Decode(o); err != nil { return nil, err } return t, nil } // TreeEntry represents a file type TreeEntry struct { Name string Mode filemode.FileMode Hash plumbing.Hash } // File returns the hash of the file identified by the `path` argument. // The path is interpreted as relative to the tree receiver. func (t *Tree) File(path string) (*File, error) { e, err := t.FindEntry(path) if err != nil { return nil, ErrFileNotFound } blob, err := GetBlob(t.s, e.Hash) if err != nil { if err == plumbing.ErrObjectNotFound { return nil, ErrFileNotFound } return nil, err } return NewFile(path, e.Mode, blob), nil } // Size returns the plaintext size of an object, without reading it // into memory. func (t *Tree) Size(path string) (int64, error) { e, err := t.FindEntry(path) if err != nil { return 0, ErrEntryNotFound } return t.s.EncodedObjectSize(e.Hash) } // Tree returns the tree identified by the `path` argument. // The path is interpreted as relative to the tree receiver. func (t *Tree) Tree(path string) (*Tree, error) { e, err := t.FindEntry(path) if err != nil { return nil, ErrDirectoryNotFound } tree, err := GetTree(t.s, e.Hash) if err == plumbing.ErrObjectNotFound { return nil, ErrDirectoryNotFound } return tree, err } // TreeEntryFile returns the *File for a given *TreeEntry. func (t *Tree) TreeEntryFile(e *TreeEntry) (*File, error) { blob, err := GetBlob(t.s, e.Hash) if err != nil { return nil, err } return NewFile(e.Name, e.Mode, blob), nil } // FindEntry search a TreeEntry in this tree or any subtree. 
func (t *Tree) FindEntry(path string) (*TreeEntry, error) { if t.t == nil { t.t = make(map[string]*Tree) } pathParts := strings.Split(path, "/") startingTree := t pathCurrent := "" // search for the longest path in the tree path cache for i := len(pathParts) - 1; i > 1; i-- { path := filepath.Join(pathParts[:i]...) tree, ok := t.t[path] if ok { startingTree = tree pathParts = pathParts[i:] pathCurrent = path break } } var tree *Tree var err error for tree = startingTree; len(pathParts) > 1; pathParts = pathParts[1:] { if tree, err = tree.dir(pathParts[0]); err != nil { return nil, err } pathCurrent = filepath.Join(pathCurrent, pathParts[0]) t.t[pathCurrent] = tree } return tree.entry(pathParts[0]) } func (t *Tree) dir(baseName string) (*Tree, error) { entry, err := t.entry(baseName) if err != nil { return nil, ErrDirectoryNotFound } obj, err := t.s.EncodedObject(plumbing.TreeObject, entry.Hash) if err != nil { return nil, err } tree := &Tree{s: t.s} err = tree.Decode(obj) return tree, err } func (t *Tree) entry(baseName string) (*TreeEntry, error) { if t.m == nil { t.buildMap() } entry, ok := t.m[baseName] if !ok { return nil, ErrEntryNotFound } return entry, nil } // Files returns a FileIter allowing to iterate over the Tree func (t *Tree) Files() *FileIter { return NewFileIter(t.s, t) } // ID returns the object ID of the tree. The returned value will always match // the current value of Tree.Hash. // // ID is present to fulfill the Object interface. func (t *Tree) ID() plumbing.Hash { return t.Hash } // Type returns the type of object. It always returns plumbing.TreeObject. 
func (t *Tree) Type() plumbing.ObjectType {
	return plumbing.TreeObject
}

// Decode transform an plumbing.EncodedObject into a Tree struct
func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
	if o.Type() != plumbing.TreeObject {
		return ErrUnsupportedObject
	}

	t.Hash = o.Hash()
	if o.Size() == 0 {
		return nil
	}

	// Drop any previously decoded state before refilling.
	t.Entries = nil
	t.m = nil

	reader, err := o.Reader()
	if err != nil {
		return err
	}
	defer ioutil.CheckClose(reader, &err)

	// Each raw tree entry is "<octal mode> <name>\x00<20-byte hash>".
	buf := bufio.NewReader(reader)
	for {
		modeStr, err := buf.ReadString(' ')
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}

		mode, err := filemode.New(modeStr[:len(modeStr)-1]) // drop the trailing ' '
		if err != nil {
			return err
		}

		name, err := buf.ReadString(0)
		if err != nil && err != io.EOF {
			return err
		}

		var hash plumbing.Hash
		if _, err = io.ReadFull(buf, hash[:]); err != nil {
			return err
		}

		t.Entries = append(t.Entries, TreeEntry{
			Name: name[:len(name)-1], // drop the trailing NUL
			Mode: mode,
			Hash: hash,
		})
	}

	return nil
}

// Encode transforms a Tree into a plumbing.EncodedObject.
func (t *Tree) Encode(o plumbing.EncodedObject) (err error) { o.SetType(plumbing.TreeObject) w, err := o.Writer() if err != nil { return err } defer ioutil.CheckClose(w, &err) for _, entry := range t.Entries { if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil { return err } if _, err = w.Write([]byte{0x00}); err != nil { return err } if _, err = w.Write([]byte(entry.Hash[:])); err != nil { return err } } return err } func (t *Tree) buildMap() { t.m = make(map[string]*TreeEntry) for i := 0; i < len(t.Entries); i++ { t.m[t.Entries[i].Name] = &t.Entries[i] } } // Diff returns a list of changes between this tree and the provided one func (from *Tree) Diff(to *Tree) (Changes, error) { return DiffTree(from, to) } // Diff returns a list of changes between this tree and the provided one // Error will be returned if context expires // Provided context must be non nil func (from *Tree) DiffContext(ctx context.Context, to *Tree) (Changes, error) { return DiffTreeContext(ctx, from, to) } // Patch returns a slice of Patch objects with all the changes between trees // in chunks. This representation can be used to create several diff outputs. func (from *Tree) Patch(to *Tree) (*Patch, error) { return from.PatchContext(context.Background(), to) } // Patch returns a slice of Patch objects with all the changes between trees // in chunks. This representation can be used to create several diff outputs. // If context expires, an error will be returned // Provided context must be non-nil func (from *Tree) PatchContext(ctx context.Context, to *Tree) (*Patch, error) { changes, err := DiffTreeContext(ctx, from, to) if err != nil { return nil, err } return changes.PatchContext(ctx) } // treeEntryIter facilitates iterating through the TreeEntry objects in a Tree. 
type treeEntryIter struct { t *Tree pos int } func (iter *treeEntryIter) Next() (TreeEntry, error) { if iter.pos >= len(iter.t.Entries) { return TreeEntry{}, io.EOF } iter.pos++ return iter.t.Entries[iter.pos-1], nil } // TreeWalker provides a means of walking through all of the entries in a Tree. type TreeWalker struct { stack []*treeEntryIter base string recursive bool seen map[plumbing.Hash]bool s storer.EncodedObjectStorer t *Tree } // NewTreeWalker returns a new TreeWalker for the given tree. // // It is the caller's responsibility to call Close() when finished with the // tree walker. func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWalker { stack := make([]*treeEntryIter, 0, startingStackSize) stack = append(stack, &treeEntryIter{t, 0}) return &TreeWalker{ stack: stack, recursive: recursive, seen: seen, s: t.s, t: t, } } // Next returns the next object from the tree. Objects are returned in order // and subtrees are included. After the last object has been returned further // calls to Next() will return io.EOF. // // In the current implementation any objects which cannot be found in the // underlying repository will be skipped automatically. It is possible that this // may change in future versions. 
func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
	var obj Object
	for {
		current := len(w.stack) - 1
		if current < 0 {
			// Nothing left on the stack so we're finished
			err = io.EOF
			return
		}

		if current > maxTreeDepth {
			// We're probably following bad data or some self-referencing tree
			err = ErrMaxTreeDepth
			return
		}

		entry, err = w.stack[current].Next()
		if err == io.EOF {
			// Finished with the current tree, move back up to the parent
			w.stack = w.stack[:current]
			w.base, _ = path.Split(w.base)
			w.base = path.Clean(w.base) // Remove trailing slash
			continue
		}

		if err != nil {
			return
		}

		// Skip entries the caller has already marked as visited.
		if w.seen[entry.Hash] {
			continue
		}

		// Only directory entries need their subtree loaded; obj stays nil
		// for blobs and other modes, so the recursive push below is a no-op.
		if entry.Mode == filemode.Dir {
			obj, err = GetTree(w.s, entry.Hash)
		}

		name = path.Join(w.base, entry.Name)

		if err != nil {
			// A subtree that cannot be loaded ends the walk here: the error
			// is deliberately replaced with io.EOF (see the doc comment on
			// Next about missing objects being skipped).
			err = io.EOF
			return
		}

		break
	}

	if !w.recursive {
		return
	}

	// Descend into the subtree: push its iterator and extend the base path.
	if t, ok := obj.(*Tree); ok {
		w.stack = append(w.stack, &treeEntryIter{t, 0})
		w.base = path.Join(w.base, entry.Name)
	}

	return
}

// Tree returns the tree that the tree walker most recently operated on.
func (w *TreeWalker) Tree() *Tree {
	current := len(w.stack) - 1
	// An iterator at position 0 has not yielded anything yet, so the tree
	// most recently operated on is its parent.
	if w.stack[current].pos == 0 {
		current--
	}

	if current < 0 {
		return nil
	}

	return w.stack[current].t
}

// Close releases any resources used by the TreeWalker.
func (w *TreeWalker) Close() {
	w.stack = nil
}

// TreeIter provides an iterator for a set of trees.
type TreeIter struct {
	storer.EncodedObjectIter
	s storer.EncodedObjectStorer
}

// NewTreeIter takes a storer.EncodedObjectStorer and a
// storer.EncodedObjectIter and returns a *TreeIter that iterates over all
// tree contained in the storer.EncodedObjectIter.
//
// Any non-tree object returned by the storer.EncodedObjectIter is skipped.
func NewTreeIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TreeIter {
	return &TreeIter{iter, s}
}

// Next moves the iterator to the next tree and returns a pointer to it. If
// there are no more trees, it returns io.EOF.
func (iter *TreeIter) Next() (*Tree, error) { for { obj, err := iter.EncodedObjectIter.Next() if err != nil { return nil, err } if obj.Type() != plumbing.TreeObject { continue } return DecodeTree(iter.s, obj) } } // ForEach call the cb function for each tree contained on this iter until // an error happens or the end of the iter is reached. If ErrStop is sent // the iteration is stop but no error is returned. The iterator is closed. func (iter *TreeIter) ForEach(cb func(*Tree) error) error { return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { if obj.Type() != plumbing.TreeObject { return nil } t, err := DecodeTree(iter.s, obj) if err != nil { return err } return cb(t) }) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/tree_test.go000066400000000000000000002222461345605224300243170ustar00rootroot00000000000000package object import ( "errors" "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/filesystem" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type TreeSuite struct { BaseObjectsSuite Tree *Tree } var _ = Suite(&TreeSuite{}) func (s *TreeSuite) SetUpSuite(c *C) { s.BaseObjectsSuite.SetUpSuite(c) hash := plumbing.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c") s.Tree = s.tree(c, hash) } func (s *TreeSuite) TestDecode(c *C) { c.Assert(s.Tree.Entries, HasLen, 8) c.Assert(s.Tree.Entries[0].Name, Equals, ".gitignore") c.Assert(s.Tree.Entries[0].Hash.String(), Equals, "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88") c.Assert(s.Tree.Entries[0].Mode, Equals, filemode.Regular) c.Assert(s.Tree.Entries[4].Name, Equals, "go") c.Assert(s.Tree.Entries[4].Hash.String(), Equals, "a39771a7651f97faf5c72e08224d857fc35133db") c.Assert(s.Tree.Entries[4].Mode, Equals, filemode.Dir) } func (s *TreeSuite) TestDecodeNonTree(c *C) { hash := plumbing.NewHash("9a48f23120e880dfbe41f7c9b7b708e9ee62a492") blob, err := s.Storer.EncodedObject(plumbing.BlobObject, hash) c.Assert(err, IsNil) tree := &Tree{} err = tree.Decode(blob) c.Assert(err, Equals, ErrUnsupportedObject) } func (s *TreeSuite) TestType(c *C) { c.Assert(s.Tree.Type(), Equals, plumbing.TreeObject) } func (s *TreeSuite) TestTree(c *C) { expectedEntry, ok := s.Tree.m["vendor"] c.Assert(ok, Equals, true) expected := expectedEntry.Hash obtainedTree, err := s.Tree.Tree("vendor") c.Assert(err, IsNil) c.Assert(obtainedTree.Hash, Equals, expected) } func (s *TreeSuite) TestTreeNotFound(c *C) { d, err := s.Tree.Tree("not-found") c.Assert(d, IsNil) c.Assert(err, Equals, ErrDirectoryNotFound) } func (s *TreeSuite) TestTreeFailsWithExistingFiles(c *C) { _, err := s.Tree.File("LICENSE") c.Assert(err, IsNil) d, err := s.Tree.Tree("LICENSE") c.Assert(d, IsNil) c.Assert(err, Equals, ErrDirectoryNotFound) } func (s *TreeSuite) TestFile(c *C) { f, err := s.Tree.File("LICENSE") c.Assert(err, IsNil) c.Assert(f.Name, Equals, "LICENSE") } func (s *TreeSuite) TestFileNotFound(c *C) { f, err := s.Tree.File("not-found") c.Assert(f, IsNil) 
c.Assert(err, Equals, ErrFileNotFound) } func (s *TreeSuite) TestFileFailsWithExistingTrees(c *C) { _, err := s.Tree.Tree("vendor") c.Assert(err, IsNil) f, err := s.Tree.File("vendor") c.Assert(f, IsNil) c.Assert(err, Equals, ErrFileNotFound) } func (s *TreeSuite) TestSize(c *C) { size, err := s.Tree.Size("LICENSE") c.Assert(err, IsNil) c.Assert(size, Equals, int64(1072)) } func (s *TreeSuite) TestFiles(c *C) { var count int err := s.Tree.Files().ForEach(func(f *File) error { count++ return nil }) c.Assert(err, IsNil) c.Assert(count, Equals, 9) } func (s *TreeSuite) TestFindEntry(c *C) { e, err := s.Tree.FindEntry("vendor/foo.go") c.Assert(err, IsNil) c.Assert(e.Name, Equals, "foo.go") } func (s *TreeSuite) TestFindEntryNotFound(c *C) { e, err := s.Tree.FindEntry("not-found") c.Assert(e, IsNil) c.Assert(err, Equals, ErrEntryNotFound) e, err = s.Tree.FindEntry("not-found/not-found/not-found") c.Assert(e, IsNil) c.Assert(err, Equals, ErrDirectoryNotFound) } // Overrides returned plumbing.EncodedObject for given hash. 
// Otherwise, delegates to actual storer to get real object
type fakeStorer struct {
	storer.EncodedObjectStorer
	hash plumbing.Hash
	fake fakeEncodedObject
}

// EncodedObject returns the fake object for the configured hash and falls
// through to the embedded storer for every other hash.
func (fs fakeStorer) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
	if fs.hash == h {
		return fs.fake, nil
	}
	return fs.EncodedObjectStorer.EncodedObject(t, h)
}

// Overrides reader of plumbing.EncodedObject to simulate read error
type fakeEncodedObject struct{ plumbing.EncodedObject }

func (fe fakeEncodedObject) Reader() (io.ReadCloser, error) {
	return nil, errors.New("Simulate encoded object can't be read")
}

// TestDir checks that Tree.dir surfaces an error when the subtree's encoded
// object exists but cannot be read (simulated via fakeStorer/fakeEncodedObject).
func (s *TreeSuite) TestDir(c *C) {
	// Sanity check: "vendor" resolves fine against the real storer.
	vendor, err := s.Tree.dir("vendor")
	c.Assert(err, IsNil)

	t, err := GetTree(s.Tree.s, s.Tree.ID())
	c.Assert(err, IsNil)

	o, err := t.s.EncodedObject(plumbing.AnyObject, vendor.ID())
	c.Assert(err, IsNil)

	// Swap in a storer that returns an unreadable object for the vendor
	// subtree's hash; dir must now fail while decoding it.
	t.s = fakeStorer{t.s, vendor.ID(), fakeEncodedObject{o}}
	_, err = t.dir("vendor")
	c.Assert(err, NotNil)
}

// This plumbing.EncodedObject implementation has a reader that only returns 6
// bytes at a time, this should simulate the conditions when a read
// returns less bytes than asked, for example when reading a hash which
// is bigger than 6 bytes.
type SortReadObject struct { t plumbing.ObjectType h plumbing.Hash cont []byte sz int64 } func (o *SortReadObject) Hash() plumbing.Hash { return o.h } func (o *SortReadObject) Type() plumbing.ObjectType { return o.t } func (o *SortReadObject) SetType(t plumbing.ObjectType) { o.t = t } func (o *SortReadObject) Size() int64 { return o.sz } func (o *SortReadObject) SetSize(s int64) { o.sz = s } func (o *SortReadObject) Content() []byte { return o.cont } func (o *SortReadObject) Reader() (io.ReadCloser, error) { return &SortReadCloser{pos: 0, data: o.cont}, nil } func (o *SortReadObject) Writer() (io.WriteCloser, error) { return o, nil } func (o *SortReadObject) Write(p []byte) (n int, err error) { return len(p), nil } func (o *SortReadObject) Close() error { return nil } // a ReadCloser that only returns 6 bytes at a time, to simulate incomplete reads. type SortReadCloser struct { pos int data []byte } func (o *SortReadCloser) Close() error { return nil } func (o *SortReadCloser) Read(p []byte) (int, error) { if o.pos == len(o.data) { return 0, io.EOF } sz := len(p) remaining := len(o.data) - o.pos if sz > 6 { // don't read more than 6 bytes at a time sz = 6 } if sz > remaining { sz = remaining } src := o.data[o.pos : o.pos+sz] nw := copy(p, src) o.pos += nw return nw, nil } func (s *TreeSuite) TestTreeDecodeEncodeIdempotent(c *C) { trees := []*Tree{ { Entries: []TreeEntry{ {"foo", filemode.Empty, plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d")}, {"bar", filemode.Empty, plumbing.NewHash("c029517f6300c2da0f4b651b8642506cd6aaf45d")}, {"baz", filemode.Empty, plumbing.NewHash("d029517f6300c2da0f4b651b8642506cd6aaf45d")}, }, }, } for _, tree := range trees { obj := &plumbing.MemoryObject{} err := tree.Encode(obj) c.Assert(err, IsNil) newTree := &Tree{} err = newTree.Decode(obj) c.Assert(err, IsNil) tree.Hash = obj.Hash() c.Assert(newTree, DeepEquals, tree) } } func (s *TreeSuite) TestTreeIter(c *C) { encIter, err := 
s.Storer.IterEncodedObjects(plumbing.TreeObject) c.Assert(err, IsNil) iter := NewTreeIter(s.Storer, encIter) trees := []*Tree{} iter.ForEach(func(t *Tree) error { t.s = nil trees = append(trees, t) return nil }) c.Assert(len(trees) > 0, Equals, true) iter.Close() encIter, err = s.Storer.IterEncodedObjects(plumbing.TreeObject) c.Assert(err, IsNil) iter = NewTreeIter(s.Storer, encIter) i := 0 for { t, err := iter.Next() if err == io.EOF { break } t.s = nil c.Assert(err, IsNil) c.Assert(t, DeepEquals, trees[i]) i += 1 } iter.Close() } func (s *TreeSuite) TestTreeWalkerNext(c *C) { commit, err := GetCommit(s.Storer, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) c.Assert(err, IsNil) tree, err := commit.Tree() c.Assert(err, IsNil) walker := NewTreeWalker(tree, true, nil) for _, e := range treeWalkerExpects { name, entry, err := walker.Next() if err == io.EOF { break } c.Assert(err, IsNil) c.Assert(name, Equals, e.Path) c.Assert(entry.Name, Equals, e.Name) c.Assert(entry.Mode, Equals, e.Mode) c.Assert(entry.Hash.String(), Equals, e.Hash) c.Assert(walker.Tree().ID().String(), Equals, e.Tree) } } func (s *TreeSuite) TestTreeWalkerNextSkipSeen(c *C) { commit, err := GetCommit(s.Storer, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) c.Assert(err, IsNil) tree, err := commit.Tree() c.Assert(err, IsNil) seen := map[plumbing.Hash]bool{ plumbing.NewHash(treeWalkerExpects[0].Hash): true, } walker := NewTreeWalker(tree, true, seen) for _, e := range treeWalkerExpects[1:] { name, entry, err := walker.Next() if err == io.EOF { break } c.Assert(err, IsNil) c.Assert(name, Equals, e.Path) c.Assert(entry.Name, Equals, e.Name) c.Assert(entry.Mode, Equals, e.Mode) c.Assert(entry.Hash.String(), Equals, e.Hash) c.Assert(walker.Tree().ID().String(), Equals, e.Tree) } } func (s *TreeSuite) TestTreeWalkerNextNonRecursive(c *C) { commit := s.commit(c, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) tree, err := commit.Tree() c.Assert(err, IsNil) var 
count int walker := NewTreeWalker(tree, false, nil) for { name, entry, err := walker.Next() if err == io.EOF { break } c.Assert(err, IsNil) c.Assert(name, Not(Equals), "") c.Assert(entry, NotNil) c.Assert(walker.Tree().ID().String(), Equals, "a8d315b2b1c615d43042c3a62402b8a54288cf5c") count++ } c.Assert(count, Equals, 8) } func (s *TreeSuite) TestTreeWalkerNextSubmodule(c *C) { dotgit := fixtures.ByURL("https://github.com/git-fixtures/submodule.git").One().DotGit() st := filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault()) hash := plumbing.NewHash("b685400c1f9316f350965a5993d350bc746b0bf4") commit, err := GetCommit(st, hash) c.Assert(err, IsNil) tree, err := commit.Tree() c.Assert(err, IsNil) expected := []string{ ".gitmodules", "README.md", "basic", "itself", } var count int walker := NewTreeWalker(tree, true, nil) defer walker.Close() for { name, entry, err := walker.Next() if err == io.EOF { break } c.Assert(err, IsNil) c.Assert(entry, NotNil) c.Assert(name, Equals, expected[count]) count++ } c.Assert(count, Equals, 4) } var treeWalkerExpects = []struct { Path string Mode filemode.FileMode Name, Hash, Tree string }{{ Path: ".gitignore", Mode: filemode.Regular, Name: ".gitignore", Hash: "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88", Tree: "a8d315b2b1c615d43042c3a62402b8a54288cf5c", }, { Path: "CHANGELOG", Mode: filemode.Regular, Name: "CHANGELOG", Hash: "d3ff53e0564a9f87d8e84b6e28e5060e517008aa", Tree: "a8d315b2b1c615d43042c3a62402b8a54288cf5c", }, { Path: "LICENSE", Mode: filemode.Regular, Name: "LICENSE", Hash: "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f", Tree: "a8d315b2b1c615d43042c3a62402b8a54288cf5c", }, { Path: "binary.jpg", Mode: filemode.Regular, Name: "binary.jpg", Hash: "d5c0f4ab811897cadf03aec358ae60d21f91c50d", Tree: "a8d315b2b1c615d43042c3a62402b8a54288cf5c", }, { Path: "go", Mode: filemode.Dir, Name: "go", Hash: "a39771a7651f97faf5c72e08224d857fc35133db", Tree: "a8d315b2b1c615d43042c3a62402b8a54288cf5c", }, { Path: "go/example.go", Mode: 
filemode.Regular, Name: "example.go", Hash: "880cd14280f4b9b6ed3986d6671f907d7cc2a198", Tree: "a39771a7651f97faf5c72e08224d857fc35133db", }, { Path: "json", Mode: filemode.Dir, Name: "json", Hash: "5a877e6a906a2743ad6e45d99c1793642aaf8eda", Tree: "a8d315b2b1c615d43042c3a62402b8a54288cf5c", }, { Path: "json/long.json", Mode: filemode.Regular, Name: "long.json", Hash: "49c6bb89b17060d7b4deacb7b338fcc6ea2352a9", Tree: "5a877e6a906a2743ad6e45d99c1793642aaf8eda", }, { Path: "json/short.json", Mode: filemode.Regular, Name: "short.json", Hash: "c8f1d8c61f9da76f4cb49fd86322b6e685dba956", Tree: "5a877e6a906a2743ad6e45d99c1793642aaf8eda", }, { Path: "php", Mode: filemode.Dir, Name: "php", Hash: "586af567d0bb5e771e49bdd9434f5e0fb76d25fa", Tree: "a8d315b2b1c615d43042c3a62402b8a54288cf5c", }, { Path: "php/crappy.php", Mode: filemode.Regular, Name: "crappy.php", Hash: "9a48f23120e880dfbe41f7c9b7b708e9ee62a492", Tree: "586af567d0bb5e771e49bdd9434f5e0fb76d25fa", }, { Path: "vendor", Mode: filemode.Dir, Name: "vendor", Hash: "cf4aa3b38974fb7d81f367c0830f7d78d65ab86b", Tree: "a8d315b2b1c615d43042c3a62402b8a54288cf5c", }, { Path: "vendor/foo.go", Mode: filemode.Regular, Name: "foo.go", Hash: "9dea2395f5403188298c1dabe8bdafe562c491e3", Tree: "cf4aa3b38974fb7d81f367c0830f7d78d65ab86b", }} func entriesEquals(a, b []TreeEntry) bool { if a == nil && b == nil { return true } if a == nil || b == nil { return false } if len(a) != len(b) { return false } for i, v := range a { if v != b[i] { return false } } return true } // When decoding a tree we were not checking the return value of read // when reading hashes. As a hash is quite small, it worked well nearly // all the time. // // I have found some examples of repos where the read is incomplete and // the tree decode fails, for example // http://github.com/sqlcipher/sqlcipher.git, object // 0ba19d22411289293ab5c012891529967d7c933e. 
// // This tests is performed with that object but using a SortReadObject to // simulate incomplete reads on all platforms and operating systems. func (s *TreeSuite) TestTreeDecodeReadBug(c *C) { cont := []byte{ 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x61, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x63, 0x0, 0xa4, 0x9d, 0x33, 0x49, 0xd7, 0xe2, 0x3f, 0xb5, 0x81, 0x19, 0x4f, 0x4c, 0xb5, 0x9a, 0xc0, 0xd5, 0x1b, 0x2, 0x1f, 0x78, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x61, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x2e, 0x63, 0x0, 0x9a, 0x3e, 0x95, 0x97, 0xdb, 0xb, 0x3, 0x20, 0x77, 0xc9, 0x1d, 0x96, 0x9d, 0x22, 0xc6, 0x27, 0x3f, 0x70, 0x2a, 0xc, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x2e, 0x63, 0x0, 0xb8, 0xe1, 0x21, 0x99, 0xb5, 0x7d, 0xe8, 0x11, 0xea, 0xe0, 0xd0, 0x61, 0x42, 0xd5, 0xac, 0x4f, 0xd4, 0x30, 0xb1, 0xd8, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x63, 0x0, 0xd3, 0x8b, 0xb8, 0x36, 0xa7, 0x84, 0xfb, 0xfa, 0xb6, 0xab, 0x7b, 0x3, 0xd4, 0xe6, 0xdd, 0x43, 0xed, 0xc4, 0x1f, 0xa7, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x63, 0x0, 0x25, 0x2f, 0x61, 0xcf, 0xca, 0xa8, 0xfc, 0xf3, 0x13, 0x7e, 0x8, 0xed, 0x68, 0x47, 0xdc, 0xfe, 0x1d, 0xc1, 0xde, 0x54, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x62, 0x69, 0x74, 0x76, 0x65, 0x63, 0x2e, 0x63, 0x0, 0x52, 0x18, 0x4a, 0xa9, 0x64, 0xce, 0x18, 0x98, 0xf3, 0x5d, 0x1b, 0x3d, 0x87, 0x87, 0x1c, 0x2d, 0xe, 0xf4, 0xc5, 0x3d, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x62, 0x74, 0x6d, 0x75, 0x74, 0x65, 0x78, 0x2e, 0x63, 0x0, 0xd8, 0x7d, 0x4d, 0x5f, 0xee, 0xb6, 0x30, 0x7a, 0xec, 0xdc, 0x9a, 0x83, 0x11, 0x14, 0x89, 0xab, 0x30, 0xc6, 0x78, 0xc3, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x62, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x63, 0x0, 0x3c, 0xa6, 0x5, 0x83, 0xe3, 0xc8, 0xe3, 0x12, 0x0, 0xf9, 0x73, 0xe0, 0xe9, 0xc4, 0x53, 0x62, 0x58, 0xb2, 0x64, 0x39, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x62, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x68, 0x0, 
0xac, 0xe0, 0xf8, 0xcd, 0x21, 0x77, 0x70, 0xa2, 0xf6, 0x6b, 0x2e, 0xb8, 0x71, 0xbb, 0xc5, 0xfd, 0xc6, 0xfc, 0x2b, 0x68, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x62, 0x74, 0x72, 0x65, 0x65, 0x49, 0x6e, 0x74, 0x2e, 0x68, 0x0, 0xce, 0x3c, 0x54, 0x93, 0xf8, 0xca, 0xd0, 0xbc, 0x54, 0x8a, 0xe8, 0xe4, 0x4e, 0x51, 0x28, 0x31, 0xd8, 0xfa, 0xc4, 0x31, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2e, 0x63, 0x0, 0x3c, 0x91, 0xcd, 0xcf, 0xdb, 0x7b, 0x1, 0x7c, 0xbc, 0x2d, 0x5c, 0x29, 0x57, 0x1a, 0x98, 0x27, 0xd, 0xe0, 0x71, 0xe6, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x2e, 0x63, 0x0, 0xd4, 0xc, 0x65, 0xcb, 0x92, 0x45, 0x80, 0x29, 0x6a, 0xd0, 0x69, 0xa0, 0x4b, 0xf9, 0xc9, 0xe9, 0x53, 0x4e, 0xca, 0xa7, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x2e, 0x63, 0x0, 0x9e, 0x91, 0x40, 0x8, 0x5c, 0x0, 0x46, 0xed, 0x3b, 0xf6, 0xf4, 0x48, 0x52, 0x20, 0x69, 0x2d, 0xca, 0x17, 0x43, 0xc5, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x63, 0x0, 0x25, 0x51, 0xe6, 0xba, 0x2, 0x39, 0xf8, 0x5a, 0x35, 0x77, 0x96, 0xa8, 0xdd, 0xa8, 0xca, 0x3e, 0x29, 0x70, 0x93, 0xf8, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x68, 0x0, 0xf7, 0x1f, 0x53, 0x2c, 0xdc, 0x44, 0x8f, 0xa, 0x1d, 0xd5, 0xc6, 0xef, 0xf5, 0xfb, 0xd3, 0x3a, 0x91, 0x55, 0xaa, 0x97, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x63, 0x63, 0x2e, 0x63, 0x0, 0x53, 0x7d, 0xf7, 0xe3, 0xb3, 0x6a, 0xb5, 0xcf, 0xdd, 0x6f, 0xca, 0x40, 0x28, 0xeb, 0xca, 0xe1, 0x86, 0x87, 0xd6, 0x4d, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x2e, 0x63, 0x0, 0xa5, 0x89, 0x27, 0xc7, 0x6e, 0xf6, 0x20, 0x56, 0x77, 0xbe, 0x5c, 0x1a, 0x8e, 0x80, 0xc9, 0x83, 0x56, 0xb3, 0xa9, 0xd3, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 
0x6c, 0x69, 0x62, 0x74, 0x6f, 0x6d, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2e, 0x63, 0x0, 0x1a, 0x33, 0x83, 0xe0, 0x1, 0xa7, 0x21, 0x11, 0xc3, 0xf6, 0x61, 0x92, 0x22, 0xb0, 0x65, 0xf4, 0xbd, 0x1, 0xb, 0xe1, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x73, 0x73, 0x6c, 0x2e, 0x63, 0x0, 0xd0, 0x19, 0x81, 0x3b, 0x47, 0x6c, 0x52, 0xd0, 0x20, 0xe2, 0xc0, 0xac, 0xd5, 0x24, 0xe9, 0xea, 0x3d, 0xf, 0xb9, 0xfe, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x63, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x63, 0x0, 0x60, 0x59, 0x5f, 0xf8, 0x8d, 0x92, 0xf7, 0x8, 0x26, 0x4, 0xfb, 0xd9, 0xdf, 0x9a, 0xfe, 0xa1, 0x6a, 0xe8, 0x6f, 0xf, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x63, 0x0, 0x75, 0x8d, 0xd7, 0xc8, 0x9b, 0xca, 0x39, 0x37, 0xa9, 0xd, 0x70, 0x6e, 0xa9, 0x82, 0xce, 0x3a, 0xcf, 0x11, 0xd1, 0x83, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x2e, 0x63, 0x0, 0x63, 0x4e, 0x11, 0x55, 0x63, 0xae, 0x12, 0xba, 0x65, 0x58, 0xcc, 0xc5, 0x12, 0xae, 0xd6, 0x31, 0xc0, 0x66, 0xba, 0xd8, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x63, 0x0, 0x66, 0x3, 0x97, 0xe0, 0x78, 0xae, 0x48, 0xb2, 0xe7, 0x17, 0x5e, 0x33, 0x85, 0x67, 0x78, 0x19, 0x72, 0x2d, 0xdd, 0x6c, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x63, 0x0, 0xc3, 0x2, 0x8c, 0x4f, 0x93, 0x6e, 0xdf, 0x96, 0x71, 0x2d, 0xbe, 0x73, 0xa0, 0x76, 0x62, 0xf0, 0xa2, 0x6b, 0x1d, 0xa, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x66, 0x6b, 0x65, 0x79, 0x2e, 0x63, 0x0, 0xac, 0x35, 0xbc, 0x19, 0x4c, 0xde, 0xb1, 0x27, 0x98, 0x9b, 0x9, 0x40, 0x35, 0xce, 0xe0, 0x6f, 0x57, 0x37, 0x6f, 0x5e, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x2e, 0x63, 0x0, 0xc0, 0x2f, 0x9, 0x6a, 0xda, 0xd5, 0xbc, 0xe9, 0xac, 0x83, 0xd3, 0x5f, 0xf, 0x46, 0x9, 0xd6, 0xf6, 0xd4, 0x3b, 0xe5, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x2e, 0x63, 0x0, 0x7b, 
0x2, 0xcf, 0x21, 0x30, 0xe0, 0xd1, 0xa7, 0xb8, 0x89, 0xd8, 0x44, 0xc, 0xcc, 0x82, 0x8, 0xf7, 0xb6, 0x7b, 0xf9, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x68, 0x61, 0x73, 0x68, 0x2e, 0x63, 0x0, 0xe8, 0x1d, 0xcf, 0x95, 0xe4, 0x38, 0x48, 0xfa, 0x70, 0x86, 0xb7, 0xf7, 0x81, 0xc0, 0x90, 0xad, 0xc7, 0xe6, 0xca, 0x8e, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x68, 0x61, 0x73, 0x68, 0x2e, 0x68, 0x0, 0x82, 0xb7, 0xc5, 0x8c, 0x71, 0x9, 0xb, 0x54, 0x7e, 0x10, 0x17, 0x42, 0xaa, 0x9, 0x51, 0x73, 0x9f, 0xf2, 0xee, 0xe7, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x68, 0x77, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x68, 0x0, 0xb8, 0xbc, 0x5a, 0x29, 0x5b, 0xe3, 0xfa, 0xc8, 0x35, 0x1f, 0xa9, 0xf0, 0x8a, 0x77, 0x57, 0x9d, 0x59, 0xc9, 0xa8, 0xe4, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x2e, 0x63, 0x0, 0x9a, 0x56, 0x61, 0xf5, 0x9a, 0x72, 0x95, 0x2b, 0xe6, 0xc1, 0x67, 0xa0, 0xc2, 0xdb, 0x15, 0x9b, 0x91, 0xb7, 0x1f, 0xae, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x0, 0xfe, 0xd2, 0x7b, 0xe3, 0xe3, 0x80, 0x55, 0xd2, 0x20, 0x43, 0x95, 0xcd, 0xe6, 0xff, 0xc9, 0x45, 0x89, 0xfb, 0xf5, 0xe8, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x2e, 0x63, 0x0, 0x94, 0x64, 0x9a, 0xe7, 0x5, 0xab, 0x93, 0x85, 0x10, 0x8d, 0xd, 0x88, 0x7a, 0xf0, 0x75, 0x92, 0x89, 0xfb, 0x23, 0xcb, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6c, 0x65, 0x6d, 0x70, 0x61, 0x72, 0x2e, 0x63, 0x0, 0x2a, 0xfa, 0xa6, 0xce, 0xa6, 0xd8, 0x29, 0x60, 0x2c, 0x27, 0x86, 0xc1, 0xf8, 0xa3, 0x7f, 0x56, 0x7c, 0xf6, 0xfd, 0x53, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x78, 0x74, 0x2e, 0x63, 0x0, 0xcd, 0xcf, 0x6a, 0x93, 0xb8, 0xc4, 0xf, 0x91, 0x4b, 0x94, 0x24, 0xe, 0xf1, 0x4c, 0xb4, 0xa3, 0xa, 0x37, 0xec, 0xa1, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x63, 0x0, 0x39, 0xf6, 0x4, 0x21, 0xe6, 0x81, 0x27, 0x7c, 0xc3, 0xdb, 0xa0, 0x9a, 0xbe, 0x7c, 0xf7, 0x90, 0xd5, 
0x28, 0xf5, 0xc3, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6d, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x2e, 0x63, 0x0, 0x35, 0xa4, 0x4e, 0x5f, 0x61, 0xc2, 0xe4, 0x4c, 0x48, 0x1c, 0x62, 0x51, 0xbd, 0xa, 0xae, 0x7a, 0xcd, 0xa4, 0xde, 0xb, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6d, 0x65, 0x6d, 0x30, 0x2e, 0x63, 0x0, 0xd, 0xb, 0x66, 0x67, 0xd6, 0xa, 0x95, 0x5a, 0x6, 0x96, 0xdf, 0x62, 0x89, 0xb4, 0x91, 0x78, 0x96, 0x93, 0x43, 0xaa, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6d, 0x65, 0x6d, 0x31, 0x2e, 0x63, 0x0, 0x35, 0x78, 0x49, 0x6f, 0x33, 0x3, 0x7, 0xb2, 0x31, 0xdf, 0xb5, 0x3c, 0xc, 0x2e, 0x1c, 0x6b, 0x32, 0x3d, 0x79, 0x1e, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6d, 0x65, 0x6d, 0x32, 0x2e, 0x63, 0x0, 0x26, 0x44, 0x8e, 0xa8, 0xaa, 0xe0, 0x36, 0x6a, 0xf0, 0x54, 0x1a, 0xfe, 0xa4, 0x79, 0xb, 0x42, 0xf4, 0xa6, 0x9b, 0x5a, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6d, 0x65, 0x6d, 0x33, 0x2e, 0x63, 0x0, 0x1a, 0x1b, 0x79, 0x1f, 0x28, 0xf8, 0xcf, 0x3c, 0xe4, 0xf9, 0xa3, 0x5c, 0xda, 0xd7, 0xb7, 0x10, 0x75, 0x68, 0xc7, 0x15, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6d, 0x65, 0x6d, 0x35, 0x2e, 0x63, 0x0, 0x78, 0x3c, 0xef, 0x61, 0x76, 0xc5, 0x9c, 0xbf, 0x30, 0x91, 0x46, 0x31, 0x9, 0x5a, 0x1a, 0x54, 0xf4, 0xe4, 0x2e, 0x8, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6d, 0x65, 0x6d, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x0, 0x5, 0x72, 0x59, 0x48, 0xf6, 0x5d, 0x42, 0x7b, 0x7, 0xf7, 0xf9, 0x29, 0xac, 0xa3, 0xff, 0x22, 0x4b, 0x17, 0x53, 0xdf, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6d, 0x75, 0x74, 0x65, 0x78, 0x2e, 0x63, 0x0, 0xb5, 0x67, 0xe7, 0xc2, 0x7e, 0xf2, 0x4, 0x10, 0x86, 0xaf, 0xe0, 0xf6, 0x96, 0x66, 0xe2, 0x7b, 0xf5, 0x9, 0x8a, 0x59, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6d, 0x75, 0x74, 0x65, 0x78, 0x2e, 0x68, 0x0, 0x9, 0x78, 0x81, 0x22, 0x52, 0x77, 0x89, 0xa, 0x9c, 0x36, 0xc2, 0x4d, 0x41, 0xf6, 0x11, 0x4d, 0x64, 0xc0, 0x6d, 0xb3, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6d, 0x75, 0x74, 0x65, 0x78, 0x5f, 0x6e, 0x6f, 0x6f, 0x70, 0x2e, 
0x63, 0x0, 0x45, 0x6e, 0x82, 0xa2, 0x5e, 0x27, 0x1b, 0x6, 0x14, 0xe7, 0xf4, 0xf8, 0x3c, 0x22, 0x85, 0x53, 0xb7, 0xfa, 0x1, 0x58, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6d, 0x75, 0x74, 0x65, 0x78, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x2e, 0x63, 0x0, 0xec, 0xa7, 0x29, 0x58, 0x31, 0xc2, 0xf0, 0xee, 0x48, 0xba, 0x54, 0xd0, 0x62, 0x91, 0x4d, 0x6, 0xa1, 0xdd, 0x8e, 0xbe, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6d, 0x75, 0x74, 0x65, 0x78, 0x5f, 0x77, 0x33, 0x32, 0x2e, 0x63, 0x0, 0x27, 0xd1, 0xa, 0xf5, 0xbd, 0x33, 0x1b, 0xdb, 0x97, 0x3f, 0x61, 0x45, 0xb7, 0x4f, 0x72, 0xb6, 0x7, 0xcf, 0xc4, 0x6e, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x2e, 0x63, 0x0, 0xfc, 0xab, 0x5b, 0xfa, 0xf0, 0x19, 0x8, 0xd3, 0xde, 0x93, 0xfa, 0x88, 0xb5, 0xea, 0xe9, 0xe9, 0x6c, 0xa3, 0xc8, 0xe8, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6f, 0x73, 0x2e, 0x63, 0x0, 0xbe, 0x2e, 0xa4, 0xcf, 0xc0, 0x19, 0x59, 0x93, 0xa3, 0x40, 0xc9, 0x2, 0xae, 0xdd, 0xf1, 0xbe, 0x4b, 0x8e, 0xd7, 0x3a, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6f, 0x73, 0x2e, 0x68, 0x0, 0x7, 0xa, 0x2d, 0xdd, 0x17, 0xf7, 0x71, 0xf9, 0x8f, 0xf8, 0xcc, 0xd6, 0xf0, 0x33, 0xbd, 0xac, 0xc5, 0xe9, 0xf6, 0xc, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6f, 0x73, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x68, 0x0, 0xf6, 0xc3, 0xe7, 0xff, 0x89, 0x46, 0x30, 0x86, 0x40, 0x18, 0x22, 0xf4, 0x81, 0xe7, 0xe3, 0xb8, 0x7b, 0x2c, 0x78, 0xc7, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6f, 0x73, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x2e, 0x63, 0x0, 0xab, 0xc2, 0x3a, 0x45, 0x2e, 0x72, 0xf7, 0x1c, 0x76, 0xaf, 0xa9, 0x98, 0x3c, 0x3a, 0xd9, 0xd4, 0x25, 0x61, 0x6c, 0x6d, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x6f, 0x73, 0x5f, 0x77, 0x69, 0x6e, 0x2e, 0x63, 0x0, 0xae, 0xb0, 0x88, 0x14, 0xb3, 0xda, 0xbe, 0x81, 0xb8, 0x4c, 0xda, 0x91, 0x85, 0x82, 0xb0, 0xf, 0xfd, 0x86, 0xe4, 0x87, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x70, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x63, 0x0, 0x61, 0x72, 0x7f, 0xaa, 0x9c, 0xf, 0x3d, 0x56, 0x62, 
0x65, 0xbe, 0x7e, 0xec, 0x5b, 0x2a, 0x35, 0xf6, 0xa4, 0xbc, 0x9f, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x70, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x68, 0x0, 0x6f, 0x65, 0x91, 0x36, 0xe2, 0x76, 0x7, 0x9d, 0xa4, 0x3a, 0x2e, 0x39, 0xe1, 0xb6, 0x86, 0x37, 0xec, 0xad, 0xcf, 0x68, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x70, 0x61, 0x72, 0x73, 0x65, 0x2e, 0x79, 0x0, 0x83, 0x10, 0xb2, 0x69, 0x89, 0xb0, 0x5b, 0xed, 0x1e, 0x1b, 0x3, 0xda, 0x80, 0xf5, 0xc0, 0xa5, 0x2e, 0x9a, 0xd1, 0xd2, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x70, 0x63, 0x61, 0x63, 0x68, 0x65, 0x2e, 0x63, 0x0, 0x48, 0x2a, 0x18, 0x8b, 0xee, 0x19, 0x91, 0xbc, 0x8a, 0xda, 0xc9, 0x6a, 0x19, 0x3a, 0x53, 0xe5, 0x46, 0x2a, 0x8c, 0x10, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x70, 0x63, 0x61, 0x63, 0x68, 0x65, 0x2e, 0x68, 0x0, 0xf4, 0xd4, 0xad, 0x71, 0xc1, 0xd, 0x78, 0xc6, 0xda, 0xbd, 0xe2, 0x52, 0x15, 0xcd, 0x41, 0x5a, 0x76, 0x1, 0x48, 0xca, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x70, 0x63, 0x61, 0x63, 0x68, 0x65, 0x31, 0x2e, 0x63, 0x0, 0x41, 0x47, 0xd2, 0xef, 0xf5, 0x5b, 0xdd, 0x9f, 0xf7, 0xc6, 0x86, 0xc, 0x60, 0x18, 0x10, 0x20, 0x16, 0x6c, 0x5f, 0x50, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x2e, 0x63, 0x0, 0x22, 0x97, 0x71, 0x69, 0x61, 0x7d, 0x49, 0x22, 0xb3, 0x99, 0x3f, 0x76, 0x9d, 0x90, 0xfa, 0x7b, 0xc4, 0x41, 0xea, 0x50, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x2e, 0x63, 0x0, 0xd7, 0x8d, 0x83, 0xcb, 0xd8, 0x78, 0x97, 0xf5, 0x73, 0x30, 0x3f, 0x9f, 0x57, 0xab, 0x8d, 0xe0, 0x24, 0xa6, 0xe3, 0xf8, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x2e, 0x63, 0x0, 0x9f, 0x68, 0xd2, 0x4, 0xff, 0xdc, 0x9f, 0x3d, 0x42, 0x7f, 0x80, 0xa8, 0x23, 0x9a, 0x7f, 0xa3, 0xa9, 0x8a, 0xec, 0xbd, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x2e, 0x63, 0x0, 0x23, 0x4e, 0xbd, 0xf6, 0x58, 0xf4, 0x36, 0xcc, 0x7c, 0x68, 0xf0, 0x27, 0xc4, 0x8b, 0xe, 0x1b, 0x9b, 0xa3, 0x4e, 0x98, 0x31, 
0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x2e, 0x63, 0x0, 0x91, 0xef, 0xca, 0xa1, 0xa1, 0x6b, 0xfc, 0x98, 0xfb, 0x35, 0xd8, 0x5c, 0xad, 0x15, 0x6b, 0x93, 0x53, 0x3e, 0x4e, 0x6, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x72, 0x6f, 0x77, 0x73, 0x65, 0x74, 0x2e, 0x63, 0x0, 0x57, 0x61, 0xf9, 0x85, 0x50, 0xb1, 0x76, 0xcc, 0xe1, 0x1d, 0xcb, 0xce, 0xc9, 0x38, 0x99, 0xa0, 0x75, 0xbb, 0x64, 0xfd, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x2e, 0x63, 0x0, 0xf3, 0xf1, 0x49, 0x9, 0x63, 0x95, 0x5b, 0x8e, 0xd0, 0xc9, 0xfe, 0x6e, 0x1e, 0xec, 0x83, 0x6c, 0x1a, 0x52, 0x94, 0xb4, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x73, 0x68, 0x65, 0x6c, 0x6c, 0x2e, 0x63, 0x0, 0x1b, 0xe2, 0x87, 0x1f, 0xed, 0x9a, 0x1f, 0xdf, 0x1d, 0xf7, 0x19, 0x8e, 0x11, 0x25, 0x36, 0x0, 0xec, 0xba, 0x76, 0xcc, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x73, 0x71, 0x6c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x2e, 0x68, 0x0, 0x82, 0x75, 0x30, 0x95, 0xcd, 0x17, 0x23, 0xc5, 0xff, 0x4f, 0x11, 0x15, 0xe4, 0x97, 0x55, 0x91, 0xee, 0x34, 0xf5, 0xce, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x73, 0x71, 0x6c, 0x69, 0x74, 0x65, 0x2e, 0x68, 0x2e, 0x69, 0x6e, 0x0, 0x66, 0x8, 0x82, 0x31, 0x75, 0xde, 0x5b, 0x6a, 0xd, 0x37, 0x8f, 0xdb, 0xc, 0x38, 0x18, 0xb6, 0xab, 0x4f, 0xbf, 0x8e, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x73, 0x71, 0x6c, 0x69, 0x74, 0x65, 0x33, 0x2e, 0x72, 0x63, 0x0, 0x96, 0x98, 0x76, 0xda, 0x1e, 0x57, 0x14, 0x3d, 0xe0, 0xb4, 0xd1, 0xc7, 0x62, 0x9f, 0xd3, 0x35, 0x6f, 0x2e, 0x1c, 0x96, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x73, 0x71, 0x6c, 0x69, 0x74, 0x65, 0x33, 0x65, 0x78, 0x74, 0x2e, 0x68, 0x0, 0x92, 0x8b, 0xb3, 0xba, 0xd9, 0xdd, 0x64, 0x3c, 0x30, 0x1d, 0xd2, 0xb0, 0xac, 0x22, 0x28, 0x7a, 0x81, 0x28, 0x48, 0x84, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x73, 0x71, 0x6c, 0x69, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x2e, 0x68, 0x0, 0x59, 0x50, 0xf2, 0x37, 0xd9, 0xf9, 0xf2, 0xd3, 0xef, 0x6b, 0xd8, 0xbe, 0x34, 0x2d, 0xcf, 0x64, 0x89, 
0x22, 0x51, 0x42, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x73, 0x71, 0x6c, 0x69, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x68, 0x0, 0xc7, 0xae, 0xe5, 0x3c, 0xeb, 0xca, 0x94, 0xda, 0x51, 0xe7, 0x1a, 0x82, 0x2e, 0xa5, 0xa6, 0xde, 0xb9, 0x3, 0x85, 0xdf, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x63, 0x0, 0x28, 0x34, 0x9e, 0x6d, 0x3d, 0x20, 0x88, 0xe0, 0x0, 0x3b, 0x76, 0xf8, 0xa, 0x89, 0x54, 0xfa, 0xec, 0x59, 0x30, 0xba, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x63, 0x0, 0x26, 0xbb, 0xfb, 0x4f, 0x45, 0x6c, 0x42, 0x98, 0x25, 0x29, 0xea, 0x1a, 0x63, 0xa0, 0x17, 0x51, 0xdd, 0x3e, 0xe9, 0x5a, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x63, 0x6c, 0x73, 0x71, 0x6c, 0x69, 0x74, 0x65, 0x2e, 0x63, 0x0, 0xf1, 0xbb, 0x29, 0x21, 0xda, 0xc, 0x68, 0xa4, 0xf1, 0xc8, 0xe1, 0x5c, 0xf5, 0x66, 0xb2, 0x33, 0xe9, 0x2a, 0x51, 0x9f, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x31, 0x2e, 0x63, 0x0, 0xa6, 0x38, 0xe4, 0x80, 0xad, 0xdf, 0x14, 0x43, 0x9c, 0xdf, 0xa4, 0xee, 0x16, 0x4d, 0xc3, 0x1b, 0x79, 0xf8, 0xbc, 0xac, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x32, 0x2e, 0x63, 0x0, 0xd1, 0x30, 0xe9, 0xd0, 0x1b, 0x70, 0x24, 0xa5, 0xec, 0x6d, 0x73, 0x5, 0x92, 0xee, 0x4d, 0x1f, 0xb0, 0x2c, 0xfd, 0xb4, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x33, 0x2e, 0x63, 0x0, 0xe3, 0xed, 0x31, 0xc, 0x81, 0x4, 0xfe, 0x36, 0x21, 0xce, 0xbb, 0xf, 0x51, 0xd1, 0x1, 0x45, 0x1, 0x8d, 0x4f, 0xac, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x34, 0x2e, 0x63, 0x0, 0xa6, 0x37, 0x5c, 0x7c, 0xc4, 0x3, 0xf6, 0xc, 0xaa, 0xb7, 0xe9, 0x59, 0x53, 0x3e, 0x3d, 0xb1, 0xff, 0x75, 0xa, 0xe4, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x35, 0x2e, 0x63, 0x0, 0x30, 0x3d, 0x12, 0x5, 0xb2, 0x26, 0x28, 0x42, 0x3d, 0x98, 0x6f, 0x71, 0xe2, 0x7c, 0x7c, 0xf7, 0x14, 0xa7, 0x45, 0xa6, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 
0x65, 0x73, 0x74, 0x36, 0x2e, 0x63, 0x0, 0xc1, 0x51, 0xea, 0x42, 0x98, 0x9b, 0xb, 0xe2, 0x4e, 0xe4, 0xb9, 0xa4, 0xbe, 0x37, 0x8b, 0x4f, 0x63, 0x6d, 0xb6, 0x41, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x37, 0x2e, 0x63, 0x0, 0x3c, 0xd4, 0xa2, 0x24, 0xd7, 0xe8, 0xe1, 0x6b, 0xd7, 0xcb, 0xe4, 0x9e, 0x2d, 0x3e, 0x94, 0xce, 0x9b, 0x17, 0xbd, 0x76, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x38, 0x2e, 0x63, 0x0, 0xc5, 0x73, 0x93, 0x32, 0xd4, 0x6e, 0x57, 0x12, 0x1d, 0xa2, 0x7c, 0x3e, 0x88, 0xfd, 0xe7, 0x5a, 0xeb, 0x87, 0x10, 0xf7, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x39, 0x2e, 0x63, 0x0, 0xe5, 0x99, 0x3e, 0x8f, 0xf7, 0x8f, 0x61, 0xc2, 0x43, 0x5b, 0x6f, 0x97, 0xa3, 0xb4, 0x63, 0xe2, 0x27, 0xc7, 0x67, 0xac, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x63, 0x0, 0xb0, 0xb9, 0x43, 0x18, 0x5b, 0xfc, 0x23, 0xc1, 0x7f, 0xd0, 0x8f, 0x55, 0x76, 0x8c, 0xac, 0x12, 0xa9, 0xf5, 0x69, 0x51, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x65, 0x78, 0x74, 0x2e, 0x63, 0x0, 0xb5, 0x1, 0x3f, 0x31, 0x73, 0xa2, 0x17, 0x6e, 0x2d, 0x9f, 0xc, 0xaa, 0x99, 0x19, 0x30, 0x36, 0xbf, 0xc3, 0x7e, 0x91, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x63, 0x0, 0xe9, 0x67, 0x42, 0x4a, 0x29, 0xf, 0x73, 0x8a, 0xec, 0xfd, 0xac, 0x57, 0x8e, 0x9b, 0x87, 0xa4, 0xc4, 0xae, 0x8d, 0x7f, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x63, 0x0, 0xdb, 0x72, 0x88, 0x9b, 0x2a, 0xfb, 0x62, 0x72, 0x82, 0x8d, 0xda, 0x86, 0x6d, 0xcc, 0xf1, 0x22, 0xa4, 0x9a, 0x72, 0x99, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x0, 0x53, 0x47, 0x27, 0xa0, 0x80, 0x42, 0xb6, 0xca, 0xd6, 0x7e, 0x26, 0x7e, 0x87, 0xb4, 0x3, 0xa4, 0x1a, 0x73, 0xb2, 
0x99, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x65, 0x6d, 0x6f, 0x76, 0x66, 0x73, 0x2e, 0x63, 0x0, 0x63, 0x76, 0x27, 0x7, 0x1d, 0x9e, 0x28, 0xf4, 0xb3, 0x45, 0x1b, 0xbb, 0xdd, 0xf8, 0x8, 0xd1, 0xa9, 0x12, 0x0, 0xf8, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x65, 0x76, 0x73, 0x79, 0x6d, 0x2e, 0x63, 0x0, 0x21, 0xf0, 0xf6, 0x84, 0xd8, 0x61, 0x11, 0x67, 0x70, 0xde, 0xfc, 0xde, 0xcd, 0x53, 0x2b, 0xa3, 0xee, 0xab, 0xa9, 0x75, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x66, 0x73, 0x2e, 0x63, 0x0, 0x47, 0x8c, 0xad, 0x80, 0xb1, 0x6a, 0x90, 0x9b, 0x23, 0xbd, 0x3, 0xc2, 0xda, 0xd8, 0xb4, 0x49, 0xa7, 0x45, 0x87, 0xa1, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x2e, 0x63, 0x0, 0x6f, 0x9b, 0xb0, 0x3d, 0xc8, 0x8a, 0x21, 0xd6, 0x58, 0xbf, 0x99, 0x99, 0xba, 0xf6, 0x6d, 0xc1, 0xd5, 0x2e, 0xbc, 0x54, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x78, 0x69, 0x6f, 0x2e, 0x63, 0x0, 0xb2, 0xb, 0x5c, 0xe7, 0x30, 0xab, 0x7f, 0xa8, 0x0, 0xd2, 0xd0, 0xcc, 0x38, 0xc7, 0x72, 0x75, 0x59, 0x3e, 0xbd, 0xbb, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x2e, 0x63, 0x0, 0xe3, 0x72, 0x4d, 0x8b, 0xe3, 0x14, 0xdb, 0x9, 0xee, 0xa8, 0x4, 0xb, 0x9d, 0xdf, 0xc8, 0xa8, 0xbe, 0xee, 0x22, 0x91, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x61, 0x72, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x0, 0xf5, 0xc3, 0xd9, 0xe4, 0x5, 0x9a, 0x16, 0x56, 0x7, 0x34, 0x7, 0xe4, 0x3a, 0x92, 0x11, 0x79, 0x99, 0x69, 0x7b, 0x93, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x61, 0x72, 0x72, 0x61, 0x79, 0x2e, 0x68, 0x0, 0x69, 0x13, 0x37, 0xd1, 0xae, 0xd6, 0x37, 0x15, 0xd6, 0x2e, 0x76, 0x26, 0x6f, 0xf, 0x3b, 0x50, 0x8b, 0x1, 0xa, 0x34, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 
0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x0, 0xe8, 0x70, 0x1a, 0x4e, 0xea, 0xdb, 0x8e, 0xad, 0x16, 0x9d, 0x60, 0x6, 0x40, 0x7d, 0x54, 0xa8, 0x98, 0x59, 0x2d, 0x70, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x78, 0x74, 0x2e, 0x63, 0x0, 0x11, 0x37, 0xe3, 0xa9, 0xaa, 0xe9, 0x29, 0x6, 0xb8, 0x28, 0x9f, 0x6c, 0x3d, 0xaa, 0x61, 0xf0, 0xd0, 0x70, 0xf5, 0x5a, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x2e, 0x63, 0x0, 0xcf, 0x98, 0xa8, 0xfb, 0x21, 0x82, 0xc0, 0xba, 0xf5, 0xa, 0xd5, 0x79, 0x79, 0xb6, 0x75, 0xbb, 0x70, 0x7a, 0x93, 0xb0, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x2e, 0x63, 0x0, 0x62, 0x45, 0x41, 0xb3, 0x2a, 0x10, 0xd2, 0x1a, 0x2f, 0xd1, 0xa, 0x35, 0xee, 0x66, 0x32, 0xbd, 0xac, 0x55, 0x2d, 0x41, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x2e, 0x68, 0x0, 0xb7, 0xe1, 0xaf, 0xea, 0x5f, 0xd7, 0x8b, 0x87, 0x58, 0x2, 0x65, 0xf8, 0x4c, 0x81, 0x61, 0x2c, 0xbd, 0x2, 0x5b, 0xaf, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x75, 0x74, 0x65, 0x78, 0x2e, 0x63, 0x0, 0xc9, 0xb4, 0xa2, 0x9a, 0xb7, 0x5c, 0x77, 0xea, 0x5f, 0x36, 0xb5, 0x19, 0x32, 0x56, 0xd7, 0xf, 0xe6, 0x58, 0xe, 0x95, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x65, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x63, 0x0, 0x69, 0x86, 0x74, 0x41, 0xb8, 0xcc, 0x9a, 0x62, 0x1a, 0xf3, 0x24, 0x13, 0xfc, 0x63, 0xda, 0x80, 0x99, 0x37, 0x64, 0xf4, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x73, 0x69, 0x6e, 0x73, 0x74, 0x2e, 0x63, 0x0, 0x53, 0x14, 0x33, 0x31, 0x3e, 0xe3, 0x6c, 0x7, 0xeb, 0x21, 0xc0, 0x2f, 0x31, 0x15, 0xcb, 0x7a, 0x37, 0x48, 0x6c, 0x79, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 
0x70, 0x63, 0x61, 0x63, 0x68, 0x65, 0x2e, 0x63, 0x0, 0x8f, 0xcf, 0xe7, 0xe2, 0x6e, 0x3f, 0xf1, 0x74, 0x96, 0xb8, 0x40, 0xf5, 0xd6, 0x3c, 0x75, 0x78, 0x3a, 0xff, 0x81, 0x62, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x2e, 0x63, 0x0, 0xe5, 0x90, 0x99, 0x6c, 0xa4, 0xb8, 0x57, 0x4a, 0xb1, 0xe4, 0x18, 0x5d, 0x57, 0x77, 0x56, 0x66, 0x4a, 0xd2, 0x49, 0x5f, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x2e, 0x68, 0x0, 0x2d, 0x7, 0x67, 0xa1, 0x9a, 0xb7, 0xc3, 0xa4, 0x21, 0xcd, 0xba, 0x6a, 0x3, 0x49, 0x20, 0x43, 0x67, 0xc2, 0x2c, 0x81, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x63, 0x0, 0xf5, 0x4a, 0xe9, 0xb0, 0x63, 0xbb, 0x73, 0x71, 0x2f, 0xcf, 0xc1, 0xc6, 0x83, 0x2e, 0x2a, 0x50, 0xf6, 0x2a, 0x97, 0xe7, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x63, 0x0, 0x12, 0x64, 0x44, 0x67, 0x64, 0x7d, 0x51, 0x39, 0x4a, 0x1, 0xf9, 0xfa, 0x60, 0x37, 0x62, 0x98, 0x18, 0x54, 0x66, 0xfd, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x63, 0x0, 0xed, 0x8, 0x18, 0xe6, 0xf6, 0x5f, 0x27, 0x28, 0x2d, 0xc7, 0xb1, 0xc1, 0x90, 0xec, 0x18, 0x8c, 0x89, 0x33, 0x0, 0x2b, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x71, 0x6c, 0x6c, 0x6f, 0x67, 0x2e, 0x63, 0x0, 0x4a, 0xa6, 0x8b, 0x7c, 0x42, 0x93, 0x23, 0xb8, 0xee, 0xbe, 0x6c, 0x9c, 0x2d, 0x7, 0xfc, 0x66, 0xd, 0x8d, 0x47, 0xc9, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x2e, 0x63, 0x0, 0xd4, 0xc9, 0x2, 0xb5, 0xea, 0x11, 0x1a, 0xd5, 0x8a, 0x73, 0x71, 0x12, 0xc2, 0x8f, 0x0, 0x38, 0x43, 0x4c, 0x85, 0xc0, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x63, 0x0, 0x93, 
0x6f, 0xca, 0xd0, 0xc5, 0x6f, 0x6b, 0xc8, 0x58, 0x9, 0x74, 0x2f, 0x6a, 0xe1, 0xc1, 0xee, 0xb8, 0xb7, 0xd2, 0xf1, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x79, 0x73, 0x63, 0x61, 0x6c, 0x6c, 0x2e, 0x63, 0x0, 0x7c, 0x8, 0x73, 0xc1, 0x6d, 0x84, 0x32, 0x2, 0xf3, 0xe, 0x2d, 0xb9, 0x45, 0x9f, 0xa2, 0x99, 0x75, 0xea, 0x5e, 0x68, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x63, 0x6c, 0x76, 0x61, 0x72, 0x2e, 0x63, 0x0, 0x12, 0x19, 0x19, 0xc, 0x3, 0x0, 0xfd, 0x5e, 0xc7, 0xa3, 0xc5, 0x84, 0x8, 0xf3, 0x38, 0x43, 0xd2, 0xe, 0xee, 0x15, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x2e, 0x63, 0x0, 0x2f, 0x93, 0x63, 0xb7, 0x50, 0x1e, 0x51, 0x19, 0x81, 0xfe, 0x32, 0x83, 0x1f, 0xf2, 0xe8, 0xfd, 0x2f, 0x30, 0xc4, 0x93, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x66, 0x73, 0x2e, 0x63, 0x0, 0xfc, 0xd5, 0x77, 0x43, 0x9c, 0xfd, 0x6c, 0x72, 0xdd, 0xe4, 0x83, 0x58, 0x92, 0x14, 0x20, 0xcf, 0x6e, 0xf1, 0xf8, 0x6d, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x66, 0x73, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x63, 0x0, 0xa, 0xac, 0xc0, 0x1f, 0xe4, 0x2e, 0x77, 0xfe, 0xb8, 0x58, 0xe4, 0xbe, 0xd0, 0xcb, 0x7e, 0x4, 0xa4, 0x35, 0xb2, 0x10, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x77, 0x73, 0x64, 0x2e, 0x63, 0x0, 0x99, 0xe4, 0xa0, 0x56, 0x58, 0x1f, 0x58, 0xf4, 0x53, 0x6f, 0xdb, 0x5a, 0x5d, 0xf7, 0x5c, 0x74, 0x69, 0x8a, 0x81, 0x62, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x69, 0x7a, 0x65, 0x2e, 0x63, 0x0, 0xfa, 0xea, 0x5f, 0x26, 0xc7, 0x9c, 0x5e, 0x18, 0x8f, 0xa8, 0x7f, 0x2f, 0xdf, 0x6f, 0xf7, 0x6a, 0x7a, 0x60, 0x6, 0xc5, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x63, 0x0, 0xf1, 0xff, 0x76, 0x6e, 0x20, 0x2a, 0x45, 0x18, 0xec, 0x10, 0xe5, 0x27, 0x12, 0xc, 0xd3, 0xe, 0x83, 0xfb, 0xd0, 0x34, 
0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x63, 0x0, 0x3a, 0xb1, 0xab, 0x2a, 0x4b, 0x65, 0xda, 0x3f, 0x19, 0x8c, 0x15, 0x84, 0xd5, 0x4d, 0x36, 0xf1, 0x8c, 0xa1, 0x21, 0x4a, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x75, 0x74, 0x66, 0x2e, 0x63, 0x0, 0x6d, 0x5b, 0x1b, 0xfe, 0x40, 0xc, 0x37, 0x48, 0xaa, 0x70, 0xa3, 0xb2, 0xfd, 0x5e, 0xe, 0xac, 0x5f, 0xc0, 0x4d, 0xe2, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x63, 0x0, 0xd8, 0x3a, 0x63, 0x1, 0x5f, 0xd8, 0x7d, 0xcc, 0x4f, 0xb4, 0x41, 0x66, 0xfa, 0xbf, 0x2e, 0x9b, 0xc9, 0x67, 0x1e, 0xb8, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x76, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x2e, 0x63, 0x0, 0x4a, 0xfb, 0x2c, 0xca, 0x64, 0xdd, 0x60, 0x76, 0x11, 0x22, 0x2c, 0x7, 0x93, 0x2d, 0x12, 0xea, 0xcf, 0xa, 0x2c, 0x22, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x76, 0x64, 0x62, 0x65, 0x2e, 0x63, 0x0, 0xf3, 0x43, 0xe1, 0x3d, 0x4e, 0x91, 0x78, 0x4b, 0x15, 0x88, 0x10, 0xc5, 0xb7, 0xd4, 0x46, 0x84, 0xdf, 0xbf, 0xa2, 0xa5, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x76, 0x64, 0x62, 0x65, 0x2e, 0x68, 0x0, 0xfa, 0x7b, 0x31, 0xb7, 0x27, 0xa, 0x90, 0xd4, 0xf6, 0x37, 0x36, 0x5a, 0xfc, 0xc9, 0xbd, 0xa1, 0xd1, 0xb1, 0xe1, 0xd6, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x76, 0x64, 0x62, 0x65, 0x49, 0x6e, 0x74, 0x2e, 0x68, 0x0, 0x3a, 0x5b, 0x40, 0x28, 0xbb, 0xd6, 0xc9, 0x56, 0x10, 0xd7, 0xc, 0xce, 0x3, 0x69, 0xdf, 0xcd, 0x60, 0x7a, 0xa9, 0x0, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x76, 0x64, 0x62, 0x65, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x0, 0x7c, 0x86, 0x1e, 0x2d, 0x47, 0x21, 0x8c, 0x91, 0x63, 0x31, 0x77, 0x77, 0xc3, 0x7, 0x21, 0x99, 0xe9, 0xb4, 0x2, 0x80, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x76, 0x64, 0x62, 0x65, 0x61, 0x75, 0x78, 0x2e, 0x63, 0x0, 0x2c, 0x42, 0x69, 0xa5, 0x9e, 0x6d, 0xbc, 0xe8, 0x67, 0x1c, 0x47, 0x4f, 0x34, 0x61, 0x90, 0xbe, 0x2a, 0xe, 0x18, 0x51, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x76, 0x64, 0x62, 0x65, 0x62, 0x6c, 0x6f, 0x62, 0x2e, 0x63, 0x0, 0x2e, 
0x8f, 0xd8, 0xee, 0x74, 0x47, 0xe6, 0x46, 0x46, 0xe3, 0x49, 0x4b, 0x4c, 0x4, 0x1d, 0x3a, 0x4a, 0xbb, 0x8, 0x85, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x76, 0x64, 0x62, 0x65, 0x6d, 0x65, 0x6d, 0x2e, 0x63, 0x0, 0x8f, 0xc2, 0x22, 0xe2, 0xde, 0x20, 0x50, 0x14, 0x50, 0xec, 0xea, 0x9d, 0x4e, 0xbf, 0xaa, 0xc9, 0x81, 0x4a, 0xae, 0x59, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x76, 0x64, 0x62, 0x65, 0x73, 0x6f, 0x72, 0x74, 0x2e, 0x63, 0x0, 0xfd, 0xfc, 0x4a, 0x79, 0xdd, 0xc9, 0x6e, 0x59, 0x9b, 0x1b, 0xe, 0xeb, 0xac, 0xbd, 0xb8, 0x45, 0xc6, 0x38, 0x13, 0xb2, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x76, 0x64, 0x62, 0x65, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x63, 0x0, 0x35, 0x62, 0x77, 0xe8, 0xd2, 0x3b, 0xca, 0xdb, 0x67, 0x6b, 0x59, 0xd1, 0xa4, 0xdc, 0xf8, 0x42, 0xfd, 0xc4, 0xc9, 0x72, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x76, 0x74, 0x61, 0x62, 0x2e, 0x63, 0x0, 0x95, 0x82, 0x2, 0xc3, 0x1e, 0x24, 0x15, 0xb, 0x60, 0xf1, 0xa, 0x8a, 0xf, 0x74, 0x41, 0xaf, 0xac, 0x3f, 0xbb, 0x1c, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x77, 0x61, 0x6c, 0x2e, 0x63, 0x0, 0xe6, 0x42, 0xea, 0x21, 0x5, 0xb5, 0xc5, 0x4a, 0xf3, 0x5, 0x88, 0x9, 0x62, 0x69, 0xab, 0x75, 0xcb, 0xef, 0x8f, 0xf2, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x77, 0x61, 0x6c, 0x2e, 0x68, 0x0, 0x9, 0x25, 0x46, 0x35, 0x4b, 0x34, 0xc0, 0xab, 0x3d, 0x20, 0x5, 0x6a, 0x7f, 0x8a, 0x8a, 0x52, 0xe4, 0xd0, 0xb5, 0xf5, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x77, 0x61, 0x6c, 0x6b, 0x65, 0x72, 0x2e, 0x63, 0x0, 0xe7, 0x1e, 0xd2, 0xac, 0x48, 0x4c, 0x91, 0x6c, 0x1c, 0xc1, 0x0, 0x7e, 0x5e, 0x5, 0xda, 0x47, 0x1c, 0xb4, 0x95, 0x99, 0x31, 0x30, 0x30, 0x36, 0x34, 0x34, 0x20, 0x77, 0x68, 0x65, 0x72, 0x65, 0x2e, 0x63, 0x0, 0xe6, 0x14, 0xf4, 0xa6, 0xd8, 0x64, 0xe7, 0xe, 0xc4, 0x32, 0x8d, 0xb, 0xdb, 0x25, 0x4e, 0x3a, 0xc9, 0xf0, 0xd2, 0x87, } obj := &SortReadObject{ t: plumbing.TreeObject, h: plumbing.ZeroHash, cont: cont, sz: 5313, } expected := &Tree{ Entries: []TreeEntry{ { Name: "alter.c", Mode: filemode.Regular, Hash: 
plumbing.Hash{0xa4, 0x9d, 0x33, 0x49, 0xd7, 0xe2, 0x3f, 0xb5, 0x81, 0x19, 0x4f, 0x4c, 0xb5, 0x9a, 0xc0, 0xd5, 0x1b, 0x2, 0x1f, 0x78}, }, { Name: "analyze.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x9a, 0x3e, 0x95, 0x97, 0xdb, 0xb, 0x3, 0x20, 0x77, 0xc9, 0x1d, 0x96, 0x9d, 0x22, 0xc6, 0x27, 0x3f, 0x70, 0x2a, 0xc}, }, { Name: "attach.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xb8, 0xe1, 0x21, 0x99, 0xb5, 0x7d, 0xe8, 0x11, 0xea, 0xe0, 0xd0, 0x61, 0x42, 0xd5, 0xac, 0x4f, 0xd4, 0x30, 0xb1, 0xd8}, }, { Name: "auth.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xd3, 0x8b, 0xb8, 0x36, 0xa7, 0x84, 0xfb, 0xfa, 0xb6, 0xab, 0x7b, 0x3, 0xd4, 0xe6, 0xdd, 0x43, 0xed, 0xc4, 0x1f, 0xa7}, }, { Name: "backup.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x25, 0x2f, 0x61, 0xcf, 0xca, 0xa8, 0xfc, 0xf3, 0x13, 0x7e, 0x8, 0xed, 0x68, 0x47, 0xdc, 0xfe, 0x1d, 0xc1, 0xde, 0x54}, }, { Name: "bitvec.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x52, 0x18, 0x4a, 0xa9, 0x64, 0xce, 0x18, 0x98, 0xf3, 0x5d, 0x1b, 0x3d, 0x87, 0x87, 0x1c, 0x2d, 0xe, 0xf4, 0xc5, 0x3d}, }, { Name: "btmutex.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xd8, 0x7d, 0x4d, 0x5f, 0xee, 0xb6, 0x30, 0x7a, 0xec, 0xdc, 0x9a, 0x83, 0x11, 0x14, 0x89, 0xab, 0x30, 0xc6, 0x78, 0xc3}, }, { Name: "btree.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x3c, 0xa6, 0x5, 0x83, 0xe3, 0xc8, 0xe3, 0x12, 0x0, 0xf9, 0x73, 0xe0, 0xe9, 0xc4, 0x53, 0x62, 0x58, 0xb2, 0x64, 0x39}, }, { Name: "btree.h", Mode: filemode.Regular, Hash: plumbing.Hash{0xac, 0xe0, 0xf8, 0xcd, 0x21, 0x77, 0x70, 0xa2, 0xf6, 0x6b, 0x2e, 0xb8, 0x71, 0xbb, 0xc5, 0xfd, 0xc6, 0xfc, 0x2b, 0x68}, }, { Name: "btreeInt.h", Mode: filemode.Regular, Hash: plumbing.Hash{0xce, 0x3c, 0x54, 0x93, 0xf8, 0xca, 0xd0, 0xbc, 0x54, 0x8a, 0xe8, 0xe4, 0x4e, 0x51, 0x28, 0x31, 0xd8, 0xfa, 0xc4, 0x31}, }, { Name: "build.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x3c, 0x91, 0xcd, 0xcf, 0xdb, 0x7b, 0x1, 0x7c, 0xbc, 0x2d, 0x5c, 0x29, 0x57, 0x1a, 0x98, 0x27, 0xd, 0xe0, 0x71, 
0xe6}, }, { Name: "callback.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xd4, 0xc, 0x65, 0xcb, 0x92, 0x45, 0x80, 0x29, 0x6a, 0xd0, 0x69, 0xa0, 0x4b, 0xf9, 0xc9, 0xe9, 0x53, 0x4e, 0xca, 0xa7}, }, { Name: "complete.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x9e, 0x91, 0x40, 0x8, 0x5c, 0x0, 0x46, 0xed, 0x3b, 0xf6, 0xf4, 0x48, 0x52, 0x20, 0x69, 0x2d, 0xca, 0x17, 0x43, 0xc5}, }, { Name: "crypto.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x25, 0x51, 0xe6, 0xba, 0x2, 0x39, 0xf8, 0x5a, 0x35, 0x77, 0x96, 0xa8, 0xdd, 0xa8, 0xca, 0x3e, 0x29, 0x70, 0x93, 0xf8}, }, { Name: "crypto.h", Mode: filemode.Regular, Hash: plumbing.Hash{0xf7, 0x1f, 0x53, 0x2c, 0xdc, 0x44, 0x8f, 0xa, 0x1d, 0xd5, 0xc6, 0xef, 0xf5, 0xfb, 0xd3, 0x3a, 0x91, 0x55, 0xaa, 0x97}, }, { Name: "crypto_cc.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x53, 0x7d, 0xf7, 0xe3, 0xb3, 0x6a, 0xb5, 0xcf, 0xdd, 0x6f, 0xca, 0x40, 0x28, 0xeb, 0xca, 0xe1, 0x86, 0x87, 0xd6, 0x4d}, }, { Name: "crypto_impl.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xa5, 0x89, 0x27, 0xc7, 0x6e, 0xf6, 0x20, 0x56, 0x77, 0xbe, 0x5c, 0x1a, 0x8e, 0x80, 0xc9, 0x83, 0x56, 0xb3, 0xa9, 0xd3}, }, { Name: "crypto_libtomcrypt.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x1a, 0x33, 0x83, 0xe0, 0x1, 0xa7, 0x21, 0x11, 0xc3, 0xf6, 0x61, 0x92, 0x22, 0xb0, 0x65, 0xf4, 0xbd, 0x1, 0xb, 0xe1}, }, { Name: "crypto_openssl.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xd0, 0x19, 0x81, 0x3b, 0x47, 0x6c, 0x52, 0xd0, 0x20, 0xe2, 0xc0, 0xac, 0xd5, 0x24, 0xe9, 0xea, 0x3d, 0xf, 0xb9, 0xfe}, }, { Name: "ctime.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x60, 0x59, 0x5f, 0xf8, 0x8d, 0x92, 0xf7, 0x8, 0x26, 0x4, 0xfb, 0xd9, 0xdf, 0x9a, 0xfe, 0xa1, 0x6a, 0xe8, 0x6f, 0xf}, }, { Name: "date.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x75, 0x8d, 0xd7, 0xc8, 0x9b, 0xca, 0x39, 0x37, 0xa9, 0xd, 0x70, 0x6e, 0xa9, 0x82, 0xce, 0x3a, 0xcf, 0x11, 0xd1, 0x83}, }, { Name: "delete.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x63, 0x4e, 0x11, 0x55, 0x63, 
0xae, 0x12, 0xba, 0x65, 0x58, 0xcc, 0xc5, 0x12, 0xae, 0xd6, 0x31, 0xc0, 0x66, 0xba, 0xd8}, }, { Name: "expr.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x66, 0x3, 0x97, 0xe0, 0x78, 0xae, 0x48, 0xb2, 0xe7, 0x17, 0x5e, 0x33, 0x85, 0x67, 0x78, 0x19, 0x72, 0x2d, 0xdd, 0x6c}, }, { Name: "fault.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xc3, 0x2, 0x8c, 0x4f, 0x93, 0x6e, 0xdf, 0x96, 0x71, 0x2d, 0xbe, 0x73, 0xa0, 0x76, 0x62, 0xf0, 0xa2, 0x6b, 0x1d, 0xa}, }, { Name: "fkey.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xac, 0x35, 0xbc, 0x19, 0x4c, 0xde, 0xb1, 0x27, 0x98, 0x9b, 0x9, 0x40, 0x35, 0xce, 0xe0, 0x6f, 0x57, 0x37, 0x6f, 0x5e}, }, { Name: "func.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xc0, 0x2f, 0x9, 0x6a, 0xda, 0xd5, 0xbc, 0xe9, 0xac, 0x83, 0xd3, 0x5f, 0xf, 0x46, 0x9, 0xd6, 0xf6, 0xd4, 0x3b, 0xe5}, }, { Name: "global.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x7b, 0x2, 0xcf, 0x21, 0x30, 0xe0, 0xd1, 0xa7, 0xb8, 0x89, 0xd8, 0x44, 0xc, 0xcc, 0x82, 0x8, 0xf7, 0xb6, 0x7b, 0xf9}, }, { Name: "hash.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xe8, 0x1d, 0xcf, 0x95, 0xe4, 0x38, 0x48, 0xfa, 0x70, 0x86, 0xb7, 0xf7, 0x81, 0xc0, 0x90, 0xad, 0xc7, 0xe6, 0xca, 0x8e}, }, { Name: "hash.h", Mode: filemode.Regular, Hash: plumbing.Hash{0x82, 0xb7, 0xc5, 0x8c, 0x71, 0x9, 0xb, 0x54, 0x7e, 0x10, 0x17, 0x42, 0xaa, 0x9, 0x51, 0x73, 0x9f, 0xf2, 0xee, 0xe7}, }, { Name: "hwtime.h", Mode: filemode.Regular, Hash: plumbing.Hash{0xb8, 0xbc, 0x5a, 0x29, 0x5b, 0xe3, 0xfa, 0xc8, 0x35, 0x1f, 0xa9, 0xf0, 0x8a, 0x77, 0x57, 0x9d, 0x59, 0xc9, 0xa8, 0xe4}, }, { Name: "insert.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x9a, 0x56, 0x61, 0xf5, 0x9a, 0x72, 0x95, 0x2b, 0xe6, 0xc1, 0x67, 0xa0, 0xc2, 0xdb, 0x15, 0x9b, 0x91, 0xb7, 0x1f, 0xae}, }, { Name: "journal.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xfe, 0xd2, 0x7b, 0xe3, 0xe3, 0x80, 0x55, 0xd2, 0x20, 0x43, 0x95, 0xcd, 0xe6, 0xff, 0xc9, 0x45, 0x89, 0xfb, 0xf5, 0xe8}, }, { Name: "legacy.c", Mode: filemode.Regular, 
Hash: plumbing.Hash{0x94, 0x64, 0x9a, 0xe7, 0x5, 0xab, 0x93, 0x85, 0x10, 0x8d, 0xd, 0x88, 0x7a, 0xf0, 0x75, 0x92, 0x89, 0xfb, 0x23, 0xcb}, }, { Name: "lempar.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x2a, 0xfa, 0xa6, 0xce, 0xa6, 0xd8, 0x29, 0x60, 0x2c, 0x27, 0x86, 0xc1, 0xf8, 0xa3, 0x7f, 0x56, 0x7c, 0xf6, 0xfd, 0x53}, }, { Name: "loadext.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xcd, 0xcf, 0x6a, 0x93, 0xb8, 0xc4, 0xf, 0x91, 0x4b, 0x94, 0x24, 0xe, 0xf1, 0x4c, 0xb4, 0xa3, 0xa, 0x37, 0xec, 0xa1}, }, { Name: "main.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x39, 0xf6, 0x4, 0x21, 0xe6, 0x81, 0x27, 0x7c, 0xc3, 0xdb, 0xa0, 0x9a, 0xbe, 0x7c, 0xf7, 0x90, 0xd5, 0x28, 0xf5, 0xc3}, }, { Name: "malloc.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x35, 0xa4, 0x4e, 0x5f, 0x61, 0xc2, 0xe4, 0x4c, 0x48, 0x1c, 0x62, 0x51, 0xbd, 0xa, 0xae, 0x7a, 0xcd, 0xa4, 0xde, 0xb}, }, { Name: "mem0.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xd, 0xb, 0x66, 0x67, 0xd6, 0xa, 0x95, 0x5a, 0x6, 0x96, 0xdf, 0x62, 0x89, 0xb4, 0x91, 0x78, 0x96, 0x93, 0x43, 0xaa}, }, { Name: "mem1.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x35, 0x78, 0x49, 0x6f, 0x33, 0x3, 0x7, 0xb2, 0x31, 0xdf, 0xb5, 0x3c, 0xc, 0x2e, 0x1c, 0x6b, 0x32, 0x3d, 0x79, 0x1e}, }, { Name: "mem2.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x26, 0x44, 0x8e, 0xa8, 0xaa, 0xe0, 0x36, 0x6a, 0xf0, 0x54, 0x1a, 0xfe, 0xa4, 0x79, 0xb, 0x42, 0xf4, 0xa6, 0x9b, 0x5a}, }, { Name: "mem3.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x1a, 0x1b, 0x79, 0x1f, 0x28, 0xf8, 0xcf, 0x3c, 0xe4, 0xf9, 0xa3, 0x5c, 0xda, 0xd7, 0xb7, 0x10, 0x75, 0x68, 0xc7, 0x15}, }, { Name: "mem5.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x78, 0x3c, 0xef, 0x61, 0x76, 0xc5, 0x9c, 0xbf, 0x30, 0x91, 0x46, 0x31, 0x9, 0x5a, 0x1a, 0x54, 0xf4, 0xe4, 0x2e, 0x8}, }, { Name: "memjournal.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x5, 0x72, 0x59, 0x48, 0xf6, 0x5d, 0x42, 0x7b, 0x7, 0xf7, 0xf9, 0x29, 0xac, 0xa3, 0xff, 0x22, 0x4b, 0x17, 0x53, 0xdf}, }, { 
Name: "mutex.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xb5, 0x67, 0xe7, 0xc2, 0x7e, 0xf2, 0x4, 0x10, 0x86, 0xaf, 0xe0, 0xf6, 0x96, 0x66, 0xe2, 0x7b, 0xf5, 0x9, 0x8a, 0x59}, }, { Name: "mutex.h", Mode: filemode.Regular, Hash: plumbing.Hash{0x9, 0x78, 0x81, 0x22, 0x52, 0x77, 0x89, 0xa, 0x9c, 0x36, 0xc2, 0x4d, 0x41, 0xf6, 0x11, 0x4d, 0x64, 0xc0, 0x6d, 0xb3}, }, { Name: "mutex_noop.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x45, 0x6e, 0x82, 0xa2, 0x5e, 0x27, 0x1b, 0x6, 0x14, 0xe7, 0xf4, 0xf8, 0x3c, 0x22, 0x85, 0x53, 0xb7, 0xfa, 0x1, 0x58}, }, { Name: "mutex_unix.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xec, 0xa7, 0x29, 0x58, 0x31, 0xc2, 0xf0, 0xee, 0x48, 0xba, 0x54, 0xd0, 0x62, 0x91, 0x4d, 0x6, 0xa1, 0xdd, 0x8e, 0xbe}, }, { Name: "mutex_w32.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x27, 0xd1, 0xa, 0xf5, 0xbd, 0x33, 0x1b, 0xdb, 0x97, 0x3f, 0x61, 0x45, 0xb7, 0x4f, 0x72, 0xb6, 0x7, 0xcf, 0xc4, 0x6e}, }, { Name: "notify.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xfc, 0xab, 0x5b, 0xfa, 0xf0, 0x19, 0x8, 0xd3, 0xde, 0x93, 0xfa, 0x88, 0xb5, 0xea, 0xe9, 0xe9, 0x6c, 0xa3, 0xc8, 0xe8}, }, { Name: "os.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xbe, 0x2e, 0xa4, 0xcf, 0xc0, 0x19, 0x59, 0x93, 0xa3, 0x40, 0xc9, 0x2, 0xae, 0xdd, 0xf1, 0xbe, 0x4b, 0x8e, 0xd7, 0x3a}, }, { Name: "os.h", Mode: filemode.Regular, Hash: plumbing.Hash{0x7, 0xa, 0x2d, 0xdd, 0x17, 0xf7, 0x71, 0xf9, 0x8f, 0xf8, 0xcc, 0xd6, 0xf0, 0x33, 0xbd, 0xac, 0xc5, 0xe9, 0xf6, 0xc}, }, { Name: "os_common.h", Mode: filemode.Regular, Hash: plumbing.Hash{0xf6, 0xc3, 0xe7, 0xff, 0x89, 0x46, 0x30, 0x86, 0x40, 0x18, 0x22, 0xf4, 0x81, 0xe7, 0xe3, 0xb8, 0x7b, 0x2c, 0x78, 0xc7}, }, { Name: "os_unix.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xab, 0xc2, 0x3a, 0x45, 0x2e, 0x72, 0xf7, 0x1c, 0x76, 0xaf, 0xa9, 0x98, 0x3c, 0x3a, 0xd9, 0xd4, 0x25, 0x61, 0x6c, 0x6d}, }, { Name: "os_win.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xae, 0xb0, 0x88, 0x14, 0xb3, 0xda, 0xbe, 0x81, 0xb8, 0x4c, 0xda, 
0x91, 0x85, 0x82, 0xb0, 0xf, 0xfd, 0x86, 0xe4, 0x87}, }, { Name: "pager.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x61, 0x72, 0x7f, 0xaa, 0x9c, 0xf, 0x3d, 0x56, 0x62, 0x65, 0xbe, 0x7e, 0xec, 0x5b, 0x2a, 0x35, 0xf6, 0xa4, 0xbc, 0x9f}, }, { Name: "pager.h", Mode: filemode.Regular, Hash: plumbing.Hash{0x6f, 0x65, 0x91, 0x36, 0xe2, 0x76, 0x7, 0x9d, 0xa4, 0x3a, 0x2e, 0x39, 0xe1, 0xb6, 0x86, 0x37, 0xec, 0xad, 0xcf, 0x68}, }, { Name: "parse.y", Mode: filemode.Regular, Hash: plumbing.Hash{0x83, 0x10, 0xb2, 0x69, 0x89, 0xb0, 0x5b, 0xed, 0x1e, 0x1b, 0x3, 0xda, 0x80, 0xf5, 0xc0, 0xa5, 0x2e, 0x9a, 0xd1, 0xd2}, }, { Name: "pcache.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x48, 0x2a, 0x18, 0x8b, 0xee, 0x19, 0x91, 0xbc, 0x8a, 0xda, 0xc9, 0x6a, 0x19, 0x3a, 0x53, 0xe5, 0x46, 0x2a, 0x8c, 0x10}, }, { Name: "pcache.h", Mode: filemode.Regular, Hash: plumbing.Hash{0xf4, 0xd4, 0xad, 0x71, 0xc1, 0xd, 0x78, 0xc6, 0xda, 0xbd, 0xe2, 0x52, 0x15, 0xcd, 0x41, 0x5a, 0x76, 0x1, 0x48, 0xca}, }, { Name: "pcache1.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x41, 0x47, 0xd2, 0xef, 0xf5, 0x5b, 0xdd, 0x9f, 0xf7, 0xc6, 0x86, 0xc, 0x60, 0x18, 0x10, 0x20, 0x16, 0x6c, 0x5f, 0x50}, }, { Name: "pragma.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x22, 0x97, 0x71, 0x69, 0x61, 0x7d, 0x49, 0x22, 0xb3, 0x99, 0x3f, 0x76, 0x9d, 0x90, 0xfa, 0x7b, 0xc4, 0x41, 0xea, 0x50}, }, { Name: "prepare.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xd7, 0x8d, 0x83, 0xcb, 0xd8, 0x78, 0x97, 0xf5, 0x73, 0x30, 0x3f, 0x9f, 0x57, 0xab, 0x8d, 0xe0, 0x24, 0xa6, 0xe3, 0xf8}, }, { Name: "printf.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x9f, 0x68, 0xd2, 0x4, 0xff, 0xdc, 0x9f, 0x3d, 0x42, 0x7f, 0x80, 0xa8, 0x23, 0x9a, 0x7f, 0xa3, 0xa9, 0x8a, 0xec, 0xbd}, }, { Name: "random.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x23, 0x4e, 0xbd, 0xf6, 0x58, 0xf4, 0x36, 0xcc, 0x7c, 0x68, 0xf0, 0x27, 0xc4, 0x8b, 0xe, 0x1b, 0x9b, 0xa3, 0x4e, 0x98}, }, { Name: "resolve.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x91, 
0xef, 0xca, 0xa1, 0xa1, 0x6b, 0xfc, 0x98, 0xfb, 0x35, 0xd8, 0x5c, 0xad, 0x15, 0x6b, 0x93, 0x53, 0x3e, 0x4e, 0x6}, }, { Name: "rowset.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x57, 0x61, 0xf9, 0x85, 0x50, 0xb1, 0x76, 0xcc, 0xe1, 0x1d, 0xcb, 0xce, 0xc9, 0x38, 0x99, 0xa0, 0x75, 0xbb, 0x64, 0xfd}, }, { Name: "select.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xf3, 0xf1, 0x49, 0x9, 0x63, 0x95, 0x5b, 0x8e, 0xd0, 0xc9, 0xfe, 0x6e, 0x1e, 0xec, 0x83, 0x6c, 0x1a, 0x52, 0x94, 0xb4}, }, { Name: "shell.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x1b, 0xe2, 0x87, 0x1f, 0xed, 0x9a, 0x1f, 0xdf, 0x1d, 0xf7, 0x19, 0x8e, 0x11, 0x25, 0x36, 0x0, 0xec, 0xba, 0x76, 0xcc}, }, { Name: "sqlcipher.h", Mode: filemode.Regular, Hash: plumbing.Hash{0x82, 0x75, 0x30, 0x95, 0xcd, 0x17, 0x23, 0xc5, 0xff, 0x4f, 0x11, 0x15, 0xe4, 0x97, 0x55, 0x91, 0xee, 0x34, 0xf5, 0xce}, }, { Name: "sqlite.h.in", Mode: filemode.Regular, Hash: plumbing.Hash{0x66, 0x8, 0x82, 0x31, 0x75, 0xde, 0x5b, 0x6a, 0xd, 0x37, 0x8f, 0xdb, 0xc, 0x38, 0x18, 0xb6, 0xab, 0x4f, 0xbf, 0x8e}, }, { Name: "sqlite3.rc", Mode: filemode.Regular, Hash: plumbing.Hash{0x96, 0x98, 0x76, 0xda, 0x1e, 0x57, 0x14, 0x3d, 0xe0, 0xb4, 0xd1, 0xc7, 0x62, 0x9f, 0xd3, 0x35, 0x6f, 0x2e, 0x1c, 0x96}, }, { Name: "sqlite3ext.h", Mode: filemode.Regular, Hash: plumbing.Hash{0x92, 0x8b, 0xb3, 0xba, 0xd9, 0xdd, 0x64, 0x3c, 0x30, 0x1d, 0xd2, 0xb0, 0xac, 0x22, 0x28, 0x7a, 0x81, 0x28, 0x48, 0x84}, }, { Name: "sqliteInt.h", Mode: filemode.Regular, Hash: plumbing.Hash{0x59, 0x50, 0xf2, 0x37, 0xd9, 0xf9, 0xf2, 0xd3, 0xef, 0x6b, 0xd8, 0xbe, 0x34, 0x2d, 0xcf, 0x64, 0x89, 0x22, 0x51, 0x42}, }, { Name: "sqliteLimit.h", Mode: filemode.Regular, Hash: plumbing.Hash{0xc7, 0xae, 0xe5, 0x3c, 0xeb, 0xca, 0x94, 0xda, 0x51, 0xe7, 0x1a, 0x82, 0x2e, 0xa5, 0xa6, 0xde, 0xb9, 0x3, 0x85, 0xdf}, }, { Name: "status.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x28, 0x34, 0x9e, 0x6d, 0x3d, 0x20, 0x88, 0xe0, 0x0, 0x3b, 0x76, 0xf8, 0xa, 0x89, 0x54, 0xfa, 0xec, 0x59, 0x30, 
0xba}, }, { Name: "table.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x26, 0xbb, 0xfb, 0x4f, 0x45, 0x6c, 0x42, 0x98, 0x25, 0x29, 0xea, 0x1a, 0x63, 0xa0, 0x17, 0x51, 0xdd, 0x3e, 0xe9, 0x5a}, }, { Name: "tclsqlite.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xf1, 0xbb, 0x29, 0x21, 0xda, 0xc, 0x68, 0xa4, 0xf1, 0xc8, 0xe1, 0x5c, 0xf5, 0x66, 0xb2, 0x33, 0xe9, 0x2a, 0x51, 0x9f}, }, { Name: "test1.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xa6, 0x38, 0xe4, 0x80, 0xad, 0xdf, 0x14, 0x43, 0x9c, 0xdf, 0xa4, 0xee, 0x16, 0x4d, 0xc3, 0x1b, 0x79, 0xf8, 0xbc, 0xac}, }, { Name: "test2.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xd1, 0x30, 0xe9, 0xd0, 0x1b, 0x70, 0x24, 0xa5, 0xec, 0x6d, 0x73, 0x5, 0x92, 0xee, 0x4d, 0x1f, 0xb0, 0x2c, 0xfd, 0xb4}, }, { Name: "test3.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xe3, 0xed, 0x31, 0xc, 0x81, 0x4, 0xfe, 0x36, 0x21, 0xce, 0xbb, 0xf, 0x51, 0xd1, 0x1, 0x45, 0x1, 0x8d, 0x4f, 0xac}, }, { Name: "test4.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xa6, 0x37, 0x5c, 0x7c, 0xc4, 0x3, 0xf6, 0xc, 0xaa, 0xb7, 0xe9, 0x59, 0x53, 0x3e, 0x3d, 0xb1, 0xff, 0x75, 0xa, 0xe4}, }, { Name: "test5.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x30, 0x3d, 0x12, 0x5, 0xb2, 0x26, 0x28, 0x42, 0x3d, 0x98, 0x6f, 0x71, 0xe2, 0x7c, 0x7c, 0xf7, 0x14, 0xa7, 0x45, 0xa6}, }, { Name: "test6.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xc1, 0x51, 0xea, 0x42, 0x98, 0x9b, 0xb, 0xe2, 0x4e, 0xe4, 0xb9, 0xa4, 0xbe, 0x37, 0x8b, 0x4f, 0x63, 0x6d, 0xb6, 0x41}, }, { Name: "test7.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x3c, 0xd4, 0xa2, 0x24, 0xd7, 0xe8, 0xe1, 0x6b, 0xd7, 0xcb, 0xe4, 0x9e, 0x2d, 0x3e, 0x94, 0xce, 0x9b, 0x17, 0xbd, 0x76}, }, { Name: "test8.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xc5, 0x73, 0x93, 0x32, 0xd4, 0x6e, 0x57, 0x12, 0x1d, 0xa2, 0x7c, 0x3e, 0x88, 0xfd, 0xe7, 0x5a, 0xeb, 0x87, 0x10, 0xf7}, }, { Name: "test9.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xe5, 0x99, 0x3e, 0x8f, 0xf7, 0x8f, 0x61, 0xc2, 0x43, 0x5b, 
0x6f, 0x97, 0xa3, 0xb4, 0x63, 0xe2, 0x27, 0xc7, 0x67, 0xac}, }, { Name: "test_async.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xb0, 0xb9, 0x43, 0x18, 0x5b, 0xfc, 0x23, 0xc1, 0x7f, 0xd0, 0x8f, 0x55, 0x76, 0x8c, 0xac, 0x12, 0xa9, 0xf5, 0x69, 0x51}, }, { Name: "test_autoext.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xb5, 0x1, 0x3f, 0x31, 0x73, 0xa2, 0x17, 0x6e, 0x2d, 0x9f, 0xc, 0xaa, 0x99, 0x19, 0x30, 0x36, 0xbf, 0xc3, 0x7e, 0x91}, }, { Name: "test_backup.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xe9, 0x67, 0x42, 0x4a, 0x29, 0xf, 0x73, 0x8a, 0xec, 0xfd, 0xac, 0x57, 0x8e, 0x9b, 0x87, 0xa4, 0xc4, 0xae, 0x8d, 0x7f}, }, { Name: "test_btree.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xdb, 0x72, 0x88, 0x9b, 0x2a, 0xfb, 0x62, 0x72, 0x82, 0x8d, 0xda, 0x86, 0x6d, 0xcc, 0xf1, 0x22, 0xa4, 0x9a, 0x72, 0x99}, }, { Name: "test_config.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x53, 0x47, 0x27, 0xa0, 0x80, 0x42, 0xb6, 0xca, 0xd6, 0x7e, 0x26, 0x7e, 0x87, 0xb4, 0x3, 0xa4, 0x1a, 0x73, 0xb2, 0x99}, }, { Name: "test_demovfs.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x63, 0x76, 0x27, 0x7, 0x1d, 0x9e, 0x28, 0xf4, 0xb3, 0x45, 0x1b, 0xbb, 0xdd, 0xf8, 0x8, 0xd1, 0xa9, 0x12, 0x0, 0xf8}, }, { Name: "test_devsym.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x21, 0xf0, 0xf6, 0x84, 0xd8, 0x61, 0x11, 0x67, 0x70, 0xde, 0xfc, 0xde, 0xcd, 0x53, 0x2b, 0xa3, 0xee, 0xab, 0xa9, 0x75}, }, { Name: "test_fs.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x47, 0x8c, 0xad, 0x80, 0xb1, 0x6a, 0x90, 0x9b, 0x23, 0xbd, 0x3, 0xc2, 0xda, 0xd8, 0xb4, 0x49, 0xa7, 0x45, 0x87, 0xa1}, }, { Name: "test_func.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x6f, 0x9b, 0xb0, 0x3d, 0xc8, 0x8a, 0x21, 0xd6, 0x58, 0xbf, 0x99, 0x99, 0xba, 0xf6, 0x6d, 0xc1, 0xd5, 0x2e, 0xbc, 0x54}, }, { Name: "test_hexio.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xb2, 0xb, 0x5c, 0xe7, 0x30, 0xab, 0x7f, 0xa8, 0x0, 0xd2, 0xd0, 0xcc, 0x38, 0xc7, 0x72, 0x75, 0x59, 0x3e, 0xbd, 0xbb}, }, { Name: "test_init.c", 
Mode: filemode.Regular, Hash: plumbing.Hash{0xe3, 0x72, 0x4d, 0x8b, 0xe3, 0x14, 0xdb, 0x9, 0xee, 0xa8, 0x4, 0xb, 0x9d, 0xdf, 0xc8, 0xa8, 0xbe, 0xee, 0x22, 0x91}, }, { Name: "test_intarray.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xf5, 0xc3, 0xd9, 0xe4, 0x5, 0x9a, 0x16, 0x56, 0x7, 0x34, 0x7, 0xe4, 0x3a, 0x92, 0x11, 0x79, 0x99, 0x69, 0x7b, 0x93}, }, { Name: "test_intarray.h", Mode: filemode.Regular, Hash: plumbing.Hash{0x69, 0x13, 0x37, 0xd1, 0xae, 0xd6, 0x37, 0x15, 0xd6, 0x2e, 0x76, 0x26, 0x6f, 0xf, 0x3b, 0x50, 0x8b, 0x1, 0xa, 0x34}, }, { Name: "test_journal.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xe8, 0x70, 0x1a, 0x4e, 0xea, 0xdb, 0x8e, 0xad, 0x16, 0x9d, 0x60, 0x6, 0x40, 0x7d, 0x54, 0xa8, 0x98, 0x59, 0x2d, 0x70}, }, { Name: "test_loadext.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x11, 0x37, 0xe3, 0xa9, 0xaa, 0xe9, 0x29, 0x6, 0xb8, 0x28, 0x9f, 0x6c, 0x3d, 0xaa, 0x61, 0xf0, 0xd0, 0x70, 0xf5, 0x5a}, }, { Name: "test_malloc.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xcf, 0x98, 0xa8, 0xfb, 0x21, 0x82, 0xc0, 0xba, 0xf5, 0xa, 0xd5, 0x79, 0x79, 0xb6, 0x75, 0xbb, 0x70, 0x7a, 0x93, 0xb0}, }, { Name: "test_multiplex.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x62, 0x45, 0x41, 0xb3, 0x2a, 0x10, 0xd2, 0x1a, 0x2f, 0xd1, 0xa, 0x35, 0xee, 0x66, 0x32, 0xbd, 0xac, 0x55, 0x2d, 0x41}, }, { Name: "test_multiplex.h", Mode: filemode.Regular, Hash: plumbing.Hash{0xb7, 0xe1, 0xaf, 0xea, 0x5f, 0xd7, 0x8b, 0x87, 0x58, 0x2, 0x65, 0xf8, 0x4c, 0x81, 0x61, 0x2c, 0xbd, 0x2, 0x5b, 0xaf}, }, { Name: "test_mutex.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xc9, 0xb4, 0xa2, 0x9a, 0xb7, 0x5c, 0x77, 0xea, 0x5f, 0x36, 0xb5, 0x19, 0x32, 0x56, 0xd7, 0xf, 0xe6, 0x58, 0xe, 0x95}, }, { Name: "test_onefile.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x69, 0x86, 0x74, 0x41, 0xb8, 0xcc, 0x9a, 0x62, 0x1a, 0xf3, 0x24, 0x13, 0xfc, 0x63, 0xda, 0x80, 0x99, 0x37, 0x64, 0xf4}, }, { Name: "test_osinst.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x53, 0x14, 0x33, 0x31, 0x3e, 
0xe3, 0x6c, 0x7, 0xeb, 0x21, 0xc0, 0x2f, 0x31, 0x15, 0xcb, 0x7a, 0x37, 0x48, 0x6c, 0x79}, }, { Name: "test_pcache.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x8f, 0xcf, 0xe7, 0xe2, 0x6e, 0x3f, 0xf1, 0x74, 0x96, 0xb8, 0x40, 0xf5, 0xd6, 0x3c, 0x75, 0x78, 0x3a, 0xff, 0x81, 0x62}, }, {Name: "test_quota.c", Mode: filemode.Regular, Hash: plumbing.Hash{ 0xe5, 0x90, 0x99, 0x6c, 0xa4, 0xb8, 0x57, 0x4a, 0xb1, 0xe4, 0x18, 0x5d, 0x57, 0x77, 0x56, 0x66, 0x4a, 0xd2, 0x49, 0x5f}}, {Name: "test_quota.h", Mode: filemode.Regular, Hash: plumbing.Hash{0x2d, 0x7, 0x67, 0xa1, 0x9a, 0xb7, 0xc3, 0xa4, 0x21, 0xcd, 0xba, 0x6a, 0x3, 0x49, 0x20, 0x43, 0x67, 0xc2, 0x2c, 0x81}, }, { Name: "test_rtree.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xf5, 0x4a, 0xe9, 0xb0, 0x63, 0xbb, 0x73, 0x71, 0x2f, 0xcf, 0xc1, 0xc6, 0x83, 0x2e, 0x2a, 0x50, 0xf6, 0x2a, 0x97, 0xe7}, }, { Name: "test_schema.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x12, 0x64, 0x44, 0x67, 0x64, 0x7d, 0x51, 0x39, 0x4a, 0x1, 0xf9, 0xfa, 0x60, 0x37, 0x62, 0x98, 0x18, 0x54, 0x66, 0xfd}, }, { Name: "test_server.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xed, 0x8, 0x18, 0xe6, 0xf6, 0x5f, 0x27, 0x28, 0x2d, 0xc7, 0xb1, 0xc1, 0x90, 0xec, 0x18, 0x8c, 0x89, 0x33, 0x0, 0x2b}, }, { Name: "test_sqllog.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x4a, 0xa6, 0x8b, 0x7c, 0x42, 0x93, 0x23, 0xb8, 0xee, 0xbe, 0x6c, 0x9c, 0x2d, 0x7, 0xfc, 0x66, 0xd, 0x8d, 0x47, 0xc9}, }, { Name: "test_stat.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xd4, 0xc9, 0x2, 0xb5, 0xea, 0x11, 0x1a, 0xd5, 0x8a, 0x73, 0x71, 0x12, 0xc2, 0x8f, 0x0, 0x38, 0x43, 0x4c, 0x85, 0xc0}, }, { Name: "test_superlock.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x93, 0x6f, 0xca, 0xd0, 0xc5, 0x6f, 0x6b, 0xc8, 0x58, 0x9, 0x74, 0x2f, 0x6a, 0xe1, 0xc1, 0xee, 0xb8, 0xb7, 0xd2, 0xf1}, }, { Name: "test_syscall.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x7c, 0x8, 0x73, 0xc1, 0x6d, 0x84, 0x32, 0x2, 0xf3, 0xe, 0x2d, 0xb9, 0x45, 0x9f, 0xa2, 0x99, 0x75, 0xea, 0x5e, 
0x68}, }, { Name: "test_tclvar.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x12, 0x19, 0x19, 0xc, 0x3, 0x0, 0xfd, 0x5e, 0xc7, 0xa3, 0xc5, 0x84, 0x8, 0xf3, 0x38, 0x43, 0xd2, 0xe, 0xee, 0x15}, }, { Name: "test_thread.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x2f, 0x93, 0x63, 0xb7, 0x50, 0x1e, 0x51, 0x19, 0x81, 0xfe, 0x32, 0x83, 0x1f, 0xf2, 0xe8, 0xfd, 0x2f, 0x30, 0xc4, 0x93}, }, { Name: "test_vfs.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xfc, 0xd5, 0x77, 0x43, 0x9c, 0xfd, 0x6c, 0x72, 0xdd, 0xe4, 0x83, 0x58, 0x92, 0x14, 0x20, 0xcf, 0x6e, 0xf1, 0xf8, 0x6d}, }, { Name: "test_vfstrace.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xa, 0xac, 0xc0, 0x1f, 0xe4, 0x2e, 0x77, 0xfe, 0xb8, 0x58, 0xe4, 0xbe, 0xd0, 0xcb, 0x7e, 0x4, 0xa4, 0x35, 0xb2, 0x10}, }, { Name: "test_wsd.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x99, 0xe4, 0xa0, 0x56, 0x58, 0x1f, 0x58, 0xf4, 0x53, 0x6f, 0xdb, 0x5a, 0x5d, 0xf7, 0x5c, 0x74, 0x69, 0x8a, 0x81, 0x62}, }, { Name: "tokenize.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xfa, 0xea, 0x5f, 0x26, 0xc7, 0x9c, 0x5e, 0x18, 0x8f, 0xa8, 0x7f, 0x2f, 0xdf, 0x6f, 0xf7, 0x6a, 0x7a, 0x60, 0x6, 0xc5}, }, { Name: "trigger.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xf1, 0xff, 0x76, 0x6e, 0x20, 0x2a, 0x45, 0x18, 0xec, 0x10, 0xe5, 0x27, 0x12, 0xc, 0xd3, 0xe, 0x83, 0xfb, 0xd0, 0x34}, }, { Name: "update.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x3a, 0xb1, 0xab, 0x2a, 0x4b, 0x65, 0xda, 0x3f, 0x19, 0x8c, 0x15, 0x84, 0xd5, 0x4d, 0x36, 0xf1, 0x8c, 0xa1, 0x21, 0x4a}, }, { Name: "utf.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x6d, 0x5b, 0x1b, 0xfe, 0x40, 0xc, 0x37, 0x48, 0xaa, 0x70, 0xa3, 0xb2, 0xfd, 0x5e, 0xe, 0xac, 0x5f, 0xc0, 0x4d, 0xe2}, }, { Name: "util.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xd8, 0x3a, 0x63, 0x1, 0x5f, 0xd8, 0x7d, 0xcc, 0x4f, 0xb4, 0x41, 0x66, 0xfa, 0xbf, 0x2e, 0x9b, 0xc9, 0x67, 0x1e, 0xb8}, }, { Name: "vacuum.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x4a, 0xfb, 0x2c, 0xca, 0x64, 0xdd, 
0x60, 0x76, 0x11, 0x22, 0x2c, 0x7, 0x93, 0x2d, 0x12, 0xea, 0xcf, 0xa, 0x2c, 0x22}, }, { Name: "vdbe.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xf3, 0x43, 0xe1, 0x3d, 0x4e, 0x91, 0x78, 0x4b, 0x15, 0x88, 0x10, 0xc5, 0xb7, 0xd4, 0x46, 0x84, 0xdf, 0xbf, 0xa2, 0xa5}, }, { Name: "vdbe.h", Mode: filemode.Regular, Hash: plumbing.Hash{0xfa, 0x7b, 0x31, 0xb7, 0x27, 0xa, 0x90, 0xd4, 0xf6, 0x37, 0x36, 0x5a, 0xfc, 0xc9, 0xbd, 0xa1, 0xd1, 0xb1, 0xe1, 0xd6}, }, { Name: "vdbeInt.h", Mode: filemode.Regular, Hash: plumbing.Hash{0x3a, 0x5b, 0x40, 0x28, 0xbb, 0xd6, 0xc9, 0x56, 0x10, 0xd7, 0xc, 0xce, 0x3, 0x69, 0xdf, 0xcd, 0x60, 0x7a, 0xa9, 0x0}, }, { Name: "vdbeapi.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x7c, 0x86, 0x1e, 0x2d, 0x47, 0x21, 0x8c, 0x91, 0x63, 0x31, 0x77, 0x77, 0xc3, 0x7, 0x21, 0x99, 0xe9, 0xb4, 0x2, 0x80}, }, { Name: "vdbeaux.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x2c, 0x42, 0x69, 0xa5, 0x9e, 0x6d, 0xbc, 0xe8, 0x67, 0x1c, 0x47, 0x4f, 0x34, 0x61, 0x90, 0xbe, 0x2a, 0xe, 0x18, 0x51}, }, { Name: "vdbeblob.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x2e, 0x8f, 0xd8, 0xee, 0x74, 0x47, 0xe6, 0x46, 0x46, 0xe3, 0x49, 0x4b, 0x4c, 0x4, 0x1d, 0x3a, 0x4a, 0xbb, 0x8, 0x85}, }, { Name: "vdbemem.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x8f, 0xc2, 0x22, 0xe2, 0xde, 0x20, 0x50, 0x14, 0x50, 0xec, 0xea, 0x9d, 0x4e, 0xbf, 0xaa, 0xc9, 0x81, 0x4a, 0xae, 0x59}, }, { Name: "vdbesort.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xfd, 0xfc, 0x4a, 0x79, 0xdd, 0xc9, 0x6e, 0x59, 0x9b, 0x1b, 0xe, 0xeb, 0xac, 0xbd, 0xb8, 0x45, 0xc6, 0x38, 0x13, 0xb2}, }, { Name: "vdbetrace.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x35, 0x62, 0x77, 0xe8, 0xd2, 0x3b, 0xca, 0xdb, 0x67, 0x6b, 0x59, 0xd1, 0xa4, 0xdc, 0xf8, 0x42, 0xfd, 0xc4, 0xc9, 0x72}, }, { Name: "vtab.c", Mode: filemode.Regular, Hash: plumbing.Hash{0x95, 0x82, 0x2, 0xc3, 0x1e, 0x24, 0x15, 0xb, 0x60, 0xf1, 0xa, 0x8a, 0xf, 0x74, 0x41, 0xaf, 0xac, 0x3f, 0xbb, 0x1c}, }, { Name: "wal.c", Mode: filemode.Regular, 
Hash: plumbing.Hash{0xe6, 0x42, 0xea, 0x21, 0x5, 0xb5, 0xc5, 0x4a, 0xf3, 0x5, 0x88, 0x9, 0x62, 0x69, 0xab, 0x75, 0xcb, 0xef, 0x8f, 0xf2}, }, { Name: "wal.h", Mode: filemode.Regular, Hash: plumbing.Hash{0x9, 0x25, 0x46, 0x35, 0x4b, 0x34, 0xc0, 0xab, 0x3d, 0x20, 0x5, 0x6a, 0x7f, 0x8a, 0x8a, 0x52, 0xe4, 0xd0, 0xb5, 0xf5}, }, { Name: "walker.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xe7, 0x1e, 0xd2, 0xac, 0x48, 0x4c, 0x91, 0x6c, 0x1c, 0xc1, 0x0, 0x7e, 0x5e, 0x5, 0xda, 0x47, 0x1c, 0xb4, 0x95, 0x99}, }, { Name: "where.c", Mode: filemode.Regular, Hash: plumbing.Hash{0xe6, 0x14, 0xf4, 0xa6, 0xd8, 0x64, 0xe7, 0xe, 0xc4, 0x32, 0x8d, 0xb, 0xdb, 0x25, 0x4e, 0x3a, 0xc9, 0xf0, 0xd2, 0x87}, }, }, Hash: plumbing.Hash{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, s: (storer.EncodedObjectStorer)(nil), m: map[string]*TreeEntry(nil), } var obtained Tree err := obtained.Decode(obj) c.Assert(err, IsNil) c.Assert(entriesEquals(obtained.Entries, expected.Entries), Equals, true) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object/treenoder.go000066400000000000000000000063361345605224300243100ustar00rootroot00000000000000package object import ( "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) // A treenoder is a helper type that wraps git trees into merkletrie // noders. // // As a merkletrie noder doesn't understand the concept of modes (e.g. // file permissions), the treenoder includes the mode of the git tree in // the hash, so changes in the modes will be detected as modifications // to the file contents by the merkletrie difftree algorithm. This is // consistent with how the "git diff-tree" command works. 
type treeNoder struct { parent *Tree // the root node is its own parent name string // empty string for the root node mode filemode.FileMode hash plumbing.Hash children []noder.Noder // memoized } // NewTreeRootNode returns the root node of a Tree func NewTreeRootNode(t *Tree) noder.Noder { if t == nil { return &treeNoder{} } return &treeNoder{ parent: t, name: "", mode: filemode.Dir, hash: t.Hash, } } func (t *treeNoder) isRoot() bool { return t.name == "" } func (t *treeNoder) String() string { return "treeNoder <" + t.name + ">" } func (t *treeNoder) Hash() []byte { if t.mode == filemode.Deprecated { return append(t.hash[:], filemode.Regular.Bytes()...) } return append(t.hash[:], t.mode.Bytes()...) } func (t *treeNoder) Name() string { return t.name } func (t *treeNoder) IsDir() bool { return t.mode == filemode.Dir } // Children will return the children of a treenoder as treenoders, // building them from the children of the wrapped git tree. func (t *treeNoder) Children() ([]noder.Noder, error) { if t.mode != filemode.Dir { return noder.NoChildren, nil } // children are memoized for efficiency if t.children != nil { return t.children, nil } // the parent of the returned children will be ourself as a tree if // we are a not the root treenoder. The root is special as it // is is own parent. parent := t.parent if !t.isRoot() { var err error if parent, err = t.parent.Tree(t.name); err != nil { return nil, err } } return transformChildren(parent) } // Returns the children of a tree as treenoders. // Efficiency is key here. func transformChildren(t *Tree) ([]noder.Noder, error) { var err error var e TreeEntry // there will be more tree entries than children in the tree, // due to submodules and empty directories, but I think it is still // worth it to pre-allocate the whole array now, even if sometimes // is bigger than needed. 
ret := make([]noder.Noder, 0, len(t.Entries)) walker := NewTreeWalker(t, false, nil) // don't recurse // don't defer walker.Close() for efficiency reasons. for { _, e, err = walker.Next() if err == io.EOF { break } if err != nil { walker.Close() return nil, err } ret = append(ret, &treeNoder{ parent: t, name: e.Name, mode: e.Mode, hash: e.Hash, }) } walker.Close() return ret, nil } // len(t.tree.Entries) != the number of elements walked by treewalker // for some reason because of empty directories, submodules, etc, so we // have to walk here. func (t *treeNoder) NumChildren() (int, error) { children, err := t.Children() if err != nil { return 0, err } return len(children), nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/object_test.go000066400000000000000000000024061345605224300233520ustar00rootroot00000000000000package plumbing import . "gopkg.in/check.v1" type ObjectSuite struct{} var _ = Suite(&ObjectSuite{}) func (s *ObjectSuite) TestObjectTypeString(c *C) { c.Assert(CommitObject.String(), Equals, "commit") c.Assert(TreeObject.String(), Equals, "tree") c.Assert(BlobObject.String(), Equals, "blob") c.Assert(TagObject.String(), Equals, "tag") c.Assert(REFDeltaObject.String(), Equals, "ref-delta") c.Assert(OFSDeltaObject.String(), Equals, "ofs-delta") c.Assert(AnyObject.String(), Equals, "any") c.Assert(ObjectType(42).String(), Equals, "unknown") } func (s *ObjectSuite) TestObjectTypeBytes(c *C) { c.Assert(CommitObject.Bytes(), DeepEquals, []byte("commit")) } func (s *ObjectSuite) TestObjectTypeValid(c *C) { c.Assert(CommitObject.Valid(), Equals, true) c.Assert(ObjectType(42).Valid(), Equals, false) } func (s *ObjectSuite) TestParseObjectType(c *C) { for s, e := range map[string]ObjectType{ "commit": CommitObject, "tree": TreeObject, "blob": BlobObject, "tag": TagObject, "ref-delta": REFDeltaObject, "ofs-delta": OFSDeltaObject, } { t, err := ParseObjectType(s) c.Assert(err, IsNil) c.Assert(e, Equals, t) } t, err := ParseObjectType("foo") c.Assert(err, Equals, 
ErrInvalidType) c.Assert(t, Equals, InvalidObject) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/000077500000000000000000000000001345605224300223555ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/000077500000000000000000000000001345605224300234535ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/advrefs.go000066400000000000000000000123651345605224300254430ustar00rootroot00000000000000package packp import ( "fmt" "sort" "strings" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/memory" ) // AdvRefs values represent the information transmitted on an // advertised-refs message. Values from this type are not zero-value // safe, use the New function instead. type AdvRefs struct { // Prefix stores prefix payloads. // // When using this message over (smart) HTTP, you have to add a pktline // before the whole thing with the following payload: // // '# service=$servicename" LF // // Moreover, some (all) git HTTP smart servers will send a flush-pkt // just after the first pkt-line. // // To accommodate both situations, the Prefix field allow you to store // any data you want to send before the actual pktlines. It will also // be filled up with whatever is found on the line. Prefix [][]byte // Head stores the resolved HEAD reference if present. // This can be present with git-upload-pack, not with git-receive-pack. Head *plumbing.Hash // Capabilities are the capabilities. Capabilities *capability.List // References are the hash references. References map[string]plumbing.Hash // Peeled are the peeled hash references. Peeled map[string]plumbing.Hash // Shallows are the shallow object ids. Shallows []plumbing.Hash } // NewAdvRefs returns a pointer to a new AdvRefs value, ready to be used. 
func NewAdvRefs() *AdvRefs { return &AdvRefs{ Prefix: [][]byte{}, Capabilities: capability.NewList(), References: make(map[string]plumbing.Hash), Peeled: make(map[string]plumbing.Hash), Shallows: []plumbing.Hash{}, } } func (a *AdvRefs) AddReference(r *plumbing.Reference) error { switch r.Type() { case plumbing.SymbolicReference: v := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String()) a.Capabilities.Add(capability.SymRef, v) case plumbing.HashReference: a.References[r.Name().String()] = r.Hash() default: return plumbing.ErrInvalidType } return nil } func (a *AdvRefs) AllReferences() (memory.ReferenceStorage, error) { s := memory.ReferenceStorage{} if err := a.addRefs(s); err != nil { return s, plumbing.NewUnexpectedError(err) } return s, nil } func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error { for name, hash := range a.References { ref := plumbing.NewReferenceFromStrings(name, hash.String()) if err := s.SetReference(ref); err != nil { return err } } if a.supportSymrefs() { return a.addSymbolicRefs(s) } return a.resolveHead(s) } // If the server does not support symrefs capability, // we need to guess the reference where HEAD is pointing to. // // Git versions prior to 1.8.4.3 has an special procedure to get // the reference where is pointing to HEAD: // - Check if a reference called master exists. If exists and it // has the same hash as HEAD hash, we can say that HEAD is pointing to master // - If master does not exists or does not have the same hash as HEAD, // order references and check in that order if that reference has the same // hash than HEAD. 
If yes, set HEAD pointing to that branch hash // - If no reference is found, throw an error func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error { if a.Head == nil { return nil } ref, err := s.Reference(plumbing.ReferenceName(plumbing.Master)) // check first if HEAD is pointing to master if err == nil { ok, err := a.createHeadIfCorrectReference(ref, s) if err != nil { return err } if ok { return nil } } if err != nil && err != plumbing.ErrReferenceNotFound { return err } // From here we are trying to guess the branch that HEAD is pointing refIter, err := s.IterReferences() if err != nil { return err } var refNames []string err = refIter.ForEach(func(r *plumbing.Reference) error { refNames = append(refNames, string(r.Name())) return nil }) if err != nil { return err } sort.Strings(refNames) var headSet bool for _, refName := range refNames { ref, err := s.Reference(plumbing.ReferenceName(refName)) if err != nil { return err } ok, err := a.createHeadIfCorrectReference(ref, s) if err != nil { return err } if ok { headSet = true break } } if !headSet { return plumbing.ErrReferenceNotFound } return nil } func (a *AdvRefs) createHeadIfCorrectReference( reference *plumbing.Reference, s storer.ReferenceStorer) (bool, error) { if reference.Hash() == *a.Head { headRef := plumbing.NewSymbolicReference(plumbing.HEAD, reference.Name()) if err := s.SetReference(headRef); err != nil { return false, err } return true, nil } return false, nil } func (a *AdvRefs) addSymbolicRefs(s storer.ReferenceStorer) error { for _, symref := range a.Capabilities.Get(capability.SymRef) { chunks := strings.Split(symref, ":") if len(chunks) != 2 { err := fmt.Errorf("bad number of `:` in symref value (%q)", symref) return plumbing.NewUnexpectedError(err) } name := plumbing.ReferenceName(chunks[0]) target := plumbing.ReferenceName(chunks[1]) ref := plumbing.NewSymbolicReference(name, target) if err := s.SetReference(ref); err != nil { return nil } } return nil } func (a *AdvRefs) 
supportSymrefs() bool { return a.Capabilities.Supports(capability.SymRef) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/advrefs_decode.go000066400000000000000000000145631345605224300267500ustar00rootroot00000000000000package packp import ( "bytes" "encoding/hex" "errors" "fmt" "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" ) // Decode reads the next advertised-refs message form its input and // stores it in the AdvRefs. func (a *AdvRefs) Decode(r io.Reader) error { d := newAdvRefsDecoder(r) return d.Decode(a) } type advRefsDecoder struct { s *pktline.Scanner // a pkt-line scanner from the input stream line []byte // current pkt-line contents, use parser.nextLine() to make it advance nLine int // current pkt-line number for debugging, begins at 1 hash plumbing.Hash // last hash read err error // sticky error, use the parser.error() method to fill this out data *AdvRefs // parsed data is stored here } var ( // ErrEmptyAdvRefs is returned by Decode if it gets an empty advertised // references message. ErrEmptyAdvRefs = errors.New("empty advertised-ref message") // ErrEmptyInput is returned by Decode if the input is empty. ErrEmptyInput = errors.New("empty input") ) func newAdvRefsDecoder(r io.Reader) *advRefsDecoder { return &advRefsDecoder{ s: pktline.NewScanner(r), } } func (d *advRefsDecoder) Decode(v *AdvRefs) error { d.data = v for state := decodePrefix; state != nil; { state = state(d) } return d.err } type decoderStateFn func(*advRefsDecoder) decoderStateFn // fills out the parser stiky error func (d *advRefsDecoder) error(format string, a ...interface{}) { msg := fmt.Sprintf( "pkt-line %d: %s", d.nLine, fmt.Sprintf(format, a...), ) d.err = NewErrUnexpectedData(msg, d.line) } // Reads a new pkt-line from the scanner, makes its payload available as // p.line and increments p.nLine. A successful invocation returns true, // otherwise, false is returned and the sticky error is filled out // accordingly. 
Trims eols at the end of the payloads. func (d *advRefsDecoder) nextLine() bool { d.nLine++ if !d.s.Scan() { if d.err = d.s.Err(); d.err != nil { return false } if d.nLine == 1 { d.err = ErrEmptyInput return false } d.error("EOF") return false } d.line = d.s.Bytes() d.line = bytes.TrimSuffix(d.line, eol) return true } // The HTTP smart prefix is often followed by a flush-pkt. func decodePrefix(d *advRefsDecoder) decoderStateFn { if ok := d.nextLine(); !ok { return nil } if !isPrefix(d.line) { return decodeFirstHash } tmp := make([]byte, len(d.line)) copy(tmp, d.line) d.data.Prefix = append(d.data.Prefix, tmp) if ok := d.nextLine(); !ok { return nil } if !isFlush(d.line) { return decodeFirstHash } d.data.Prefix = append(d.data.Prefix, pktline.Flush) if ok := d.nextLine(); !ok { return nil } return decodeFirstHash } func isPrefix(payload []byte) bool { return len(payload) > 0 && payload[0] == '#' } // If the first hash is zero, then a no-refs is coming. Otherwise, a // list-of-refs is coming, and the hash will be followed by the first // advertised ref. func decodeFirstHash(p *advRefsDecoder) decoderStateFn { // If the repository is empty, we receive a flush here (HTTP). 
if isFlush(p.line) { p.err = ErrEmptyAdvRefs return nil } if len(p.line) < hashSize { p.error("cannot read hash, pkt-line too short") return nil } if _, err := hex.Decode(p.hash[:], p.line[:hashSize]); err != nil { p.error("invalid hash text: %s", err) return nil } p.line = p.line[hashSize:] if p.hash.IsZero() { return decodeSkipNoRefs } return decodeFirstRef } // Skips SP "capabilities^{}" NUL func decodeSkipNoRefs(p *advRefsDecoder) decoderStateFn { if len(p.line) < len(noHeadMark) { p.error("too short zero-id ref") return nil } if !bytes.HasPrefix(p.line, noHeadMark) { p.error("malformed zero-id ref") return nil } p.line = p.line[len(noHeadMark):] return decodeCaps } // decode the refname, expects SP refname NULL func decodeFirstRef(l *advRefsDecoder) decoderStateFn { if len(l.line) < 3 { l.error("line too short after hash") return nil } if !bytes.HasPrefix(l.line, sp) { l.error("no space after hash") return nil } l.line = l.line[1:] chunks := bytes.SplitN(l.line, null, 2) if len(chunks) < 2 { l.error("NULL not found") return nil } ref := chunks[0] l.line = chunks[1] if bytes.Equal(ref, []byte(head)) { l.data.Head = &l.hash } else { l.data.References[string(ref)] = l.hash } return decodeCaps } func decodeCaps(p *advRefsDecoder) decoderStateFn { if err := p.data.Capabilities.Decode(p.line); err != nil { p.error("invalid capabilities: %s", err) return nil } return decodeOtherRefs } // The refs are either tips (obj-id SP refname) or a peeled (obj-id SP refname^{}). // If there are no refs, then there might be a shallow or flush-ptk. 
func decodeOtherRefs(p *advRefsDecoder) decoderStateFn { if ok := p.nextLine(); !ok { return nil } if bytes.HasPrefix(p.line, shallow) { return decodeShallow } if len(p.line) == 0 { return nil } saveTo := p.data.References if bytes.HasSuffix(p.line, peeled) { p.line = bytes.TrimSuffix(p.line, peeled) saveTo = p.data.Peeled } ref, hash, err := readRef(p.line) if err != nil { p.error("%s", err) return nil } saveTo[ref] = hash return decodeOtherRefs } // Reads a ref-name func readRef(data []byte) (string, plumbing.Hash, error) { chunks := bytes.Split(data, sp) switch { case len(chunks) == 1: return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: no space was found") case len(chunks) > 2: return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: more than one space found") default: return string(chunks[1]), plumbing.NewHash(string(chunks[0])), nil } } // Keeps reading shallows until a flush-pkt is found func decodeShallow(p *advRefsDecoder) decoderStateFn { if !bytes.HasPrefix(p.line, shallow) { p.error("malformed shallow prefix, found %q... 
instead", p.line[:len(shallow)]) return nil } p.line = bytes.TrimPrefix(p.line, shallow) if len(p.line) != hashSize { p.error(fmt.Sprintf( "malformed shallow hash: wrong length, expected 40 bytes, read %d bytes", len(p.line))) return nil } text := p.line[:hashSize] var h plumbing.Hash if _, err := hex.Decode(h[:], text); err != nil { p.error("invalid hash text: %s", err) return nil } p.data.Shallows = append(p.data.Shallows, h) if ok := p.nextLine(); !ok { return nil } if len(p.line) == 0 { return nil // succesfull parse of the advertised-refs message } return decodeShallow } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/advrefs_decode_test.go000066400000000000000000000406211345605224300300010ustar00rootroot00000000000000package packp import ( "bytes" "io" "strings" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" . "gopkg.in/check.v1" ) type AdvRefsDecodeSuite struct{} var _ = Suite(&AdvRefsDecodeSuite{}) func (s *AdvRefsDecodeSuite) TestEmpty(c *C) { var buf bytes.Buffer ar := NewAdvRefs() c.Assert(ar.Decode(&buf), Equals, ErrEmptyInput) } func (s *AdvRefsDecodeSuite) TestEmptyFlush(c *C) { var buf bytes.Buffer e := pktline.NewEncoder(&buf) e.Flush() ar := NewAdvRefs() c.Assert(ar.Decode(&buf), Equals, ErrEmptyAdvRefs) } func (s *AdvRefsDecodeSuite) TestEmptyPrefixFlush(c *C) { var buf bytes.Buffer e := pktline.NewEncoder(&buf) e.EncodeString("# service=git-upload-pack") e.Flush() e.Flush() ar := NewAdvRefs() c.Assert(ar.Decode(&buf), Equals, ErrEmptyAdvRefs) } func (s *AdvRefsDecodeSuite) TestShortForHash(c *C) { payloads := []string{ "6ecf0ef2c2dffb796", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*too short.*") } func (s *AdvRefsDecodeSuite) testDecoderErrorMatches(c *C, input io.Reader, pattern string) { ar := NewAdvRefs() c.Assert(ar.Decode(input), ErrorMatches, pattern) } func (s *AdvRefsDecodeSuite) 
TestInvalidFirstHash(c *C) { payloads := []string{ "6ecf0ef2c2dffb796alberto2219af86ec6584e5 HEAD\x00multi_ack thin-pack\n", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*invalid hash.*") } func (s *AdvRefsDecodeSuite) TestZeroId(c *C) { payloads := []string{ "0000000000000000000000000000000000000000 capabilities^{}\x00multi_ack thin-pack\n", pktline.FlushString, } ar := s.testDecodeOK(c, payloads) c.Assert(ar.Head, IsNil) } func (s *AdvRefsDecodeSuite) testDecodeOK(c *C, payloads []string) *AdvRefs { var buf bytes.Buffer e := pktline.NewEncoder(&buf) err := e.EncodeString(payloads...) c.Assert(err, IsNil) ar := NewAdvRefs() c.Assert(ar.Decode(&buf), IsNil) return ar } func (s *AdvRefsDecodeSuite) TestMalformedZeroId(c *C) { payloads := []string{ "0000000000000000000000000000000000000000 wrong\x00multi_ack thin-pack\n", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*malformed zero-id.*") } func (s *AdvRefsDecodeSuite) TestShortZeroId(c *C) { payloads := []string{ "0000000000000000000000000000000000000000 capabi", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*too short zero-id.*") } func (s *AdvRefsDecodeSuite) TestHead(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00", pktline.FlushString, } ar := s.testDecodeOK(c, payloads) c.Assert(*ar.Head, Equals, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) } func (s *AdvRefsDecodeSuite) TestFirstIsNotHead(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master\x00", pktline.FlushString, } ar := s.testDecodeOK(c, payloads) c.Assert(ar.Head, IsNil) c.Assert(ar.References["refs/heads/master"], Equals, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) } func (s *AdvRefsDecodeSuite) TestShortRef(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 H", pktline.FlushString, } r := toPktLines(c, 
payloads) s.testDecoderErrorMatches(c, r, ".*too short.*") } func (s *AdvRefsDecodeSuite) TestNoNULL(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEADofs-delta multi_ack", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*NULL not found.*") } func (s *AdvRefsDecodeSuite) TestNoSpaceAfterHash(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5-HEAD\x00", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*no space after hash.*") } func (s *AdvRefsDecodeSuite) TestNoCaps(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00", pktline.FlushString, } ar := s.testDecodeOK(c, payloads) c.Assert(ar.Capabilities.IsEmpty(), Equals, true) } func (s *AdvRefsDecodeSuite) TestCaps(c *C) { type entry struct { Name capability.Capability Values []string } for _, test := range [...]struct { input []string capabilities []entry }{{ input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00", pktline.FlushString, }, capabilities: []entry{}, }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00\n", pktline.FlushString, }, capabilities: []entry{}, }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta", pktline.FlushString, }, capabilities: []entry{ { Name: capability.OFSDelta, Values: []string(nil), }, }, }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta multi_ack", pktline.FlushString, }, capabilities: []entry{ {Name: capability.OFSDelta, Values: []string(nil)}, {Name: capability.MultiACK, Values: []string(nil)}, }, }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta multi_ack\n", pktline.FlushString, }, capabilities: []entry{ {Name: capability.OFSDelta, Values: []string(nil)}, {Name: capability.MultiACK, Values: []string(nil)}, }, }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 
HEAD\x00symref=HEAD:refs/heads/master agent=foo=bar\n", pktline.FlushString, }, capabilities: []entry{ {Name: capability.SymRef, Values: []string{"HEAD:refs/heads/master"}}, {Name: capability.Agent, Values: []string{"foo=bar"}}, }, }} { ar := s.testDecodeOK(c, test.input) for _, fixCap := range test.capabilities { c.Assert(ar.Capabilities.Supports(fixCap.Name), Equals, true, Commentf("input = %q, capability = %q", test.input, fixCap.Name)) c.Assert(ar.Capabilities.Get(fixCap.Name), DeepEquals, fixCap.Values, Commentf("input = %q, capability = %q", test.input, fixCap.Name)) } } } func (s *AdvRefsDecodeSuite) TestWithPrefix(c *C) { payloads := []string{ "# this is a prefix\n", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta\n", pktline.FlushString, } ar := s.testDecodeOK(c, payloads) c.Assert(len(ar.Prefix), Equals, 1) c.Assert(ar.Prefix[0], DeepEquals, []byte("# this is a prefix")) } func (s *AdvRefsDecodeSuite) TestWithPrefixAndFlush(c *C) { payloads := []string{ "# this is a prefix\n", pktline.FlushString, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta\n", pktline.FlushString, } ar := s.testDecodeOK(c, payloads) c.Assert(len(ar.Prefix), Equals, 2) c.Assert(ar.Prefix[0], DeepEquals, []byte("# this is a prefix")) c.Assert(ar.Prefix[1], DeepEquals, []byte(pktline.FlushString)) } func (s *AdvRefsDecodeSuite) TestOtherRefs(c *C) { for _, test := range [...]struct { input []string references map[string]plumbing.Hash peeled map[string]plumbing.Hash }{{ input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", pktline.FlushString, }, references: make(map[string]plumbing.Hash), peeled: make(map[string]plumbing.Hash), }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "1111111111111111111111111111111111111111 ref/foo", pktline.FlushString, }, references: map[string]plumbing.Hash{ "ref/foo": 
plumbing.NewHash("1111111111111111111111111111111111111111"), }, peeled: make(map[string]plumbing.Hash), }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "1111111111111111111111111111111111111111 ref/foo\n", pktline.FlushString, }, references: map[string]plumbing.Hash{ "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"), }, peeled: make(map[string]plumbing.Hash), }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "1111111111111111111111111111111111111111 ref/foo\n", "2222222222222222222222222222222222222222 ref/bar", pktline.FlushString, }, references: map[string]plumbing.Hash{ "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"), "ref/bar": plumbing.NewHash("2222222222222222222222222222222222222222"), }, peeled: make(map[string]plumbing.Hash), }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "1111111111111111111111111111111111111111 ref/foo^{}\n", pktline.FlushString, }, references: make(map[string]plumbing.Hash), peeled: map[string]plumbing.Hash{ "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"), }, }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "1111111111111111111111111111111111111111 ref/foo\n", "2222222222222222222222222222222222222222 ref/bar^{}", pktline.FlushString, }, references: map[string]plumbing.Hash{ "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"), }, peeled: map[string]plumbing.Hash{ "ref/bar": plumbing.NewHash("2222222222222222222222222222222222222222"), }, }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "51b8b4fb32271d39fbdd760397406177b2b0fd36 refs/pull/10/head\n", 
"02b5a6031ba7a8cbfde5d65ff9e13ecdbc4a92ca refs/pull/100/head\n", "c284c212704c43659bf5913656b8b28e32da1621 refs/pull/100/merge\n", "3d6537dce68c8b7874333a1720958bd8db3ae8ca refs/pull/101/merge\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11^{}\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", pktline.FlushString, }, references: map[string]plumbing.Hash{ "refs/heads/master": plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"), "refs/pull/10/head": plumbing.NewHash("51b8b4fb32271d39fbdd760397406177b2b0fd36"), "refs/pull/100/head": plumbing.NewHash("02b5a6031ba7a8cbfde5d65ff9e13ecdbc4a92ca"), "refs/pull/100/merge": plumbing.NewHash("c284c212704c43659bf5913656b8b28e32da1621"), "refs/pull/101/merge": plumbing.NewHash("3d6537dce68c8b7874333a1720958bd8db3ae8ca"), "refs/tags/v2.6.11": plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"), "refs/tags/v2.6.11-tree": plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"), }, peeled: map[string]plumbing.Hash{ "refs/tags/v2.6.11": plumbing.NewHash("c39ae07f393806ccf406ef966e9a15afc43cc36a"), "refs/tags/v2.6.11-tree": plumbing.NewHash("c39ae07f393806ccf406ef966e9a15afc43cc36a"), }, }} { ar := s.testDecodeOK(c, test.input) comment := Commentf("input = %v\n", test.input) c.Assert(ar.References, DeepEquals, test.references, comment) c.Assert(ar.Peeled, DeepEquals, test.peeled, comment) } } func (s *AdvRefsDecodeSuite) TestMalformedOtherRefsNoSpace(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack thin-pack\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8crefs/tags/v2.6.11\n", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*malformed ref data.*") } func (s *AdvRefsDecodeSuite) TestMalformedOtherRefsMultipleSpaces(c *C) { payloads := []string{ 
"6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack thin-pack\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags v2.6.11\n", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*malformed ref data.*") } func (s *AdvRefsDecodeSuite) TestShallow(c *C) { for _, test := range [...]struct { input []string shallows []plumbing.Hash }{{ input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", pktline.FlushString, }, shallows: []plumbing.Hash{}, }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", pktline.FlushString, }, shallows: []plumbing.Hash{plumbing.NewHash("1111111111111111111111111111111111111111")}, }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", pktline.FlushString, }, shallows: []plumbing.Hash{ plumbing.NewHash("1111111111111111111111111111111111111111"), plumbing.NewHash("2222222222222222222222222222222222222222"), }, }} { ar := s.testDecodeOK(c, test.input) comment := Commentf("input = %v\n", test.input) c.Assert(ar.Shallows, DeepEquals, test.shallows, comment) } } 
func (s *AdvRefsDecodeSuite) TestInvalidShallowHash(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "shallow 11111111alcortes111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*invalid hash text.*") } func (s *AdvRefsDecodeSuite) TestGarbageAfterShallow(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", "b5be40b90dbaa6bd337f3b77de361bfc0723468b refs/tags/v4.4", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*malformed shallow prefix.*") } func (s *AdvRefsDecodeSuite) TestMalformedShallowHash(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222 malformed\n", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*malformed shallow hash.*") } func (s *AdvRefsDecodeSuite) TestEOFRefs(c *C) { input := strings.NewReader("" + 
"005b6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n" + "003fa6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n" + "00355dc01c595e6c6ec9ccda4f6ffbf614e4d92bb0c7 refs/foo\n", ) s.testDecoderErrorMatches(c, input, ".*invalid pkt-len.*") } func (s *AdvRefsDecodeSuite) TestEOFShallows(c *C) { input := strings.NewReader("" + "005b6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n" + "003fa6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n" + "00445dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n" + "0047c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n" + "0035shallow 1111111111111111111111111111111111111111\n" + "0034shallow 222222222222222222222222") s.testDecoderErrorMatches(c, input, ".*unexpected EOF.*") } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/advrefs_encode.go000066400000000000000000000104071345605224300267530ustar00rootroot00000000000000package packp import ( "bytes" "fmt" "io" "sort" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" ) // Encode writes the AdvRefs encoding to a writer. // // All the payloads will end with a newline character. Capabilities, // references and shallows are written in alphabetical order, except for // peeled references that always follow their corresponding references. 
func (a *AdvRefs) Encode(w io.Writer) error { e := newAdvRefsEncoder(w) return e.Encode(a) } type advRefsEncoder struct { data *AdvRefs // data to encode pe *pktline.Encoder // where to write the encoded data firstRefName string // reference name to encode in the first pkt-line (HEAD if present) firstRefHash plumbing.Hash // hash referenced to encode in the first pkt-line (HEAD if present) sortedRefs []string // hash references to encode ordered by increasing order err error // sticky error } func newAdvRefsEncoder(w io.Writer) *advRefsEncoder { return &advRefsEncoder{ pe: pktline.NewEncoder(w), } } func (e *advRefsEncoder) Encode(v *AdvRefs) error { e.data = v e.sortRefs() e.setFirstRef() for state := encodePrefix; state != nil; { state = state(e) } return e.err } func (e *advRefsEncoder) sortRefs() { if len(e.data.References) > 0 { refs := make([]string, 0, len(e.data.References)) for refName := range e.data.References { refs = append(refs, refName) } sort.Strings(refs) e.sortedRefs = refs } } func (e *advRefsEncoder) setFirstRef() { if e.data.Head != nil { e.firstRefName = head e.firstRefHash = *e.data.Head return } if len(e.sortedRefs) > 0 { refName := e.sortedRefs[0] e.firstRefName = refName e.firstRefHash = e.data.References[refName] } } type encoderStateFn func(*advRefsEncoder) encoderStateFn func encodePrefix(e *advRefsEncoder) encoderStateFn { for _, p := range e.data.Prefix { if bytes.Equal(p, pktline.Flush) { if e.err = e.pe.Flush(); e.err != nil { return nil } continue } if e.err = e.pe.Encodef("%s\n", string(p)); e.err != nil { return nil } } return encodeFirstLine } // Adds the first pkt-line payload: head hash, head ref and capabilities. // If HEAD ref is not found, the first reference ordered in increasing order will be used. // If there aren't HEAD neither refs, the first line will be "PKT-LINE(zero-id SP "capabilities^{}" NUL capability-list)". 
// See: https://github.com/git/git/blob/master/Documentation/technical/pack-protocol.txt // See: https://github.com/git/git/blob/master/Documentation/technical/protocol-common.txt func encodeFirstLine(e *advRefsEncoder) encoderStateFn { const formatFirstLine = "%s %s\x00%s\n" var firstLine string capabilities := formatCaps(e.data.Capabilities) if e.firstRefName == "" { firstLine = fmt.Sprintf(formatFirstLine, plumbing.ZeroHash.String(), "capabilities^{}", capabilities) } else { firstLine = fmt.Sprintf(formatFirstLine, e.firstRefHash.String(), e.firstRefName, capabilities) } if e.err = e.pe.EncodeString(firstLine); e.err != nil { return nil } return encodeRefs } func formatCaps(c *capability.List) string { if c == nil { return "" } return c.String() } // Adds the (sorted) refs: hash SP refname EOL // and their peeled refs if any. func encodeRefs(e *advRefsEncoder) encoderStateFn { for _, r := range e.sortedRefs { if r == e.firstRefName { continue } hash := e.data.References[r] if e.err = e.pe.Encodef("%s %s\n", hash.String(), r); e.err != nil { return nil } if hash, ok := e.data.Peeled[r]; ok { if e.err = e.pe.Encodef("%s %s^{}\n", hash.String(), r); e.err != nil { return nil } } } return encodeShallow } // Adds the (sorted) shallows: "shallow" SP hash EOL func encodeShallow(e *advRefsEncoder) encoderStateFn { sorted := sortShallows(e.data.Shallows) for _, hash := range sorted { if e.err = e.pe.Encodef("shallow %s\n", hash); e.err != nil { return nil } } return encodeFlush } func sortShallows(c []plumbing.Hash) []string { ret := []string{} for _, h := range c { ret = append(ret, h.String()) } sort.Strings(ret) return ret } func encodeFlush(e *advRefsEncoder) encoderStateFn { e.err = e.pe.Flush() return nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/advrefs_encode_test.go000066400000000000000000000176201345605224300300160ustar00rootroot00000000000000package packp import ( "bytes" "strings" "gopkg.in/src-d/go-git.v4/plumbing" 
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" . "gopkg.in/check.v1" ) type AdvRefsEncodeSuite struct{} var _ = Suite(&AdvRefsEncodeSuite{}) func testEncode(c *C, input *AdvRefs, expected []byte) { var buf bytes.Buffer c.Assert(input.Encode(&buf), IsNil) obtained := buf.Bytes() comment := Commentf("\nobtained = %s\nexpected = %s\n", string(obtained), string(expected)) c.Assert(obtained, DeepEquals, expected, comment) } func (s *AdvRefsEncodeSuite) TestZeroValue(c *C) { ar := &AdvRefs{} expected := pktlines(c, "0000000000000000000000000000000000000000 capabilities^{}\x00\n", pktline.FlushString, ) testEncode(c, ar, expected) } func (s *AdvRefsEncodeSuite) TestHead(c *C) { hash := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") ar := &AdvRefs{ Head: &hash, } expected := pktlines(c, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00\n", pktline.FlushString, ) testEncode(c, ar, expected) } func (s *AdvRefsEncodeSuite) TestCapsNoHead(c *C) { capabilities := capability.NewList() capabilities.Add(capability.MultiACK) capabilities.Add(capability.OFSDelta) capabilities.Add(capability.SymRef, "HEAD:/refs/heads/master") ar := &AdvRefs{ Capabilities: capabilities, } expected := pktlines(c, "0000000000000000000000000000000000000000 capabilities^{}\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n", pktline.FlushString, ) testEncode(c, ar, expected) } func (s *AdvRefsEncodeSuite) TestCapsWithHead(c *C) { hash := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") capabilities := capability.NewList() capabilities.Add(capability.MultiACK) capabilities.Add(capability.OFSDelta) capabilities.Add(capability.SymRef, "HEAD:/refs/heads/master") ar := &AdvRefs{ Head: &hash, Capabilities: capabilities, } expected := pktlines(c, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n", pktline.FlushString, ) testEncode(c, ar, expected) } func (s 
*AdvRefsEncodeSuite) TestRefs(c *C) { references := map[string]plumbing.Hash{ "refs/heads/master": plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"), "refs/tags/v2.6.12-tree": plumbing.NewHash("1111111111111111111111111111111111111111"), "refs/tags/v2.7.13-tree": plumbing.NewHash("3333333333333333333333333333333333333333"), "refs/tags/v2.6.13-tree": plumbing.NewHash("2222222222222222222222222222222222222222"), "refs/tags/v2.6.11-tree": plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"), } ar := &AdvRefs{ References: references, } expected := pktlines(c, "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\x00\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "1111111111111111111111111111111111111111 refs/tags/v2.6.12-tree\n", "2222222222222222222222222222222222222222 refs/tags/v2.6.13-tree\n", "3333333333333333333333333333333333333333 refs/tags/v2.7.13-tree\n", pktline.FlushString, ) testEncode(c, ar, expected) } func (s *AdvRefsEncodeSuite) TestPeeled(c *C) { references := map[string]plumbing.Hash{ "refs/heads/master": plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"), "refs/tags/v2.6.12-tree": plumbing.NewHash("1111111111111111111111111111111111111111"), "refs/tags/v2.7.13-tree": plumbing.NewHash("3333333333333333333333333333333333333333"), "refs/tags/v2.6.13-tree": plumbing.NewHash("2222222222222222222222222222222222222222"), "refs/tags/v2.6.11-tree": plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"), } peeled := map[string]plumbing.Hash{ "refs/tags/v2.7.13-tree": plumbing.NewHash("4444444444444444444444444444444444444444"), "refs/tags/v2.6.12-tree": plumbing.NewHash("5555555555555555555555555555555555555555"), } ar := &AdvRefs{ References: references, Peeled: peeled, } expected := pktlines(c, "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\x00\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "1111111111111111111111111111111111111111 
refs/tags/v2.6.12-tree\n", "5555555555555555555555555555555555555555 refs/tags/v2.6.12-tree^{}\n", "2222222222222222222222222222222222222222 refs/tags/v2.6.13-tree\n", "3333333333333333333333333333333333333333 refs/tags/v2.7.13-tree\n", "4444444444444444444444444444444444444444 refs/tags/v2.7.13-tree^{}\n", pktline.FlushString, ) testEncode(c, ar, expected) } func (s *AdvRefsEncodeSuite) TestShallow(c *C) { shallows := []plumbing.Hash{ plumbing.NewHash("1111111111111111111111111111111111111111"), plumbing.NewHash("4444444444444444444444444444444444444444"), plumbing.NewHash("3333333333333333333333333333333333333333"), plumbing.NewHash("2222222222222222222222222222222222222222"), } ar := &AdvRefs{ Shallows: shallows, } expected := pktlines(c, "0000000000000000000000000000000000000000 capabilities^{}\x00\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", "shallow 3333333333333333333333333333333333333333\n", "shallow 4444444444444444444444444444444444444444\n", pktline.FlushString, ) testEncode(c, ar, expected) } func (s *AdvRefsEncodeSuite) TestAll(c *C) { hash := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") capabilities := capability.NewList() capabilities.Add(capability.MultiACK) capabilities.Add(capability.OFSDelta) capabilities.Add(capability.SymRef, "HEAD:/refs/heads/master") references := map[string]plumbing.Hash{ "refs/heads/master": plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"), "refs/tags/v2.6.12-tree": plumbing.NewHash("1111111111111111111111111111111111111111"), "refs/tags/v2.7.13-tree": plumbing.NewHash("3333333333333333333333333333333333333333"), "refs/tags/v2.6.13-tree": plumbing.NewHash("2222222222222222222222222222222222222222"), "refs/tags/v2.6.11-tree": plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"), } peeled := map[string]plumbing.Hash{ "refs/tags/v2.7.13-tree": plumbing.NewHash("4444444444444444444444444444444444444444"), 
"refs/tags/v2.6.12-tree": plumbing.NewHash("5555555555555555555555555555555555555555"), } shallows := []plumbing.Hash{ plumbing.NewHash("1111111111111111111111111111111111111111"), plumbing.NewHash("4444444444444444444444444444444444444444"), plumbing.NewHash("3333333333333333333333333333333333333333"), plumbing.NewHash("2222222222222222222222222222222222222222"), } ar := &AdvRefs{ Head: &hash, Capabilities: capabilities, References: references, Peeled: peeled, Shallows: shallows, } expected := pktlines(c, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "1111111111111111111111111111111111111111 refs/tags/v2.6.12-tree\n", "5555555555555555555555555555555555555555 refs/tags/v2.6.12-tree^{}\n", "2222222222222222222222222222222222222222 refs/tags/v2.6.13-tree\n", "3333333333333333333333333333333333333333 refs/tags/v2.7.13-tree\n", "4444444444444444444444444444444444444444 refs/tags/v2.7.13-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", "shallow 3333333333333333333333333333333333333333\n", "shallow 4444444444444444444444444444444444444444\n", pktline.FlushString, ) testEncode(c, ar, expected) } func (s *AdvRefsEncodeSuite) TestErrorTooLong(c *C) { references := map[string]plumbing.Hash{ strings.Repeat("a", pktline.MaxPayloadSize): plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"), } ar := &AdvRefs{ References: references, } var buf bytes.Buffer err := ar.Encode(&buf) c.Assert(err, ErrorMatches, ".*payload is too long.*") } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/advrefs_test.go000066400000000000000000000354221345605224300265010ustar00rootroot00000000000000package packp import ( "bytes" "fmt" "io" "strings" "gopkg.in/src-d/go-git.v4/plumbing" 
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" . "gopkg.in/check.v1" ) type AdvRefSuite struct{} var _ = Suite(&AdvRefSuite{}) func (s *AdvRefSuite) TestAddReferenceSymbolic(c *C) { ref := plumbing.NewSymbolicReference("foo", "bar") a := NewAdvRefs() err := a.AddReference(ref) c.Assert(err, IsNil) values := a.Capabilities.Get(capability.SymRef) c.Assert(values, HasLen, 1) c.Assert(values[0], Equals, "foo:bar") } func (s *AdvRefSuite) TestAddReferenceHash(c *C) { ref := plumbing.NewHashReference("foo", plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c")) a := NewAdvRefs() err := a.AddReference(ref) c.Assert(err, IsNil) c.Assert(a.References, HasLen, 1) c.Assert(a.References["foo"].String(), Equals, "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c") } func (s *AdvRefSuite) TestAllReferences(c *C) { hash := plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c") a := NewAdvRefs() err := a.AddReference(plumbing.NewSymbolicReference("foo", "bar")) c.Assert(err, IsNil) err = a.AddReference(plumbing.NewHashReference("bar", hash)) c.Assert(err, IsNil) refs, err := a.AllReferences() c.Assert(err, IsNil) iter, err := refs.IterReferences() c.Assert(err, IsNil) var count int iter.ForEach(func(ref *plumbing.Reference) error { count++ switch ref.Name() { case "bar": c.Assert(ref.Hash(), Equals, hash) case "foo": c.Assert(ref.Target().String(), Equals, "bar") } return nil }) c.Assert(count, Equals, 2) } func (s *AdvRefSuite) TestAllReferencesBadSymref(c *C) { a := NewAdvRefs() err := a.Capabilities.Set(capability.SymRef, "foo") c.Assert(err, IsNil) _, err = a.AllReferences() c.Assert(err, NotNil) } func (s *AdvRefSuite) TestNoSymRefCapabilityHeadToMaster(c *C) { a := NewAdvRefs() headHash := plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c") a.Head = &headHash ref := plumbing.NewHashReference(plumbing.Master, plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c")) err := a.AddReference(ref) 
c.Assert(err, IsNil) storage, err := a.AllReferences() c.Assert(err, IsNil) head, err := storage.Reference(plumbing.HEAD) c.Assert(err, IsNil) c.Assert(head.Target(), Equals, ref.Name()) } func (s *AdvRefSuite) TestNoSymRefCapabilityHeadToOtherThanMaster(c *C) { a := NewAdvRefs() headHash := plumbing.NewHash("0000000000000000000000000000000000000000") a.Head = &headHash ref1 := plumbing.NewHashReference(plumbing.Master, plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c")) ref2 := plumbing.NewHashReference("other/ref", plumbing.NewHash("0000000000000000000000000000000000000000")) err := a.AddReference(ref1) c.Assert(err, IsNil) err = a.AddReference(ref2) c.Assert(err, IsNil) storage, err := a.AllReferences() c.Assert(err, IsNil) head, err := storage.Reference(plumbing.HEAD) c.Assert(err, IsNil) c.Assert(head.Hash(), Equals, ref2.Hash()) } func (s *AdvRefSuite) TestNoSymRefCapabilityHeadToNoRef(c *C) { a := NewAdvRefs() headHash := plumbing.NewHash("0000000000000000000000000000000000000000") a.Head = &headHash ref := plumbing.NewHashReference(plumbing.Master, plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c")) err := a.AddReference(ref) c.Assert(err, IsNil) _, err = a.AllReferences() c.Assert(err, NotNil) } func (s *AdvRefSuite) TestNoSymRefCapabilityHeadToNoMasterAlphabeticallyOrdered(c *C) { a := NewAdvRefs() headHash := plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c") a.Head = &headHash ref1 := plumbing.NewHashReference(plumbing.Master, plumbing.NewHash("0000000000000000000000000000000000000000")) ref2 := plumbing.NewHashReference("aaaaaaaaaaaaaaa", plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c")) ref3 := plumbing.NewHashReference("bbbbbbbbbbbbbbb", plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c")) err := a.AddReference(ref1) c.Assert(err, IsNil) err = a.AddReference(ref3) c.Assert(err, IsNil) err = a.AddReference(ref2) c.Assert(err, IsNil) storage, err := a.AllReferences() c.Assert(err, IsNil) head, err := 
storage.Reference(plumbing.HEAD) c.Assert(err, IsNil) c.Assert(head.Target(), Equals, ref2.Name()) } type AdvRefsDecodeEncodeSuite struct{} var _ = Suite(&AdvRefsDecodeEncodeSuite{}) func (s *AdvRefsDecodeEncodeSuite) test(c *C, in []string, exp []string) { var err error var input io.Reader { var buf bytes.Buffer p := pktline.NewEncoder(&buf) err = p.EncodeString(in...) c.Assert(err, IsNil) input = &buf } var expected []byte { var buf bytes.Buffer p := pktline.NewEncoder(&buf) err = p.EncodeString(exp...) c.Assert(err, IsNil) expected = buf.Bytes() } var obtained []byte { ar := NewAdvRefs() c.Assert(ar.Decode(input), IsNil) var buf bytes.Buffer c.Assert(ar.Encode(&buf), IsNil) obtained = buf.Bytes() } c.Assert(string(obtained), DeepEquals, string(expected)) } func (s *AdvRefsDecodeEncodeSuite) TestNoHead(c *C) { input := []string{ "0000000000000000000000000000000000000000 capabilities^{}\x00", pktline.FlushString, } expected := []string{ "0000000000000000000000000000000000000000 capabilities^{}\x00\n", pktline.FlushString, } s.test(c, input, expected) } func (s *AdvRefsDecodeEncodeSuite) TestNoHeadSmart(c *C) { input := []string{ "# service=git-upload-pack\n", "0000000000000000000000000000000000000000 capabilities^{}\x00", pktline.FlushString, } expected := []string{ "# service=git-upload-pack\n", "0000000000000000000000000000000000000000 capabilities^{}\x00\n", pktline.FlushString, } s.test(c, input, expected) } func (s *AdvRefsDecodeEncodeSuite) TestNoHeadSmartBug(c *C) { input := []string{ "# service=git-upload-pack\n", pktline.FlushString, "0000000000000000000000000000000000000000 capabilities^{}\x00\n", pktline.FlushString, } expected := []string{ "# service=git-upload-pack\n", pktline.FlushString, "0000000000000000000000000000000000000000 capabilities^{}\x00\n", pktline.FlushString, } s.test(c, input, expected) } func (s *AdvRefsDecodeEncodeSuite) TestRefs(c *C) { input := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 
HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree", pktline.FlushString, } expected := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", pktline.FlushString, } s.test(c, input, expected) } func (s *AdvRefsDecodeEncodeSuite) TestPeeled(c *C) { input := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack", "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", pktline.FlushString, } expected := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", pktline.FlushString, } s.test(c, input, expected) } func (s *AdvRefsDecodeEncodeSuite) TestAll(c *C) { input := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", 
"5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}", "shallow 1111111111111111111111111111111111111111", "shallow 2222222222222222222222222222222222222222\n", pktline.FlushString, } expected := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", pktline.FlushString, } s.test(c, input, expected) } func (s *AdvRefsDecodeEncodeSuite) TestAllSmart(c *C) { input := []string{ "# service=git-upload-pack\n", pktline.FlushString, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", pktline.FlushString, } expected := []string{ "# service=git-upload-pack\n", pktline.FlushString, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", 
"5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", pktline.FlushString, } s.test(c, input, expected) } func (s *AdvRefsDecodeEncodeSuite) TestAllSmartBug(c *C) { input := []string{ "# service=git-upload-pack\n", pktline.FlushString, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", pktline.FlushString, } expected := []string{ "# service=git-upload-pack\n", pktline.FlushString, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", pktline.FlushString, } s.test(c, input, expected) } func ExampleAdvRefs_Decode() { // Here is a raw advertised-ref message. 
raw := "" + "0065a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n" + "003fa6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n" + "00441111111111111111111111111111111111111111 refs/tags/v2.6.11-tree\n" + "00475555555555555555555555555555555555555555 refs/tags/v2.6.11-tree^{}\n" + "0035shallow 5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c\n" + "0000" // Use the raw message as our input. input := strings.NewReader(raw) // Decode the input into a newly allocated AdvRefs value. ar := NewAdvRefs() _ = ar.Decode(input) // error check ignored for brevity // Do something interesting with the AdvRefs, e.g. print its contents. fmt.Println("head =", ar.Head) fmt.Println("capabilities =", ar.Capabilities.String()) fmt.Println("...") fmt.Println("shallows =", ar.Shallows) // Output: head = a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 // capabilities = multi_ack ofs-delta symref=HEAD:/refs/heads/master // ... // shallows = [5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c] } func ExampleAdvRefs_Encode() { // Create an AdvRefs with the contents you want... ar := NewAdvRefs() // ...add a hash for the HEAD... head := plumbing.NewHash("1111111111111111111111111111111111111111") ar.Head = &head // ...add some server capabilities... ar.Capabilities.Add(capability.MultiACK) ar.Capabilities.Add(capability.OFSDelta) ar.Capabilities.Add(capability.SymRef, "HEAD:/refs/heads/master") // ...add a couple of references... ar.References["refs/heads/master"] = plumbing.NewHash("2222222222222222222222222222222222222222") ar.References["refs/tags/v1"] = plumbing.NewHash("3333333333333333333333333333333333333333") // ...including a peeled ref... ar.Peeled["refs/tags/v1"] = plumbing.NewHash("4444444444444444444444444444444444444444") // ...and finally add a shallow ar.Shallows = append(ar.Shallows, plumbing.NewHash("5555555555555555555555555555555555555555")) // Encode the packpContents to a bytes.Buffer. 
// You can encode into stdout too, but you will not be able // see the '\x00' after "HEAD". var buf bytes.Buffer _ = ar.Encode(&buf) // error checks ignored for brevity // Print the contents of the buffer as a quoted string. // Printing is as a non-quoted string will be prettier but you // will miss the '\x00' after "HEAD". fmt.Printf("%q", buf.String()) // Output: // "00651111111111111111111111111111111111111111 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n003f2222222222222222222222222222222222222222 refs/heads/master\n003a3333333333333333333333333333333333333333 refs/tags/v1\n003d4444444444444444444444444444444444444444 refs/tags/v1^{}\n0035shallow 5555555555555555555555555555555555555555\n0000" } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/capability/000077500000000000000000000000001345605224300255745ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/capability/capability.go000066400000000000000000000312511345605224300302460ustar00rootroot00000000000000// Package capability defines the server and client capabilities. package capability // Capability describes a server or client capability. type Capability string func (n Capability) String() string { return string(n) } const ( // MultiACK capability allows the server to return "ACK obj-id continue" as // soon as it finds a commit that it can use as a common base, between the // client's wants and the client's have set. // // By sending this early, the server can potentially head off the client // from walking any further down that particular branch of the client's // repository history. The client may still need to walk down other // branches, sending have lines for those, until the server has a // complete cut across the DAG, or the client has said "done". // // Without multi_ack, a client sends have lines in --date-order until // the server has found a common base. 
That means the client will send // have lines that are already known by the server to be common, because // they overlap in time with another branch that the server hasn't found // a common base on yet. // // For example suppose the client has commits in caps that the server // doesn't and the server has commits in lower case that the client // doesn't, as in the following diagram: // // +---- u ---------------------- x // / +----- y // / / // a -- b -- c -- d -- E -- F // \ // +--- Q -- R -- S // // If the client wants x,y and starts out by saying have F,S, the server // doesn't know what F,S is. Eventually the client says "have d" and // the server sends "ACK d continue" to let the client know to stop // walking down that line (so don't send c-b-a), but it's not done yet, // it needs a base for x. The client keeps going with S-R-Q, until a // gets reached, at which point the server has a clear base and it all // ends. // // Without multi_ack the client would have sent that c-b-a chain anyway, // interleaved with S-R-Q. MultiACK Capability = "multi_ack" // MultiACKDetailed is an extension of multi_ack that permits client to // better understand the server's in-memory state. MultiACKDetailed Capability = "multi_ack_detailed" // NoDone should only be used with the smart HTTP protocol. If // multi_ack_detailed and no-done are both present, then the sender is // free to immediately send a pack following its first "ACK obj-id ready" // message. // // Without no-done in the smart HTTP protocol, the server session would // end and the client has to make another trip to send "done" before // the server can send the pack. no-done removes the last round and // thus slightly reduces latency. NoDone Capability = "no-done" // ThinPack is one with deltas which reference base objects not // contained within the pack (but are known to exist at the receiving // end). 
This can reduce the network traffic significantly, but it // requires the receiving end to know how to "thicken" these packs by // adding the missing bases to the pack. // // The upload-pack server advertises 'thin-pack' when it can generate // and send a thin pack. A client requests the 'thin-pack' capability // when it understands how to "thicken" it, notifying the server that // it can receive such a pack. A client MUST NOT request the // 'thin-pack' capability if it cannot turn a thin pack into a // self-contained pack. // // Receive-pack, on the other hand, is assumed by default to be able to // handle thin packs, but can ask the client not to use the feature by // advertising the 'no-thin' capability. A client MUST NOT send a thin // pack if the server advertises the 'no-thin' capability. // // The reasons for this asymmetry are historical. The receive-pack // program did not exist until after the invention of thin packs, so // historically the reference implementation of receive-pack always // understood thin packs. Adding 'no-thin' later allowed receive-pack // to disable the feature in a backwards-compatible manner. ThinPack Capability = "thin-pack" // Sideband means that server can send, and client understand multiplexed // progress reports and error info interleaved with the packfile itself. // // These two options are mutually exclusive. A modern client always // favors Sideband64k. // // Either mode indicates that the packfile data will be streamed broken // up into packets of up to either 1000 bytes in the case of 'side_band', // or 65520 bytes in the case of 'side_band_64k'. Each packet is made up // of a leading 4-byte pkt-line length of how much data is in the packet, // followed by a 1-byte stream code, followed by the actual data. 
// // The stream code can be one of: // // 1 - pack data // 2 - progress messages // 3 - fatal error message just before stream aborts // // The "side-band-64k" capability came about as a way for newer clients // that can handle much larger packets to request packets that are // actually crammed nearly full, while maintaining backward compatibility // for the older clients. // // Further, with side-band and its up to 1000-byte messages, it's actually // 999 bytes of payload and 1 byte for the stream code. With side-band-64k, // same deal, you have up to 65519 bytes of data and 1 byte for the stream // code. // // The client MUST send only maximum of one of "side-band" and "side- // band-64k". Server MUST diagnose it as an error if client requests // both. Sideband Capability = "side-band" Sideband64k Capability = "side-band-64k" // OFSDelta server can send, and client understand PACKv2 with delta // referring to its base by position in pack rather than by an obj-id. That // is, they can send/read OBJ_OFS_DELTA (aka type 6) in a packfile. OFSDelta Capability = "ofs-delta" // Agent the server may optionally send this capability to notify the client // that the server is running version `X`. The client may optionally return // its own agent string by responding with an `agent=Y` capability (but it // MUST NOT do so if the server did not mention the agent capability). The // `X` and `Y` strings may contain any printable ASCII characters except // space (i.e., the byte range 32 < x < 127), and are typically of the form // "package/version" (e.g., "git/1.8.3.1"). The agent strings are purely // informative for statistics and debugging purposes, and MUST NOT be used // to programmatically assume the presence or absence of particular features. Agent Capability = "agent" // Shallow capability adds "deepen", "shallow" and "unshallow" commands to // the fetch-pack/upload-pack protocol so clients can request shallow // clones. 
Shallow Capability = "shallow" // DeepenSince adds "deepen-since" command to fetch-pack/upload-pack // protocol so the client can request shallow clones that are cut at a // specific time, instead of depth. Internally it's equivalent of doing // "rev-list --max-age=" on the server side. "deepen-since" // cannot be used with "deepen". DeepenSince Capability = "deepen-since" // DeepenNot adds "deepen-not" command to fetch-pack/upload-pack // protocol so the client can request shallow clones that are cut at a // specific revision, instead of depth. Internally it's equivalent of // doing "rev-list --not " on the server side. "deepen-not" // cannot be used with "deepen", but can be used with "deepen-since". DeepenNot Capability = "deepen-not" // DeepenRelative if this capability is requested by the client, the // semantics of "deepen" command is changed. The "depth" argument is the // depth from the current shallow boundary, instead of the depth from // remote refs. DeepenRelative Capability = "deepen-relative" // NoProgress the client was started with "git clone -q" or something, and // doesn't want that side band 2. Basically the client just says "I do not // wish to receive stream 2 on sideband, so do not send it to me, and if // you did, I will drop it on the floor anyway". However, the sideband // channel 3 is still used for error responses. NoProgress Capability = "no-progress" // IncludeTag capability is about sending annotated tags if we are // sending objects they point to. If we pack an object to the client, and // a tag object points exactly at that object, we pack the tag object too. // In general this allows a client to get all new annotated tags when it // fetches a branch, in a single network connection. // // Clients MAY always send include-tag, hardcoding it into a request when // the server advertises this capability. 
The decision for a client to // request include-tag only has to do with the client's desires for tag // data, whether or not a server had advertised objects in the // refs/tags/* namespace. // // Servers MUST pack the tags if their referrant is packed and the client // has requested include-tags. // // Clients MUST be prepared for the case where a server has ignored // include-tag and has not actually sent tags in the pack. In such // cases the client SHOULD issue a subsequent fetch to acquire the tags // that include-tag would have otherwise given the client. // // The server SHOULD send include-tag, if it supports it, regardless // of whether or not there are tags available. IncludeTag Capability = "include-tag" // ReportStatus the receive-pack process can receive a 'report-status' // capability, which tells it that the client wants a report of what // happened after a packfile upload and reference update. If the pushing // client requests this capability, after unpacking and updating references // the server will respond with whether the packfile unpacked successfully // and if each reference was updated successfully. If any of those were not // successful, it will send back an error message. See pack-protocol.txt // for example messages. ReportStatus Capability = "report-status" // DeleteRefs If the server sends back this capability, it means that // it is capable of accepting a zero-id value as the target // value of a reference update. It is not sent back by the client, it // simply informs the client that it can be sent zero-id values // to delete references DeleteRefs Capability = "delete-refs" // Quiet If the receive-pack server advertises this capability, it is // capable of silencing human-readable progress output which otherwise may // be shown when processing the received pack. 
A send-pack client should // respond with the 'quiet' capability to suppress server-side progress // reporting if the local progress reporting is also being suppressed // (e.g., via `push -q`, or if stderr does not go to a tty). Quiet Capability = "quiet" // Atomic If the server sends this capability it is capable of accepting // atomic pushes. If the pushing client requests this capability, the server // will update the refs in one atomic transaction. Either all refs are // updated or none. Atomic Capability = "atomic" // PushOptions If the server sends this capability it is able to accept // push options after the update commands have been sent, but before the // packfile is streamed. If the pushing client requests this capability, // the server will pass the options to the pre- and post- receive hooks // that process this push request. PushOptions Capability = "push-options" // AllowTipSHA1InWant if the upload-pack server advertises this capability, // fetch-pack may send "want" lines with SHA-1s that exist at the server but // are not advertised by upload-pack. AllowTipSHA1InWant Capability = "allow-tip-sha1-in-want" // AllowReachableSHA1InWant if the upload-pack server advertises this // capability, fetch-pack may send "want" lines with SHA-1s that exist at // the server but are not advertised by upload-pack. AllowReachableSHA1InWant Capability = "allow-reachable-sha1-in-want" // PushCert the receive-pack server that advertises this capability is // willing to accept a signed push certificate, and asks the to be // included in the push certificate. A send-pack client MUST NOT // send a push-cert packet unless the receive-pack server advertises // this capability. PushCert Capability = "push-cert" // SymRef symbolic reference support for better negotiation. 
SymRef Capability = "symref" ) const DefaultAgent = "go-git/4.x" var known = map[Capability]bool{ MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true, Sideband: true, Sideband64k: true, OFSDelta: true, Agent: true, Shallow: true, DeepenSince: true, DeepenNot: true, DeepenRelative: true, NoProgress: true, IncludeTag: true, ReportStatus: true, DeleteRefs: true, Quiet: true, Atomic: true, PushOptions: true, AllowTipSHA1InWant: true, AllowReachableSHA1InWant: true, PushCert: true, SymRef: true, } var requiresArgument = map[Capability]bool{ Agent: true, PushCert: true, SymRef: true, } var multipleArgument = map[Capability]bool{ SymRef: true, } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/capability/list.go000066400000000000000000000101361345605224300270770ustar00rootroot00000000000000package capability import ( "bytes" "errors" "fmt" "strings" ) var ( // ErrArgumentsRequired is returned if no arguments are giving with a // capability that requires arguments ErrArgumentsRequired = errors.New("arguments required") // ErrArguments is returned if arguments are given with a capabilities that // not supports arguments ErrArguments = errors.New("arguments not allowed") // ErrEmtpyArgument is returned when an empty value is given ErrEmtpyArgument = errors.New("empty argument") // ErrMultipleArguments multiple argument given to a capabilities that not // support it ErrMultipleArguments = errors.New("multiple arguments not allowed") ) // List represents a list of capabilities type List struct { m map[Capability]*entry sort []string } type entry struct { Name Capability Values []string } // NewList returns a new List of capabilities func NewList() *List { return &List{ m: make(map[Capability]*entry), } } // IsEmpty returns true if the List is empty func (l *List) IsEmpty() bool { return len(l.sort) == 0 } // Decode decodes list of capabilities from raw into the list func (l *List) Decode(raw []byte) error { // git 1.x receive pack used to send a 
leading space on its // git-receive-pack capabilities announcement. We just trim space to be // tolerant to space changes in different versions. raw = bytes.TrimSpace(raw) if len(raw) == 0 { return nil } for _, data := range bytes.Split(raw, []byte{' '}) { pair := bytes.SplitN(data, []byte{'='}, 2) c := Capability(pair[0]) if len(pair) == 1 { if err := l.Add(c); err != nil { return err } continue } if err := l.Add(c, string(pair[1])); err != nil { return err } } return nil } // Get returns the values for a capability func (l *List) Get(capability Capability) []string { if _, ok := l.m[capability]; !ok { return nil } return l.m[capability].Values } // Set sets a capability removing the previous values func (l *List) Set(capability Capability, values ...string) error { if _, ok := l.m[capability]; ok { delete(l.m, capability) } return l.Add(capability, values...) } // Add adds a capability, values are optional func (l *List) Add(c Capability, values ...string) error { if err := l.validate(c, values); err != nil { return err } if !l.Supports(c) { l.m[c] = &entry{Name: c} l.sort = append(l.sort, c.String()) } if len(values) == 0 { return nil } if known[c] && !multipleArgument[c] && len(l.m[c].Values) > 0 { return ErrMultipleArguments } l.m[c].Values = append(l.m[c].Values, values...) 
return nil } func (l *List) validateNoEmptyArgs(values []string) error { for _, v := range values { if v == "" { return ErrEmtpyArgument } } return nil } func (l *List) validate(c Capability, values []string) error { if !known[c] { return l.validateNoEmptyArgs(values) } if requiresArgument[c] && len(values) == 0 { return ErrArgumentsRequired } if !requiresArgument[c] && len(values) != 0 { return ErrArguments } if !multipleArgument[c] && len(values) > 1 { return ErrMultipleArguments } return l.validateNoEmptyArgs(values) } // Supports returns true if capability is present func (l *List) Supports(capability Capability) bool { _, ok := l.m[capability] return ok } // Delete deletes a capability from the List func (l *List) Delete(capability Capability) { if !l.Supports(capability) { return } delete(l.m, capability) for i, c := range l.sort { if c != string(capability) { continue } l.sort = append(l.sort[:i], l.sort[i+1:]...) return } } // All returns a slice with all defined capabilities. func (l *List) All() []Capability { var cs []Capability for _, key := range l.sort { cs = append(cs, Capability(key)) } return cs } // String generates the capabilities strings, the capabilities are sorted in // insertion order func (l *List) String() string { var o []string for _, key := range l.sort { cap := l.m[Capability(key)] if len(cap.Values) == 0 { o = append(o, key) continue } for _, value := range cap.Values { o = append(o, fmt.Sprintf("%s=%s", key, value)) } } return strings.Join(o, " ") } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/capability/list_test.go000066400000000000000000000126331345605224300301420ustar00rootroot00000000000000package capability import ( "testing" check "gopkg.in/check.v1" ) func Test(t *testing.T) { check.TestingT(t) } type SuiteCapabilities struct{} var _ = check.Suite(&SuiteCapabilities{}) func (s *SuiteCapabilities) TestIsEmpty(c *check.C) { cap := NewList() c.Assert(cap.IsEmpty(), check.Equals, true) } func (s *SuiteCapabilities) 
TestDecode(c *check.C) { cap := NewList() err := cap.Decode([]byte("symref=foo symref=qux thin-pack")) c.Assert(err, check.IsNil) c.Assert(cap.m, check.HasLen, 2) c.Assert(cap.Get(SymRef), check.DeepEquals, []string{"foo", "qux"}) c.Assert(cap.Get(ThinPack), check.IsNil) } func (s *SuiteCapabilities) TestDecodeWithLeadingSpace(c *check.C) { cap := NewList() err := cap.Decode([]byte(" report-status")) c.Assert(err, check.IsNil) c.Assert(cap.m, check.HasLen, 1) c.Assert(cap.Supports(ReportStatus), check.Equals, true) } func (s *SuiteCapabilities) TestDecodeEmpty(c *check.C) { cap := NewList() err := cap.Decode(nil) c.Assert(err, check.IsNil) c.Assert(cap, check.DeepEquals, NewList()) } func (s *SuiteCapabilities) TestDecodeWithErrArguments(c *check.C) { cap := NewList() err := cap.Decode([]byte("thin-pack=foo")) c.Assert(err, check.Equals, ErrArguments) } func (s *SuiteCapabilities) TestDecodeWithEqual(c *check.C) { cap := NewList() err := cap.Decode([]byte("agent=foo=bar")) c.Assert(err, check.IsNil) c.Assert(cap.m, check.HasLen, 1) c.Assert(cap.Get(Agent), check.DeepEquals, []string{"foo=bar"}) } func (s *SuiteCapabilities) TestDecodeWithUnknownCapability(c *check.C) { cap := NewList() err := cap.Decode([]byte("foo")) c.Assert(err, check.IsNil) c.Assert(cap.Supports(Capability("foo")), check.Equals, true) } func (s *SuiteCapabilities) TestDecodeWithUnknownCapabilityWithArgument(c *check.C) { cap := NewList() err := cap.Decode([]byte("oldref=HEAD:refs/heads/v2 thin-pack")) c.Assert(err, check.IsNil) c.Assert(cap.m, check.HasLen, 2) c.Assert(cap.Get("oldref"), check.DeepEquals, []string{"HEAD:refs/heads/v2"}) c.Assert(cap.Get(ThinPack), check.IsNil) } func (s *SuiteCapabilities) TestDecodeWithUnknownCapabilityWithMultipleArgument(c *check.C) { cap := NewList() err := cap.Decode([]byte("foo=HEAD:refs/heads/v2 foo=HEAD:refs/heads/v1 thin-pack")) c.Assert(err, check.IsNil) c.Assert(cap.m, check.HasLen, 2) c.Assert(cap.Get("foo"), check.DeepEquals, 
[]string{"HEAD:refs/heads/v2", "HEAD:refs/heads/v1"}) c.Assert(cap.Get(ThinPack), check.IsNil) } func (s *SuiteCapabilities) TestString(c *check.C) { cap := NewList() cap.Set(Agent, "bar") cap.Set(SymRef, "foo:qux") cap.Set(ThinPack) c.Assert(cap.String(), check.Equals, "agent=bar symref=foo:qux thin-pack") } func (s *SuiteCapabilities) TestStringSort(c *check.C) { cap := NewList() cap.Set(Agent, "bar") cap.Set(SymRef, "foo:qux") cap.Set(ThinPack) c.Assert(cap.String(), check.Equals, "agent=bar symref=foo:qux thin-pack") } func (s *SuiteCapabilities) TestSet(c *check.C) { cap := NewList() err := cap.Add(SymRef, "foo", "qux") c.Assert(err, check.IsNil) err = cap.Set(SymRef, "bar") c.Assert(err, check.IsNil) c.Assert(cap.m, check.HasLen, 1) c.Assert(cap.Get(SymRef), check.DeepEquals, []string{"bar"}) } func (s *SuiteCapabilities) TestSetEmpty(c *check.C) { cap := NewList() err := cap.Set(Agent, "bar") c.Assert(err, check.IsNil) c.Assert(cap.Get(Agent), check.HasLen, 1) } func (s *SuiteCapabilities) TestGetEmpty(c *check.C) { cap := NewList() c.Assert(cap.Get(Agent), check.HasLen, 0) } func (s *SuiteCapabilities) TestDelete(c *check.C) { cap := NewList() cap.Delete(SymRef) err := cap.Add(Sideband) c.Assert(err, check.IsNil) err = cap.Set(SymRef, "bar") c.Assert(err, check.IsNil) err = cap.Set(Sideband64k) c.Assert(err, check.IsNil) cap.Delete(SymRef) c.Assert(cap.String(), check.Equals, "side-band side-band-64k") } func (s *SuiteCapabilities) TestAdd(c *check.C) { cap := NewList() err := cap.Add(SymRef, "foo", "qux") c.Assert(err, check.IsNil) err = cap.Add(ThinPack) c.Assert(err, check.IsNil) c.Assert(cap.String(), check.Equals, "symref=foo symref=qux thin-pack") } func (s *SuiteCapabilities) TestAddUnknownCapability(c *check.C) { cap := NewList() err := cap.Add(Capability("foo")) c.Assert(err, check.IsNil) c.Assert(cap.Supports(Capability("foo")), check.Equals, true) } func (s *SuiteCapabilities) TestAddErrArgumentsRequired(c *check.C) { cap := NewList() err := 
cap.Add(SymRef) c.Assert(err, check.Equals, ErrArgumentsRequired) } func (s *SuiteCapabilities) TestAddErrArgumentsNotAllowed(c *check.C) { cap := NewList() err := cap.Add(OFSDelta, "foo") c.Assert(err, check.Equals, ErrArguments) } func (s *SuiteCapabilities) TestAddErrArguments(c *check.C) { cap := NewList() err := cap.Add(SymRef, "") c.Assert(err, check.Equals, ErrEmtpyArgument) } func (s *SuiteCapabilities) TestAddErrMultipleArguments(c *check.C) { cap := NewList() err := cap.Add(Agent, "foo") c.Assert(err, check.IsNil) err = cap.Add(Agent, "bar") c.Assert(err, check.Equals, ErrMultipleArguments) } func (s *SuiteCapabilities) TestAddErrMultipleArgumentsAtTheSameTime(c *check.C) { cap := NewList() err := cap.Add(Agent, "foo", "bar") c.Assert(err, check.Equals, ErrMultipleArguments) } func (s *SuiteCapabilities) TestAll(c *check.C) { cap := NewList() c.Assert(NewList().All(), check.IsNil) cap.Add(Agent, "foo") c.Assert(cap.All(), check.DeepEquals, []Capability{Agent}) cap.Add(OFSDelta) c.Assert(cap.All(), check.DeepEquals, []Capability{Agent, OFSDelta}) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/common.go000066400000000000000000000024541345605224300252770ustar00rootroot00000000000000package packp import ( "fmt" ) type stateFn func() stateFn const ( // common hashSize = 40 // advrefs head = "HEAD" noHead = "capabilities^{}" ) var ( // common sp = []byte(" ") eol = []byte("\n") eq = []byte{'='} // advertised-refs null = []byte("\x00") peeled = []byte("^{}") noHeadMark = []byte(" capabilities^{}\x00") // upload-request want = []byte("want ") shallow = []byte("shallow ") deepen = []byte("deepen") deepenCommits = []byte("deepen ") deepenSince = []byte("deepen-since ") deepenReference = []byte("deepen-not ") // shallow-update unshallow = []byte("unshallow ") // server-response ack = []byte("ACK") nak = []byte("NAK") // updreq shallowNoSp = []byte("shallow") ) func isFlush(payload []byte) bool { return len(payload) == 0 } // ErrUnexpectedData 
represents an unexpected data decoding a message type ErrUnexpectedData struct { Msg string Data []byte } // NewErrUnexpectedData returns a new ErrUnexpectedData containing the data and // the message given func NewErrUnexpectedData(msg string, data []byte) error { return &ErrUnexpectedData{Msg: msg, Data: data} } func (err *ErrUnexpectedData) Error() string { if len(err.Data) == 0 { return err.Msg } return fmt.Sprintf("%s (%s)", err.Msg, err.Data) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/common_test.go000066400000000000000000000012151345605224300263300ustar00rootroot00000000000000package packp import ( "bytes" "io" "testing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" . "gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } // returns a byte slice with the pkt-lines for the given payloads. func pktlines(c *C, payloads ...string) []byte { var buf bytes.Buffer e := pktline.NewEncoder(&buf) err := e.EncodeString(payloads...) c.Assert(err, IsNil, Commentf("building pktlines for %v\n", payloads)) return buf.Bytes() } func toPktLines(c *C, payloads []string) io.Reader { var buf bytes.Buffer e := pktline.NewEncoder(&buf) err := e.EncodeString(payloads...) c.Assert(err, IsNil) return &buf } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/doc.go000066400000000000000000000644661345605224300245670ustar00rootroot00000000000000package packp /* A nice way to trace the real data transmitted and received by git, use: GIT_TRACE_PACKET=true git ls-remote http://github.com/src-d/go-git GIT_TRACE_PACKET=true git clone http://github.com/src-d/go-git Here follows a copy of the current protocol specification at the time of this writing. (Please notice that most http git servers will add a flush-pkt after the first pkt-line when using HTTP smart.) 
Documentation Common to Pack and Http Protocols =============================================== ABNF Notation ------------- ABNF notation as described by RFC 5234 is used within the protocol documents, except the following replacement core rules are used: ---- HEXDIG = DIGIT / "a" / "b" / "c" / "d" / "e" / "f" ---- We also define the following common rules: ---- NUL = %x00 zero-id = 40*"0" obj-id = 40*(HEXDIGIT) refname = "HEAD" refname /= "refs/" ---- A refname is a hierarchical octet string beginning with "refs/" and not violating the 'git-check-ref-format' command's validation rules. More specifically, they: . They can include slash `/` for hierarchical (directory) grouping, but no slash-separated component can begin with a dot `.`. . They must contain at least one `/`. This enforces the presence of a category like `heads/`, `tags/` etc. but the actual names are not restricted. . They cannot have two consecutive dots `..` anywhere. . They cannot have ASCII control characters (i.e. bytes whose values are lower than \040, or \177 `DEL`), space, tilde `~`, caret `^`, colon `:`, question-mark `?`, asterisk `*`, or open bracket `[` anywhere. . They cannot end with a slash `/` or a dot `.`. . They cannot end with the sequence `.lock`. . They cannot contain a sequence `@{`. . They cannot contain a `\\`. pkt-line Format --------------- Much (but not all) of the payload is described around pkt-lines. A pkt-line is a variable length binary string. The first four bytes of the line, the pkt-len, indicates the total length of the line, in hexadecimal. The pkt-len includes the 4 bytes used to contain the length's hexadecimal representation. A pkt-line MAY contain binary data, so implementors MUST ensure pkt-line parsing/formatting routines are 8-bit clean. A non-binary line SHOULD BE terminated by an LF, which if present MUST be included in the total length. 
Receivers MUST treat pkt-lines with non-binary data the same whether or not they contain the trailing LF (stripping the LF if present, and not complaining when it is missing). The maximum length of a pkt-line's data component is 65516 bytes. Implementations MUST NOT send pkt-line whose length exceeds 65520 (65516 bytes of payload + 4 bytes of length data). Implementations SHOULD NOT send an empty pkt-line ("0004"). A pkt-line with a length field of 0 ("0000"), called a flush-pkt, is a special case and MUST be handled differently than an empty pkt-line ("0004"). ---- pkt-line = data-pkt / flush-pkt data-pkt = pkt-len pkt-payload pkt-len = 4*(HEXDIG) pkt-payload = (pkt-len - 4)*(OCTET) flush-pkt = "0000" ---- Examples (as C-style strings): ---- pkt-line actual value --------------------------------- "0006a\n" "a\n" "0005a" "a" "000bfoobar\n" "foobar\n" "0004" "" ---- Packfile transfer protocols =========================== Git supports transferring data in packfiles over the ssh://, git://, http:// and file:// transports. There exist two sets of protocols, one for pushing data from a client to a server and another for fetching data from a server to a client. The three transports (ssh, git, file) use the same protocol to transfer data. http is documented in http-protocol.txt. The processes invoked in the canonical Git implementation are 'upload-pack' on the server side and 'fetch-pack' on the client side for fetching data; then 'receive-pack' on the server and 'send-pack' on the client for pushing data. The protocol functions to have a server tell a client what is currently on the server, then for the two to negotiate the smallest amount of data to send in order to fully update one or the other. pkt-line Format --------------- The descriptions below build on the pkt-line format described in protocol-common.txt. 
When the grammar indicate `PKT-LINE(...)`, unless otherwise noted the usual pkt-line LF rules apply: the sender SHOULD include a LF, but the receiver MUST NOT complain if it is not present. Transports ---------- There are three transports over which the packfile protocol is initiated. The Git transport is a simple, unauthenticated server that takes the command (almost always 'upload-pack', though Git servers can be configured to be globally writable, in which 'receive- pack' initiation is also allowed) with which the client wishes to communicate and executes it and connects it to the requesting process. In the SSH transport, the client just runs the 'upload-pack' or 'receive-pack' process on the server over the SSH protocol and then communicates with that invoked process over the SSH connection. The file:// transport runs the 'upload-pack' or 'receive-pack' process locally and communicates with it over a pipe. Git Transport ------------- The Git transport starts off by sending the command and repository on the wire using the pkt-line format, followed by a NUL byte and a hostname parameter, terminated by a NUL byte. 0032git-upload-pack /project.git\0host=myserver.com\0 -- git-proto-request = request-command SP pathname NUL [ host-parameter NUL ] request-command = "git-upload-pack" / "git-receive-pack" / "git-upload-archive" ; case sensitive pathname = *( %x01-ff ) ; exclude NUL host-parameter = "host=" hostname [ ":" port ] -- Only host-parameter is allowed in the git-proto-request. Clients MUST NOT attempt to send additional parameters. It is used for the git-daemon name based virtual hosting. See --interpolated-path option to git daemon, with the %H/%CH format characters. 
Basically what the Git client is doing to connect to an 'upload-pack' process on the server side over the Git protocol is this: $ echo -e -n \ "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | nc -v example.com 9418 If the server refuses the request for some reasons, it could abort gracefully with an error message. ---- error-line = PKT-LINE("ERR" SP explanation-text) ---- SSH Transport ------------- Initiating the upload-pack or receive-pack processes over SSH is executing the binary on the server via SSH remote execution. It is basically equivalent to running this: $ ssh git.example.com "git-upload-pack '/project.git'" For a server to support Git pushing and pulling for a given user over SSH, that user needs to be able to execute one or both of those commands via the SSH shell that they are provided on login. On some systems, that shell access is limited to only being able to run those two commands, or even just one of them. In an ssh:// format URI, it's absolute in the URI, so the '/' after the host name (or port number) is sent as an argument, which is then read by the remote git-upload-pack exactly as is, so it's effectively an absolute path in the remote filesystem. git clone ssh://user@example.com/project.git | v ssh user@example.com "git-upload-pack '/project.git'" In a "user@host:path" format URI, its relative to the user's home directory, because the Git client will run: git clone user@example.com:project.git | v ssh user@example.com "git-upload-pack 'project.git'" The exception is if a '~' is used, in which case we execute it without the leading '/'. ssh://user@example.com/~alice/project.git, | v ssh user@example.com "git-upload-pack '~alice/project.git'" A few things to remember here: - The "command name" is spelled with dash (e.g. git-upload-pack), but this can be overridden by the client; - The repository path is always quoted with single quotes. 
Fetching Data From a Server --------------------------- When one Git repository wants to get data that a second repository has, the first can 'fetch' from the second. This operation determines what data the server has that the client does not then streams that data down to the client in packfile format. Reference Discovery ------------------- When the client initially connects the server will immediately respond with a listing of each reference it has (all branches and tags) along with the object name that each reference currently points to. $ echo -e -n "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | nc -v example.com 9418 00887217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\0multi_ack thin-pack side-band side-band-64k ofs-delta shallow no-progress include-tag 00441d3fcd5ced445d1abc402225c0b8a1299641f497 refs/heads/integration 003f7217a7c7e582c46cec22a130adf4b9d7d950fba0 refs/heads/master 003cb88d2441cac0977faf98efc80305012112238d9d refs/tags/v0.9 003c525128480b96c89e6418b1e40909bf6c5b2d580f refs/tags/v1.0 003fe92df48743b7bc7d26bcaabfddde0a1e20cae47c refs/tags/v1.0^{} 0000 The returned response is a pkt-line stream describing each ref and its current value. The stream MUST be sorted by name according to the C locale ordering. If HEAD is a valid ref, HEAD MUST appear as the first advertised ref. If HEAD is not a valid ref, HEAD MUST NOT appear in the advertisement list at all, but other refs may still appear. The stream MUST include capability declarations behind a NUL on the first ref. The peeled value of a ref (that is "ref^{}") MUST be immediately after the ref itself, if presented. A conforming server MUST peel the ref if it's an annotated tag. 
---- advertised-refs = (no-refs / list-of-refs) *shallow flush-pkt no-refs = PKT-LINE(zero-id SP "capabilities^{}" NUL capability-list) list-of-refs = first-ref *other-ref first-ref = PKT-LINE(obj-id SP refname NUL capability-list) other-ref = PKT-LINE(other-tip / other-peeled) other-tip = obj-id SP refname other-peeled = obj-id SP refname "^{}" shallow = PKT-LINE("shallow" SP obj-id) capability-list = capability *(SP capability) capability = 1*(LC_ALPHA / DIGIT / "-" / "_") LC_ALPHA = %x61-7A ---- Server and client MUST use lowercase for obj-id, both MUST treat obj-id as case-insensitive. See protocol-capabilities.txt for a list of allowed server capabilities and descriptions. Packfile Negotiation -------------------- After reference and capabilities discovery, the client can decide to terminate the connection by sending a flush-pkt, telling the server it can now gracefully terminate, and disconnect, when it does not need any pack data. This can happen with the ls-remote command, and also can happen when the client already is up-to-date. Otherwise, it enters the negotiation phase, where the client and server determine what the minimal packfile necessary for transport is, by telling the server what objects it wants, its shallow objects (if any), and the maximum commit depth it wants (if any). The client will also send a list of the capabilities it wants to be in effect, out of what the server said it could do with the first 'want' line. ---- upload-request = want-list *shallow-line *1depth-request flush-pkt want-list = first-want *additional-want shallow-line = PKT-LINE("shallow" SP obj-id) depth-request = PKT-LINE("deepen" SP depth) / PKT-LINE("deepen-since" SP timestamp) / PKT-LINE("deepen-not" SP ref) first-want = PKT-LINE("want" SP obj-id SP capability-list) additional-want = PKT-LINE("want" SP obj-id) depth = 1*DIGIT ---- Clients MUST send all the obj-ids it wants from the reference discovery phase as 'want' lines. 
Clients MUST send at least one 'want' command in the request body. Clients MUST NOT mention an obj-id in a 'want' command which did not appear in the response obtained through ref discovery. The client MUST write all obj-ids which it only has shallow copies of (meaning that it does not have the parents of a commit) as 'shallow' lines so that the server is aware of the limitations of the client's history. The client now sends the maximum commit history depth it wants for this transaction, which is the number of commits it wants from the tip of the history, if any, as a 'deepen' line. A depth of 0 is the same as not making a depth request. The client does not want to receive any commits beyond this depth, nor does it want objects needed only to complete those commits. Commits whose parents are not received as a result are defined as shallow and marked as such in the server. This information is sent back to the client in the next step. Once all the 'want's and 'shallow's (and optional 'deepen') are transferred, clients MUST send a flush-pkt, to tell the server side that it is done sending the list. Otherwise, if the client sent a positive depth request, the server will determine which commits will and will not be shallow and send this information to the client. If the client did not request a positive depth, this step is skipped. ---- shallow-update = *shallow-line *unshallow-line flush-pkt shallow-line = PKT-LINE("shallow" SP obj-id) unshallow-line = PKT-LINE("unshallow" SP obj-id) ---- If the client has requested a positive depth, the server will compute the set of commits which are no deeper than the desired depth. The set of commits start at the client's wants. The server writes 'shallow' lines for each commit whose parents will not be sent as a result. The server writes an 'unshallow' line for each commit which the client has indicated is shallow, but is no longer shallow at the currently requested depth (that is, its parents will now be sent). 
The server MUST NOT mark as unshallow anything which the client has not indicated was shallow. Now the client will send a list of the obj-ids it has using 'have' lines, so the server can make a packfile that only contains the objects that the client needs. In multi_ack mode, the canonical implementation will send up to 32 of these at a time, then will send a flush-pkt. The canonical implementation will skip ahead and send the next 32 immediately, so that there is always a block of 32 "in-flight on the wire" at a time. ---- upload-haves = have-list compute-end have-list = *have-line have-line = PKT-LINE("have" SP obj-id) compute-end = flush-pkt / PKT-LINE("done") ---- If the server reads 'have' lines, it then will respond by ACKing any of the obj-ids the client said it had that the server also has. The server will ACK obj-ids differently depending on which ack mode is chosen by the client. In multi_ack mode: * the server will respond with 'ACK obj-id continue' for any common commits. * once the server has found an acceptable common base commit and is ready to make a packfile, it will blindly ACK all 'have' obj-ids back to the client. * the server will then send a 'NAK' and then wait for another response from the client - either a 'done' or another list of 'have' lines. In multi_ack_detailed mode: * the server will differentiate the ACKs where it is signaling that it is ready to send data with 'ACK obj-id ready' lines, and signals the identified common commits with 'ACK obj-id common' lines. Without either multi_ack or multi_ack_detailed: * upload-pack sends "ACK obj-id" on the first common object it finds. After that it says nothing until the client gives it a "done". * upload-pack sends "NAK" on a flush-pkt if no common object has been found yet. If one has been found, and thus an ACK was already sent, it's silent on the flush-pkt. 
After the client has gotten enough ACK responses that it can determine that the server has enough information to send an efficient packfile (in the canonical implementation, this is determined when it has received enough ACKs that it can color everything left in the --date-order queue as common with the server, or the --date-order queue is empty), or the client determines that it wants to give up (in the canonical implementation, this is determined when the client sends 256 'have' lines without getting any of them ACKed by the server - meaning there is nothing in common and the server should just send all of its objects), then the client will send a 'done' command. The 'done' command signals to the server that the client is ready to receive its packfile data. However, the 256 limit *only* turns on in the canonical client implementation if we have received at least one "ACK %s continue" during a prior round. This helps to ensure that at least one common ancestor is found before we give up entirely. Once the 'done' line is read from the client, the server will either send a final 'ACK obj-id' or it will send a 'NAK'. 'obj-id' is the object name of the last commit determined to be common. The server only sends ACK after 'done' if there is at least one common base and multi_ack or multi_ack_detailed is enabled. The server always sends NAK after 'done' if there is no common base found. Then the server will start sending its packfile data. 
---- server-response = *ack_multi ack / nak ack_multi = PKT-LINE("ACK" SP obj-id ack_status) ack_status = "continue" / "common" / "ready" ack = PKT-LINE("ACK" SP obj-id) nak = PKT-LINE("NAK") ---- A simple clone may look like this (with no 'have' lines): ---- C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \ side-band-64k ofs-delta\n C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n C: 0032want 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n C: 0032want 74730d410fcb6603ace96f1dc55ea6196122532d\n C: 0000 C: 0009done\n S: 0008NAK\n S: [PACKFILE] ---- An incremental update (fetch) response might look like this: ---- C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \ side-band-64k ofs-delta\n C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n C: 0000 C: 0032have 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n C: [30 more have lines] C: 0032have 74730d410fcb6603ace96f1dc55ea6196122532d\n C: 0000 S: 003aACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n S: 003aACK 74730d410fcb6603ace96f1dc55ea6196122532d continue\n S: 0008NAK\n C: 0009done\n S: 0031ACK 74730d410fcb6603ace96f1dc55ea6196122532d\n S: [PACKFILE] ---- Packfile Data ------------- Now that the client and server have finished negotiation about what the minimal amount of data that needs to be sent to the client is, the server will construct and send the required data in packfile format. See pack-format.txt for what the packfile itself actually looks like. If 'side-band' or 'side-band-64k' capabilities have been specified by the client, the server will send the packfile data multiplexed. Each packet starting with the packet-line length of the amount of data that follows, followed by a single byte specifying the sideband the following data is coming in on. 
In 'side-band' mode, it will send up to 999 data bytes plus 1 control code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k' mode it will send up to 65519 data bytes plus 1 control code, for a total of up to 65520 bytes in a pkt-line. The sideband byte will be a '1', '2' or a '3'. Sideband '1' will contain packfile data, sideband '2' will be used for progress information that the client will generally print to stderr and sideband '3' is used for error information. If no 'side-band' capability was specified, the server will stream the entire packfile without multiplexing. Pushing Data To a Server ------------------------ Pushing data to a server will invoke the 'receive-pack' process on the server, which will allow the client to tell it which references it should update and then send all the data the server will need for those new references to be complete. Once all the data is received and validated, the server will then update its references to what the client specified. Authentication -------------- The protocol itself contains no authentication mechanisms. That is to be handled by the transport, such as SSH, before the 'receive-pack' process is invoked. If 'receive-pack' is configured over the Git transport, those repositories will be writable by anyone who can access that port (9418) as that transport is unauthenticated. Reference Discovery ------------------- The reference discovery phase is done nearly the same way as it is in the fetching protocol. Each reference obj-id and name on the server is sent in packet-line format to the client, followed by a flush-pkt. The only real difference is that the capability listing is different - the only possible values are 'report-status', 'delete-refs', 'ofs-delta' and 'push-options'. Reference Update Request and Packfile Transfer ---------------------------------------------- Once the client knows what references the server is at, it can send a list of reference update requests. 
For each reference on the server that it wants to update, it sends a line listing the obj-id currently on the server, the obj-id the client would like to update it to and the name of the reference. This list is followed by a flush-pkt. Then the push options are transmitted one per packet followed by another flush-pkt. After that the packfile that should contain all the objects that the server will need to complete the new references will be sent. ---- update-request = *shallow ( command-list | push-cert ) [packfile] shallow = PKT-LINE("shallow" SP obj-id) command-list = PKT-LINE(command NUL capability-list) *PKT-LINE(command) flush-pkt command = create / delete / update create = zero-id SP new-id SP name delete = old-id SP zero-id SP name update = old-id SP new-id SP name old-id = obj-id new-id = obj-id push-cert = PKT-LINE("push-cert" NUL capability-list LF) PKT-LINE("certificate version 0.1" LF) PKT-LINE("pusher" SP ident LF) PKT-LINE("pushee" SP url LF) PKT-LINE("nonce" SP nonce LF) PKT-LINE(LF) *PKT-LINE(command LF) *PKT-LINE(gpg-signature-lines LF) PKT-LINE("push-cert-end" LF) packfile = "PACK" 28*(OCTET) ---- If the receiving end does not support delete-refs, the sending end MUST NOT ask for delete command. If the receiving end does not support push-cert, the sending end MUST NOT send a push-cert command. When a push-cert command is sent, command-list MUST NOT be sent; the commands recorded in the push certificate is used instead. The packfile MUST NOT be sent if the only command used is 'delete'. A packfile MUST be sent if either create or update command is used, even if the server already has all the necessary objects. In this case the client MUST send an empty packfile. The only time this is likely to happen is if the client is creating a new branch or a tag that points to an existing obj-id. 
The server will receive the packfile, unpack it, then validate each reference that is being updated that it hasn't changed while the request was being processed (the obj-id is still the same as the old-id), and it will run any update hooks to make sure that the update is acceptable. If all of that is fine, the server will then update the references. Push Certificate ---------------- A push certificate begins with a set of header lines. After the header and an empty line, the protocol commands follow, one per line. Note that the trailing LF in push-cert PKT-LINEs is _not_ optional; it must be present. Currently, the following header fields are defined: `pusher` ident:: Identify the GPG key in "Human Readable Name " format. `pushee` url:: The repository URL (anonymized, if the URL contains authentication material) the user who ran `git push` intended to push into. `nonce` nonce:: The 'nonce' string the receiving repository asked the pushing user to include in the certificate, to prevent replay attacks. The GPG signature lines are a detached signature for the contents recorded in the push certificate before the signature block begins. The detached signature is used to certify that the commands were given by the pusher, who must be the signer. Report Status ------------- After receiving the pack data from the sender, the receiver sends a report if 'report-status' capability is in effect. It is a short listing of what happened in that update. It will first list the status of the packfile unpacking as either 'unpack ok' or 'unpack [error]'. Then it will list the status for each of the references that it tried to update. Each line is either 'ok [refname]' if the update was successful, or 'ng [refname] [error]' if the update was not. 
---- report-status = unpack-status 1*(command-status) flush-pkt unpack-status = PKT-LINE("unpack" SP unpack-result) unpack-result = "ok" / error-msg command-status = command-ok / command-fail command-ok = PKT-LINE("ok" SP refname) command-fail = PKT-LINE("ng" SP refname SP error-msg) error-msg = 1*(OCTECT) ; where not "ok" ---- Updates can be unsuccessful for a number of reasons. The reference can have changed since the reference discovery phase was originally sent, meaning someone pushed in the meantime. The reference being pushed could be a non-fast-forward reference and the update hooks or configuration could be set to not allow that, etc. Also, some references can be updated while others can be rejected. An example client/server communication might look like this: ---- S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n S: 0000 C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n C: 0000 C: [PACKDATA] S: 000eunpack ok\n S: 0018ok refs/heads/debug\n S: 002ang refs/heads/master non-fast-forward\n ---- */ golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/report_status.go000066400000000000000000000065631345605224300267320ustar00rootroot00000000000000package packp import ( "bytes" "fmt" "io" "strings" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" ) const ( ok = "ok" ) // ReportStatus is a report status message, as used in the git-receive-pack // process whenever the 'report-status' capability is negotiated. 
type ReportStatus struct { UnpackStatus string CommandStatuses []*CommandStatus } // NewReportStatus creates a new ReportStatus message. func NewReportStatus() *ReportStatus { return &ReportStatus{} } // Error returns the first error if any. func (s *ReportStatus) Error() error { if s.UnpackStatus != ok { return fmt.Errorf("unpack error: %s", s.UnpackStatus) } for _, s := range s.CommandStatuses { if err := s.Error(); err != nil { return err } } return nil } // Encode writes the report status to a writer. func (s *ReportStatus) Encode(w io.Writer) error { e := pktline.NewEncoder(w) if err := e.Encodef("unpack %s\n", s.UnpackStatus); err != nil { return err } for _, cs := range s.CommandStatuses { if err := cs.encode(w); err != nil { return err } } return e.Flush() } // Decode reads from the given reader and decodes a report-status message. It // does not read more input than what is needed to fill the report status. func (s *ReportStatus) Decode(r io.Reader) error { scan := pktline.NewScanner(r) if err := s.scanFirstLine(scan); err != nil { return err } if err := s.decodeReportStatus(scan.Bytes()); err != nil { return err } flushed := false for scan.Scan() { b := scan.Bytes() if isFlush(b) { flushed = true break } if err := s.decodeCommandStatus(b); err != nil { return err } } if !flushed { return fmt.Errorf("missing flush") } return scan.Err() } func (s *ReportStatus) scanFirstLine(scan *pktline.Scanner) error { if scan.Scan() { return nil } if scan.Err() != nil { return scan.Err() } return io.ErrUnexpectedEOF } func (s *ReportStatus) decodeReportStatus(b []byte) error { if isFlush(b) { return fmt.Errorf("premature flush") } b = bytes.TrimSuffix(b, eol) line := string(b) fields := strings.SplitN(line, " ", 2) if len(fields) != 2 || fields[0] != "unpack" { return fmt.Errorf("malformed unpack status: %s", line) } s.UnpackStatus = fields[1] return nil } func (s *ReportStatus) decodeCommandStatus(b []byte) error { b = bytes.TrimSuffix(b, eol) line := string(b) fields 
:= strings.SplitN(line, " ", 3) status := ok if len(fields) == 3 && fields[0] == "ng" { status = fields[2] } else if len(fields) != 2 || fields[0] != "ok" { return fmt.Errorf("malformed command status: %s", line) } cs := &CommandStatus{ ReferenceName: plumbing.ReferenceName(fields[1]), Status: status, } s.CommandStatuses = append(s.CommandStatuses, cs) return nil } // CommandStatus is the status of a reference in a report status. // See ReportStatus struct. type CommandStatus struct { ReferenceName plumbing.ReferenceName Status string } // Error returns the error, if any. func (s *CommandStatus) Error() error { if s.Status == ok { return nil } return fmt.Errorf("command error on %s: %s", s.ReferenceName.String(), s.Status) } func (s *CommandStatus) encode(w io.Writer) error { e := pktline.NewEncoder(w) if s.Error() == nil { return e.Encodef("ok %s\n", s.ReferenceName.String()) } return e.Encodef("ng %s %s\n", s.ReferenceName.String(), s.Status) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/report_status_test.go000066400000000000000000000150261345605224300277630ustar00rootroot00000000000000package packp import ( "bytes" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" . 
"gopkg.in/check.v1" ) type ReportStatusSuite struct{} var _ = Suite(&ReportStatusSuite{}) func (s *ReportStatusSuite) TestError(c *C) { rs := NewReportStatus() rs.UnpackStatus = "ok" c.Assert(rs.Error(), IsNil) rs.UnpackStatus = "OK" c.Assert(rs.Error(), ErrorMatches, "unpack error: OK") rs.UnpackStatus = "" c.Assert(rs.Error(), ErrorMatches, "unpack error: ") cs := &CommandStatus{ReferenceName: plumbing.ReferenceName("ref")} rs.UnpackStatus = "ok" rs.CommandStatuses = append(rs.CommandStatuses, cs) cs.Status = "ok" c.Assert(rs.Error(), IsNil) cs.Status = "OK" c.Assert(rs.Error(), ErrorMatches, "command error on ref: OK") cs.Status = "" c.Assert(rs.Error(), ErrorMatches, "command error on ref: ") } func (s *ReportStatusSuite) testEncodeDecodeOk(c *C, rs *ReportStatus, lines ...string) { s.testDecodeOk(c, rs, lines...) s.testEncodeOk(c, rs, lines...) } func (s *ReportStatusSuite) testDecodeOk(c *C, expected *ReportStatus, lines ...string) { r := toPktLines(c, lines) rs := NewReportStatus() c.Assert(rs.Decode(r), IsNil) c.Assert(rs, DeepEquals, expected) } func (s *ReportStatusSuite) testDecodeError(c *C, errorMatch string, lines ...string) { r := toPktLines(c, lines) rs := NewReportStatus() c.Assert(rs.Decode(r), ErrorMatches, errorMatch) } func (s *ReportStatusSuite) testEncodeOk(c *C, input *ReportStatus, lines ...string) { expected := pktlines(c, lines...) 
var buf bytes.Buffer c.Assert(input.Encode(&buf), IsNil) obtained := buf.Bytes() comment := Commentf("\nobtained = %s\nexpected = %s\n", string(obtained), string(expected)) c.Assert(obtained, DeepEquals, expected, comment) } func (s *ReportStatusSuite) TestEncodeDecodeOkOneReference(c *C) { rs := NewReportStatus() rs.UnpackStatus = "ok" rs.CommandStatuses = []*CommandStatus{{ ReferenceName: plumbing.ReferenceName("refs/heads/master"), Status: "ok", }} s.testEncodeDecodeOk(c, rs, "unpack ok\n", "ok refs/heads/master\n", pktline.FlushString, ) } func (s *ReportStatusSuite) TestEncodeDecodeOkOneReferenceFailed(c *C) { rs := NewReportStatus() rs.UnpackStatus = "my error" rs.CommandStatuses = []*CommandStatus{{ ReferenceName: plumbing.ReferenceName("refs/heads/master"), Status: "command error", }} s.testEncodeDecodeOk(c, rs, "unpack my error\n", "ng refs/heads/master command error\n", pktline.FlushString, ) } func (s *ReportStatusSuite) TestEncodeDecodeOkMoreReferences(c *C) { rs := NewReportStatus() rs.UnpackStatus = "ok" rs.CommandStatuses = []*CommandStatus{{ ReferenceName: plumbing.ReferenceName("refs/heads/master"), Status: "ok", }, { ReferenceName: plumbing.ReferenceName("refs/heads/a"), Status: "ok", }, { ReferenceName: plumbing.ReferenceName("refs/heads/b"), Status: "ok", }} s.testEncodeDecodeOk(c, rs, "unpack ok\n", "ok refs/heads/master\n", "ok refs/heads/a\n", "ok refs/heads/b\n", pktline.FlushString, ) } func (s *ReportStatusSuite) TestEncodeDecodeOkMoreReferencesFailed(c *C) { rs := NewReportStatus() rs.UnpackStatus = "my error" rs.CommandStatuses = []*CommandStatus{{ ReferenceName: plumbing.ReferenceName("refs/heads/master"), Status: "ok", }, { ReferenceName: plumbing.ReferenceName("refs/heads/a"), Status: "command error", }, { ReferenceName: plumbing.ReferenceName("refs/heads/b"), Status: "ok", }} s.testEncodeDecodeOk(c, rs, "unpack my error\n", "ok refs/heads/master\n", "ng refs/heads/a command error\n", "ok refs/heads/b\n", pktline.FlushString, ) } func 
(s *ReportStatusSuite) TestEncodeDecodeOkNoReferences(c *C) { expected := NewReportStatus() expected.UnpackStatus = "ok" s.testEncodeDecodeOk(c, expected, "unpack ok\n", pktline.FlushString, ) } func (s *ReportStatusSuite) TestEncodeDecodeOkNoReferencesFailed(c *C) { rs := NewReportStatus() rs.UnpackStatus = "my error" s.testEncodeDecodeOk(c, rs, "unpack my error\n", pktline.FlushString, ) } func (s *ReportStatusSuite) TestDecodeErrorOneReferenceNoFlush(c *C) { expected := NewReportStatus() expected.UnpackStatus = "ok" expected.CommandStatuses = []*CommandStatus{{ ReferenceName: plumbing.ReferenceName("refs/heads/master"), Status: "ok", }} s.testDecodeError(c, "missing flush", "unpack ok\n", "ok refs/heads/master\n", ) } func (s *ReportStatusSuite) TestDecodeErrorEmpty(c *C) { expected := NewReportStatus() expected.UnpackStatus = "ok" expected.CommandStatuses = []*CommandStatus{{ ReferenceName: plumbing.ReferenceName("refs/heads/master"), Status: "ok", }} s.testDecodeError(c, "unexpected EOF") } func (s *ReportStatusSuite) TestDecodeErrorMalformed(c *C) { expected := NewReportStatus() expected.UnpackStatus = "ok" expected.CommandStatuses = []*CommandStatus{{ ReferenceName: plumbing.ReferenceName("refs/heads/master"), Status: "ok", }} s.testDecodeError(c, "malformed unpack status: unpackok", "unpackok\n", pktline.FlushString, ) } func (s *ReportStatusSuite) TestDecodeErrorMalformed2(c *C) { expected := NewReportStatus() expected.UnpackStatus = "ok" expected.CommandStatuses = []*CommandStatus{{ ReferenceName: plumbing.ReferenceName("refs/heads/master"), Status: "ok", }} s.testDecodeError(c, "malformed unpack status: UNPACK OK", "UNPACK OK\n", pktline.FlushString, ) } func (s *ReportStatusSuite) TestDecodeErrorMalformedCommandStatus(c *C) { expected := NewReportStatus() expected.UnpackStatus = "ok" expected.CommandStatuses = []*CommandStatus{{ ReferenceName: plumbing.ReferenceName("refs/heads/master"), Status: "ok", }} s.testDecodeError(c, "malformed command status: 
ko refs/heads/master", "unpack ok\n", "ko refs/heads/master\n", pktline.FlushString, ) } func (s *ReportStatusSuite) TestDecodeErrorMalformedCommandStatus2(c *C) { expected := NewReportStatus() expected.UnpackStatus = "ok" expected.CommandStatuses = []*CommandStatus{{ ReferenceName: plumbing.ReferenceName("refs/heads/master"), Status: "ok", }} s.testDecodeError(c, "malformed command status: ng refs/heads/master", "unpack ok\n", "ng refs/heads/master\n", pktline.FlushString, ) } func (s *ReportStatusSuite) TestDecodeErrorPrematureFlush(c *C) { expected := NewReportStatus() expected.UnpackStatus = "ok" expected.CommandStatuses = []*CommandStatus{{ ReferenceName: plumbing.ReferenceName("refs/heads/master"), Status: "ok", }} s.testDecodeError(c, "premature flush", pktline.FlushString, ) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/shallowupd.go000066400000000000000000000033741345605224300261730ustar00rootroot00000000000000package packp import ( "bytes" "fmt" "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" ) const ( shallowLineLen = 48 unshallowLineLen = 50 ) type ShallowUpdate struct { Shallows []plumbing.Hash Unshallows []plumbing.Hash } func (r *ShallowUpdate) Decode(reader io.Reader) error { s := pktline.NewScanner(reader) for s.Scan() { line := s.Bytes() line = bytes.TrimSpace(line) var err error switch { case bytes.HasPrefix(line, shallow): err = r.decodeShallowLine(line) case bytes.HasPrefix(line, unshallow): err = r.decodeUnshallowLine(line) case bytes.Equal(line, pktline.Flush): return nil } if err != nil { return err } } return s.Err() } func (r *ShallowUpdate) decodeShallowLine(line []byte) error { hash, err := r.decodeLine(line, shallow, shallowLineLen) if err != nil { return err } r.Shallows = append(r.Shallows, hash) return nil } func (r *ShallowUpdate) decodeUnshallowLine(line []byte) error { hash, err := r.decodeLine(line, unshallow, unshallowLineLen) if err != nil { return err } r.Unshallows = 
append(r.Unshallows, hash) return nil } func (r *ShallowUpdate) decodeLine(line, prefix []byte, expLen int) (plumbing.Hash, error) { if len(line) != expLen { return plumbing.ZeroHash, fmt.Errorf("malformed %s%q", prefix, line) } raw := string(line[expLen-40 : expLen]) return plumbing.NewHash(raw), nil } func (r *ShallowUpdate) Encode(w io.Writer) error { e := pktline.NewEncoder(w) for _, h := range r.Shallows { if err := e.Encodef("%s%s\n", shallow, h.String()); err != nil { return err } } for _, h := range r.Unshallows { if err := e.Encodef("%s%s\n", unshallow, h.String()); err != nil { return err } } return e.Flush() } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/shallowupd_test.go000066400000000000000000000100521345605224300272210ustar00rootroot00000000000000package packp import ( "bytes" "gopkg.in/src-d/go-git.v4/plumbing" . "gopkg.in/check.v1" ) type ShallowUpdateSuite struct{} var _ = Suite(&ShallowUpdateSuite{}) func (s *ShallowUpdateSuite) TestDecodeWithLF(c *C) { raw := "" + "0035shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n" + "0035shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n" + "0000" su := &ShallowUpdate{} err := su.Decode(bytes.NewBufferString(raw)) c.Assert(err, IsNil) plumbing.HashesSort(su.Shallows) c.Assert(su.Unshallows, HasLen, 0) c.Assert(su.Shallows, HasLen, 2) c.Assert(su.Shallows, DeepEquals, []plumbing.Hash{ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), }) } func (s *ShallowUpdateSuite) TestDecode(c *C) { raw := "" + "0034shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "0034shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "0000" su := &ShallowUpdate{} err := su.Decode(bytes.NewBufferString(raw)) c.Assert(err, IsNil) plumbing.HashesSort(su.Shallows) c.Assert(su.Unshallows, HasLen, 0) c.Assert(su.Shallows, HasLen, 2) c.Assert(su.Shallows, DeepEquals, []plumbing.Hash{ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 
plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), }) } func (s *ShallowUpdateSuite) TestDecodeUnshallow(c *C) { raw := "" + "0036unshallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "0036unshallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + "0000" su := &ShallowUpdate{} err := su.Decode(bytes.NewBufferString(raw)) c.Assert(err, IsNil) plumbing.HashesSort(su.Unshallows) c.Assert(su.Shallows, HasLen, 0) c.Assert(su.Unshallows, HasLen, 2) c.Assert(su.Unshallows, DeepEquals, []plumbing.Hash{ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), }) } func (s *ShallowUpdateSuite) TestDecodeMalformed(c *C) { raw := "" + "0035unshallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + "0000" su := &ShallowUpdate{} err := su.Decode(bytes.NewBufferString(raw)) c.Assert(err, NotNil) } func (s *ShallowUpdateSuite) TestEncodeEmpty(c *C) { su := &ShallowUpdate{} buf := bytes.NewBuffer(nil) c.Assert(su.Encode(buf), IsNil) c.Assert(buf.String(), Equals, "0000") } func (s *ShallowUpdateSuite) TestEncode(c *C) { su := &ShallowUpdate{ Shallows: []plumbing.Hash{ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), }, Unshallows: []plumbing.Hash{ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), }, } buf := bytes.NewBuffer(nil) c.Assert(su.Encode(buf), IsNil) expected := "" + "0035shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n" + "0035shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n" + "0037unshallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n" + "0037unshallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n" + "0000" c.Assert(buf.String(), Equals, expected) } func (s *ShallowUpdateSuite) TestEncodeShallow(c *C) { su := &ShallowUpdate{ Shallows: []plumbing.Hash{ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), 
plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), }, } buf := bytes.NewBuffer(nil) c.Assert(su.Encode(buf), IsNil) expected := "" + "0035shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n" + "0035shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n" + "0000" c.Assert(buf.String(), Equals, expected) } func (s *ShallowUpdateSuite) TestEncodeUnshallow(c *C) { su := &ShallowUpdate{ Unshallows: []plumbing.Hash{ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), }, } buf := bytes.NewBuffer(nil) c.Assert(su.Encode(buf), IsNil) expected := "" + "0037unshallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n" + "0037unshallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n" + "0000" c.Assert(buf.String(), Equals, expected) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/sideband/000077500000000000000000000000001345605224300252245ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/sideband/common.go000066400000000000000000000014421345605224300270440ustar00rootroot00000000000000package sideband // Type sideband type "side-band" or "side-band-64k" type Type int8 const ( // Sideband legacy sideband type up to 1000-byte messages Sideband Type = iota // Sideband64k sideband type up to 65519-byte messages Sideband64k Type = iota // MaxPackedSize for Sideband type MaxPackedSize = 1000 // MaxPackedSize64k for Sideband64k type MaxPackedSize64k = 65520 ) // Channel sideband channel type Channel byte // WithPayload encode the payload as a message func (ch Channel) WithPayload(payload []byte) []byte { return append([]byte{byte(ch)}, payload...) 
} const ( // PackData packfile content PackData Channel = 1 // ProgressMessage progress messages ProgressMessage Channel = 2 // ErrorMessage fatal error message just before stream aborts ErrorMessage Channel = 3 ) golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/sideband/demux.go000066400000000000000000000062561345605224300267060ustar00rootroot00000000000000package sideband import ( "errors" "fmt" "io" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" ) // ErrMaxPackedExceeded returned by Read, if the maximum packed size is exceeded var ErrMaxPackedExceeded = errors.New("max. packed size exceeded") // Progress where the progress information is stored type Progress interface { io.Writer } // Demuxer demultiplexes the progress reports and error info interleaved with the // packfile itself. // // A sideband has three different channels the main one, called PackData, contains // the packfile data; the ErrorMessage channel, that contains server errors; and // the last one, ProgressMessage channel, containing information about the ongoing // task happening in the server (optional, can be suppressed sending NoProgress // or Quiet capabilities to the server) // // In order to demultiplex the data stream, method `Read` should be called to // retrieve the PackData channel, the incoming data from the ProgressMessage is // written at `Progress` (if any), if any message is retrieved from the // ErrorMessage channel an error is returned and we can assume that the // connection has been closed. 
type Demuxer struct { t Type r io.Reader s *pktline.Scanner max int pending []byte // Progress is where the progress messages are stored Progress Progress } // NewDemuxer returns a new Demuxer for the given t and read from r func NewDemuxer(t Type, r io.Reader) *Demuxer { max := MaxPackedSize64k if t == Sideband { max = MaxPackedSize } return &Demuxer{ t: t, r: r, max: max, s: pktline.NewScanner(r), } } // Read reads up to len(p) bytes from the PackData channel into p, an error can // be return if an error happens when reading or if a message is sent in the // ErrorMessage channel. // // When a ProgressMessage is read, is not copy to b, instead of this is written // to the Progress func (d *Demuxer) Read(b []byte) (n int, err error) { var read, req int req = len(b) for read < req { n, err := d.doRead(b[read:req]) read += n if err != nil { return read, err } } return read, nil } func (d *Demuxer) doRead(b []byte) (int, error) { read, err := d.nextPackData() size := len(read) wanted := len(b) if size > wanted { d.pending = read[wanted:] } if wanted > size { wanted = size } size = copy(b, read[:wanted]) return size, err } func (d *Demuxer) nextPackData() ([]byte, error) { content := d.getPending() if len(content) != 0 { return content, nil } if !d.s.Scan() { if err := d.s.Err(); err != nil { return nil, err } return nil, io.EOF } content = d.s.Bytes() size := len(content) if size == 0 { return nil, nil } else if size > d.max { return nil, ErrMaxPackedExceeded } switch Channel(content[0]) { case PackData: return content[1:], nil case ProgressMessage: if d.Progress != nil { _, err := d.Progress.Write(content[1:]) return nil, err } case ErrorMessage: return nil, fmt.Errorf("unexpected error: %s", content[1:]) default: return nil, fmt.Errorf("unknown channel %s", content) } return nil, nil } func (d *Demuxer) getPending() (b []byte) { if len(d.pending) == 0 { return nil } content := d.pending d.pending = nil return content } 
golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/sideband/demux_test.go000066400000000000000000000102611345605224300277340ustar00rootroot00000000000000package sideband import ( "bytes" "errors" "io" "io/ioutil" "testing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" . "gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type SidebandSuite struct{} var _ = Suite(&SidebandSuite{}) func (s *SidebandSuite) TestDecode(c *C) { expected := []byte("abcdefghijklmnopqrstuvwxyz") buf := bytes.NewBuffer(nil) e := pktline.NewEncoder(buf) e.Encode(PackData.WithPayload(expected[0:8])) e.Encode(ProgressMessage.WithPayload([]byte{'F', 'O', 'O', '\n'})) e.Encode(PackData.WithPayload(expected[8:16])) e.Encode(PackData.WithPayload(expected[16:26])) content := make([]byte, 26) d := NewDemuxer(Sideband64k, buf) n, err := io.ReadFull(d, content) c.Assert(err, IsNil) c.Assert(n, Equals, 26) c.Assert(content, DeepEquals, expected) } func (s *SidebandSuite) TestDecodeMoreThanContain(c *C) { expected := []byte("abcdefghijklmnopqrstuvwxyz") buf := bytes.NewBuffer(nil) e := pktline.NewEncoder(buf) e.Encode(PackData.WithPayload(expected)) content := make([]byte, 42) d := NewDemuxer(Sideband64k, buf) n, err := io.ReadFull(d, content) c.Assert(err, Equals, io.ErrUnexpectedEOF) c.Assert(n, Equals, 26) c.Assert(content[0:26], DeepEquals, expected) } func (s *SidebandSuite) TestDecodeWithError(c *C) { expected := []byte("abcdefghijklmnopqrstuvwxyz") buf := bytes.NewBuffer(nil) e := pktline.NewEncoder(buf) e.Encode(PackData.WithPayload(expected[0:8])) e.Encode(ErrorMessage.WithPayload([]byte{'F', 'O', 'O', '\n'})) e.Encode(PackData.WithPayload(expected[8:16])) e.Encode(PackData.WithPayload(expected[16:26])) content := make([]byte, 26) d := NewDemuxer(Sideband64k, buf) n, err := io.ReadFull(d, content) c.Assert(err, ErrorMatches, "unexpected error: FOO\n") c.Assert(n, Equals, 8) c.Assert(content[0:8], DeepEquals, expected[0:8]) } type mockReader struct{} func (r *mockReader) 
Read([]byte) (int, error) { return 0, errors.New("foo") } func (s *SidebandSuite) TestDecodeFromFailingReader(c *C) { content := make([]byte, 26) d := NewDemuxer(Sideband64k, &mockReader{}) n, err := io.ReadFull(d, content) c.Assert(err, ErrorMatches, "foo") c.Assert(n, Equals, 0) } func (s *SidebandSuite) TestDecodeWithProgress(c *C) { expected := []byte("abcdefghijklmnopqrstuvwxyz") input := bytes.NewBuffer(nil) e := pktline.NewEncoder(input) e.Encode(PackData.WithPayload(expected[0:8])) e.Encode(ProgressMessage.WithPayload([]byte{'F', 'O', 'O', '\n'})) e.Encode(PackData.WithPayload(expected[8:16])) e.Encode(PackData.WithPayload(expected[16:26])) output := bytes.NewBuffer(nil) content := make([]byte, 26) d := NewDemuxer(Sideband64k, input) d.Progress = output n, err := io.ReadFull(d, content) c.Assert(err, IsNil) c.Assert(n, Equals, 26) c.Assert(content, DeepEquals, expected) progress, err := ioutil.ReadAll(output) c.Assert(err, IsNil) c.Assert(progress, DeepEquals, []byte{'F', 'O', 'O', '\n'}) } func (s *SidebandSuite) TestDecodeWithUnknownChannel(c *C) { buf := bytes.NewBuffer(nil) e := pktline.NewEncoder(buf) e.Encode([]byte{'4', 'F', 'O', 'O', '\n'}) content := make([]byte, 26) d := NewDemuxer(Sideband64k, buf) n, err := io.ReadFull(d, content) c.Assert(err, ErrorMatches, "unknown channel 4FOO\n") c.Assert(n, Equals, 0) } func (s *SidebandSuite) TestDecodeWithPending(c *C) { expected := []byte("abcdefghijklmnopqrstuvwxyz") buf := bytes.NewBuffer(nil) e := pktline.NewEncoder(buf) e.Encode(PackData.WithPayload(expected[0:8])) e.Encode(PackData.WithPayload(expected[8:16])) e.Encode(PackData.WithPayload(expected[16:26])) content := make([]byte, 13) d := NewDemuxer(Sideband64k, buf) n, err := io.ReadFull(d, content) c.Assert(err, IsNil) c.Assert(n, Equals, 13) c.Assert(content, DeepEquals, expected[0:13]) n, err = d.Read(content) c.Assert(err, IsNil) c.Assert(n, Equals, 13) c.Assert(content, DeepEquals, expected[13:26]) } func (s *SidebandSuite) 
TestDecodeErrMaxPacked(c *C) { buf := bytes.NewBuffer(nil) e := pktline.NewEncoder(buf) e.Encode(PackData.WithPayload(bytes.Repeat([]byte{'0'}, MaxPackedSize+1))) content := make([]byte, 13) d := NewDemuxer(Sideband, buf) n, err := io.ReadFull(d, content) c.Assert(err, Equals, ErrMaxPackedExceeded) c.Assert(n, Equals, 0) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/sideband/doc.go000066400000000000000000000025131345605224300263210ustar00rootroot00000000000000// Package sideband implements a sideband mutiplex/demultiplexer package sideband // If 'side-band' or 'side-band-64k' capabilities have been specified by // the client, the server will send the packfile data multiplexed. // // Either mode indicates that the packfile data will be streamed broken // up into packets of up to either 1000 bytes in the case of 'side_band', // or 65520 bytes in the case of 'side_band_64k'. Each packet is made up // of a leading 4-byte pkt-line length of how much data is in the packet, // followed by a 1-byte stream code, followed by the actual data. // // The stream code can be one of: // // 1 - pack data // 2 - progress messages // 3 - fatal error message just before stream aborts // // The "side-band-64k" capability came about as a way for newer clients // that can handle much larger packets to request packets that are // actually crammed nearly full, while maintaining backward compatibility // for the older clients. // // Further, with side-band and its up to 1000-byte messages, it's actually // 999 bytes of payload and 1 byte for the stream code. With side-band-64k, // same deal, you have up to 65519 bytes of data and 1 byte for the stream // code. // // The client MUST send only maximum of one of "side-band" and "side- // band-64k". Server MUST diagnose it as an error if client requests // both. 
golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/sideband/muxer.go000066400000000000000000000027231345605224300267170ustar00rootroot00000000000000package sideband import ( "io" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" ) // Muxer multiplex the packfile along with the progress messages and the error // information. The multiplex is perform using pktline format. type Muxer struct { max int e *pktline.Encoder } const chLen = 1 // NewMuxer returns a new Muxer for the given t that writes on w. // // If t is equal to `Sideband` the max pack size is set to MaxPackedSize, in any // other value is given, max pack is set to MaxPackedSize64k, that is the // maximum length of a line in pktline format. func NewMuxer(t Type, w io.Writer) *Muxer { max := MaxPackedSize64k if t == Sideband { max = MaxPackedSize } return &Muxer{ max: max - chLen, e: pktline.NewEncoder(w), } } // Write writes p in the PackData channel func (m *Muxer) Write(p []byte) (int, error) { return m.WriteChannel(PackData, p) } // WriteChannel writes p in the given channel. This method can be used with any // channel, but is recommend use it only for the ProgressMessage and // ErrorMessage channels and use Write for the PackData channel func (m *Muxer) WriteChannel(t Channel, p []byte) (int, error) { wrote := 0 size := len(p) for wrote < size { n, err := m.doWrite(t, p[wrote:]) wrote += n if err != nil { return wrote, err } } return wrote, nil } func (m *Muxer) doWrite(ch Channel, p []byte) (int, error) { sz := len(p) if sz > m.max { sz = m.max } return sz, m.e.Encode(ch.WithPayload(p[:sz])) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/sideband/muxer_test.go000066400000000000000000000016131345605224300277530ustar00rootroot00000000000000package sideband import ( "bytes" . 
"gopkg.in/check.v1" ) func (s *SidebandSuite) TestMuxerWrite(c *C) { buf := bytes.NewBuffer(nil) m := NewMuxer(Sideband, buf) n, err := m.Write(bytes.Repeat([]byte{'F'}, (MaxPackedSize-1)*2)) c.Assert(err, IsNil) c.Assert(n, Equals, 1998) c.Assert(buf.Len(), Equals, 2008) } func (s *SidebandSuite) TestMuxerWriteChannelMultipleChannels(c *C) { buf := bytes.NewBuffer(nil) m := NewMuxer(Sideband, buf) n, err := m.WriteChannel(PackData, bytes.Repeat([]byte{'D'}, 4)) c.Assert(err, IsNil) c.Assert(n, Equals, 4) n, err = m.WriteChannel(ProgressMessage, bytes.Repeat([]byte{'P'}, 4)) c.Assert(err, IsNil) c.Assert(n, Equals, 4) n, err = m.WriteChannel(PackData, bytes.Repeat([]byte{'D'}, 4)) c.Assert(err, IsNil) c.Assert(n, Equals, 4) c.Assert(buf.Len(), Equals, 27) c.Assert(buf.String(), Equals, "0009\x01DDDD0009\x02PPPP0009\x01DDDD") } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/srvresp.go000066400000000000000000000053341345605224300255130ustar00rootroot00000000000000package packp import ( "bufio" "bytes" "errors" "fmt" "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" ) const ackLineLen = 44 // ServerResponse object acknowledgement from upload-pack service type ServerResponse struct { ACKs []plumbing.Hash } // Decode decodes the response into the struct, isMultiACK should be true, if // the request was done with multi_ack or multi_ack_detailed capabilities. 
func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error { // TODO: implement support for multi_ack or multi_ack_detailed responses if isMultiACK { return errors.New("multi_ack and multi_ack_detailed are not supported") } s := pktline.NewScanner(reader) for s.Scan() { line := s.Bytes() if err := r.decodeLine(line); err != nil { return err } // we need to detect when the end of a response header and the beginning // of a packfile header happened, some requests to the git daemon // produces a duplicate ACK header even when multi_ack is not supported. stop, err := r.stopReading(reader) if err != nil { return err } if stop { break } } return s.Err() } // stopReading detects when a valid command such as ACK or NAK is found to be // read in the buffer without moving the read pointer. func (r *ServerResponse) stopReading(reader *bufio.Reader) (bool, error) { ahead, err := reader.Peek(7) if err == io.EOF { return true, nil } if err != nil { return false, err } if len(ahead) > 4 && r.isValidCommand(ahead[0:3]) { return false, nil } if len(ahead) == 7 && r.isValidCommand(ahead[4:]) { return false, nil } return true, nil } func (r *ServerResponse) isValidCommand(b []byte) bool { commands := [][]byte{ack, nak} for _, c := range commands { if bytes.Equal(b, c) { return true } } return false } func (r *ServerResponse) decodeLine(line []byte) error { if len(line) == 0 { return fmt.Errorf("unexpected flush") } if bytes.Equal(line[0:3], ack) { return r.decodeACKLine(line) } if bytes.Equal(line[0:3], nak) { return nil } return fmt.Errorf("unexpected content %q", string(line)) } func (r *ServerResponse) decodeACKLine(line []byte) error { if len(line) < ackLineLen { return fmt.Errorf("malformed ACK %q", line) } sp := bytes.Index(line, []byte(" ")) h := plumbing.NewHash(string(line[sp+1 : sp+41])) r.ACKs = append(r.ACKs, h) return nil } // Encode encodes the ServerResponse into a writer. 
func (r *ServerResponse) Encode(w io.Writer) error { if len(r.ACKs) > 1 { return errors.New("multi_ack and multi_ack_detailed are not supported") } e := pktline.NewEncoder(w) if len(r.ACKs) == 0 { return e.Encodef("%s\n", nak) } return e.Encodef("%s %s\n", ack, r.ACKs[0].String()) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/srvresp_test.go000066400000000000000000000043261345605224300265520ustar00rootroot00000000000000package packp import ( "bufio" "bytes" "gopkg.in/src-d/go-git.v4/plumbing" . "gopkg.in/check.v1" ) type ServerResponseSuite struct{} var _ = Suite(&ServerResponseSuite{}) func (s *ServerResponseSuite) TestDecodeNAK(c *C) { raw := "0008NAK\n" sr := &ServerResponse{} err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false) c.Assert(err, IsNil) c.Assert(sr.ACKs, HasLen, 0) } func (s *ServerResponseSuite) TestDecodeACK(c *C) { raw := "0031ACK 6ecf0ef2c2dffb796033e5a02219af86ec6584e5\n" sr := &ServerResponse{} err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false) c.Assert(err, IsNil) c.Assert(sr.ACKs, HasLen, 1) c.Assert(sr.ACKs[0], Equals, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) } func (s *ServerResponseSuite) TestDecodeMultipleACK(c *C) { raw := "" + "0031ACK 1111111111111111111111111111111111111111\n" + "0031ACK 6ecf0ef2c2dffb796033e5a02219af86ec6584e5\n" + "00080PACK\n" sr := &ServerResponse{} err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false) c.Assert(err, IsNil) c.Assert(sr.ACKs, HasLen, 2) c.Assert(sr.ACKs[0], Equals, plumbing.NewHash("1111111111111111111111111111111111111111")) c.Assert(sr.ACKs[1], Equals, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) } func (s *ServerResponseSuite) TestDecodeMultipleACKWithSideband(c *C) { raw := "" + "0031ACK 1111111111111111111111111111111111111111\n" + "0031ACK 6ecf0ef2c2dffb796033e5a02219af86ec6584e5\n" + "00080aaaa\n" sr := &ServerResponse{} err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false) 
c.Assert(err, IsNil) c.Assert(sr.ACKs, HasLen, 2) c.Assert(sr.ACKs[0], Equals, plumbing.NewHash("1111111111111111111111111111111111111111")) c.Assert(sr.ACKs[1], Equals, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) } func (s *ServerResponseSuite) TestDecodeMalformed(c *C) { raw := "0029ACK 6ecf0ef2c2dffb796033e5a02219af86ec6584e\n" sr := &ServerResponse{} err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false) c.Assert(err, NotNil) } func (s *ServerResponseSuite) TestDecodeMultiACK(c *C) { sr := &ServerResponse{} err := sr.Decode(bufio.NewReader(bytes.NewBuffer(nil)), true) c.Assert(err, NotNil) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/ulreq.go000066400000000000000000000115321345605224300251340ustar00rootroot00000000000000package packp import ( "fmt" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" ) // UploadRequest values represent the information transmitted on a // upload-request message. Values from this type are not zero-value // safe, use the New function instead. // This is a low level type, use UploadPackRequest instead. type UploadRequest struct { Capabilities *capability.List Wants []plumbing.Hash Shallows []plumbing.Hash Depth Depth } // Depth values stores the desired depth of the requested packfile: see // DepthCommit, DepthSince and DepthReference. type Depth interface { isDepth() IsZero() bool } // DepthCommits values stores the maximum number of requested commits in // the packfile. Zero means infinite. A negative value will have // undefined consequences. type DepthCommits int func (d DepthCommits) isDepth() {} func (d DepthCommits) IsZero() bool { return d == 0 } // DepthSince values requests only commits newer than the specified time. type DepthSince time.Time func (d DepthSince) isDepth() {} func (d DepthSince) IsZero() bool { return time.Time(d).IsZero() } // DepthReference requests only commits not to found in the specified reference. 
type DepthReference string func (d DepthReference) isDepth() {} func (d DepthReference) IsZero() bool { return string(d) == "" } // NewUploadRequest returns a pointer to a new UploadRequest value, ready to be // used. It has no capabilities, wants or shallows and an infinite depth. Please // note that to encode an upload-request it has to have at least one wanted hash. func NewUploadRequest() *UploadRequest { return &UploadRequest{ Capabilities: capability.NewList(), Wants: []plumbing.Hash{}, Shallows: []plumbing.Hash{}, Depth: DepthCommits(0), } } // NewUploadRequestFromCapabilities returns a pointer to a new UploadRequest // value, the request capabilities are filled with the most optiomal ones, based // on the adv value (advertaised capabilities), the UploadRequest generated it // has no wants or shallows and an infinite depth. func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest { r := NewUploadRequest() if adv.Supports(capability.MultiACKDetailed) { r.Capabilities.Set(capability.MultiACKDetailed) } else if adv.Supports(capability.MultiACK) { r.Capabilities.Set(capability.MultiACK) } if adv.Supports(capability.Sideband64k) { r.Capabilities.Set(capability.Sideband64k) } else if adv.Supports(capability.Sideband) { r.Capabilities.Set(capability.Sideband) } if adv.Supports(capability.ThinPack) { r.Capabilities.Set(capability.ThinPack) } if adv.Supports(capability.OFSDelta) { r.Capabilities.Set(capability.OFSDelta) } if adv.Supports(capability.Agent) { r.Capabilities.Set(capability.Agent, capability.DefaultAgent) } return r } // Validate validates the content of UploadRequest, following the next rules: // - Wants MUST have at least one reference // - capability.Shallow MUST be present if Shallows is not empty // - is a non-zero DepthCommits is given capability.Shallow MUST be present // - is a DepthSince is given capability.Shallow MUST be present // - is a DepthReference is given capability.DeepenNot MUST be present // - MUST contain only 
maximum of one of capability.Sideband and capability.Sideband64k // - MUST contain only maximum of one of capability.MultiACK and capability.MultiACKDetailed func (r *UploadRequest) Validate() error { if len(r.Wants) == 0 { return fmt.Errorf("want can't be empty") } if err := r.validateRequiredCapabilities(); err != nil { return err } if err := r.validateConflictCapabilities(); err != nil { return err } return nil } func (r *UploadRequest) validateRequiredCapabilities() error { msg := "missing capability %s" if len(r.Shallows) != 0 && !r.Capabilities.Supports(capability.Shallow) { return fmt.Errorf(msg, capability.Shallow) } switch r.Depth.(type) { case DepthCommits: if r.Depth != DepthCommits(0) { if !r.Capabilities.Supports(capability.Shallow) { return fmt.Errorf(msg, capability.Shallow) } } case DepthSince: if !r.Capabilities.Supports(capability.DeepenSince) { return fmt.Errorf(msg, capability.DeepenSince) } case DepthReference: if !r.Capabilities.Supports(capability.DeepenNot) { return fmt.Errorf(msg, capability.DeepenNot) } } return nil } func (r *UploadRequest) validateConflictCapabilities() error { msg := "capabilities %s and %s are mutually exclusive" if r.Capabilities.Supports(capability.Sideband) && r.Capabilities.Supports(capability.Sideband64k) { return fmt.Errorf(msg, capability.Sideband, capability.Sideband64k) } if r.Capabilities.Supports(capability.MultiACK) && r.Capabilities.Supports(capability.MultiACKDetailed) { return fmt.Errorf(msg, capability.MultiACK, capability.MultiACKDetailed) } return nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/ulreq_decode.go000066400000000000000000000125301345605224300264360ustar00rootroot00000000000000package packp import ( "bytes" "encoding/hex" "fmt" "io" "strconv" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" ) // Decode reads the next upload-request form its input and // stores it in the UploadRequest. 
func (u *UploadRequest) Decode(r io.Reader) error { d := newUlReqDecoder(r) return d.Decode(u) } type ulReqDecoder struct { s *pktline.Scanner // a pkt-line scanner from the input stream line []byte // current pkt-line contents, use parser.nextLine() to make it advance nLine int // current pkt-line number for debugging, begins at 1 err error // sticky error, use the parser.error() method to fill this out data *UploadRequest // parsed data is stored here } func newUlReqDecoder(r io.Reader) *ulReqDecoder { return &ulReqDecoder{ s: pktline.NewScanner(r), } } func (d *ulReqDecoder) Decode(v *UploadRequest) error { d.data = v for state := d.decodeFirstWant; state != nil; { state = state() } return d.err } // fills out the parser stiky error func (d *ulReqDecoder) error(format string, a ...interface{}) { msg := fmt.Sprintf( "pkt-line %d: %s", d.nLine, fmt.Sprintf(format, a...), ) d.err = NewErrUnexpectedData(msg, d.line) } // Reads a new pkt-line from the scanner, makes its payload available as // p.line and increments p.nLine. A successful invocation returns true, // otherwise, false is returned and the sticky error is filled out // accordingly. Trims eols at the end of the payloads. 
func (d *ulReqDecoder) nextLine() bool { d.nLine++ if !d.s.Scan() { if d.err = d.s.Err(); d.err != nil { return false } d.error("EOF") return false } d.line = d.s.Bytes() d.line = bytes.TrimSuffix(d.line, eol) return true } // Expected format: want [ capabilities] func (d *ulReqDecoder) decodeFirstWant() stateFn { if ok := d.nextLine(); !ok { return nil } if !bytes.HasPrefix(d.line, want) { d.error("missing 'want ' prefix") return nil } d.line = bytes.TrimPrefix(d.line, want) hash, ok := d.readHash() if !ok { return nil } d.data.Wants = append(d.data.Wants, hash) return d.decodeCaps } func (d *ulReqDecoder) readHash() (plumbing.Hash, bool) { if len(d.line) < hashSize { d.err = fmt.Errorf("malformed hash: %v", d.line) return plumbing.ZeroHash, false } var hash plumbing.Hash if _, err := hex.Decode(hash[:], d.line[:hashSize]); err != nil { d.error("invalid hash text: %s", err) return plumbing.ZeroHash, false } d.line = d.line[hashSize:] return hash, true } // Expected format: sp cap1 sp cap2 sp cap3... 
func (d *ulReqDecoder) decodeCaps() stateFn { d.line = bytes.TrimPrefix(d.line, sp) if err := d.data.Capabilities.Decode(d.line); err != nil { d.error("invalid capabilities: %s", err) } return d.decodeOtherWants } // Expected format: want func (d *ulReqDecoder) decodeOtherWants() stateFn { if ok := d.nextLine(); !ok { return nil } if bytes.HasPrefix(d.line, shallow) { return d.decodeShallow } if bytes.HasPrefix(d.line, deepen) { return d.decodeDeepen } if len(d.line) == 0 { return nil } if !bytes.HasPrefix(d.line, want) { d.error("unexpected payload while expecting a want: %q", d.line) return nil } d.line = bytes.TrimPrefix(d.line, want) hash, ok := d.readHash() if !ok { return nil } d.data.Wants = append(d.data.Wants, hash) return d.decodeOtherWants } // Expected format: shallow func (d *ulReqDecoder) decodeShallow() stateFn { if bytes.HasPrefix(d.line, deepen) { return d.decodeDeepen } if len(d.line) == 0 { return nil } if !bytes.HasPrefix(d.line, shallow) { d.error("unexpected payload while expecting a shallow: %q", d.line) return nil } d.line = bytes.TrimPrefix(d.line, shallow) hash, ok := d.readHash() if !ok { return nil } d.data.Shallows = append(d.data.Shallows, hash) if ok := d.nextLine(); !ok { return nil } return d.decodeShallow } // Expected format: deepen / deepen-since
    / deepen-not func (d *ulReqDecoder) decodeDeepen() stateFn { if bytes.HasPrefix(d.line, deepenCommits) { return d.decodeDeepenCommits } if bytes.HasPrefix(d.line, deepenSince) { return d.decodeDeepenSince } if bytes.HasPrefix(d.line, deepenReference) { return d.decodeDeepenReference } if len(d.line) == 0 { return nil } d.error("unexpected deepen specification: %q", d.line) return nil } func (d *ulReqDecoder) decodeDeepenCommits() stateFn { d.line = bytes.TrimPrefix(d.line, deepenCommits) var n int if n, d.err = strconv.Atoi(string(d.line)); d.err != nil { return nil } if n < 0 { d.err = fmt.Errorf("negative depth") return nil } d.data.Depth = DepthCommits(n) return d.decodeFlush } func (d *ulReqDecoder) decodeDeepenSince() stateFn { d.line = bytes.TrimPrefix(d.line, deepenSince) var secs int64 secs, d.err = strconv.ParseInt(string(d.line), 10, 64) if d.err != nil { return nil } t := time.Unix(secs, 0).UTC() d.data.Depth = DepthSince(t) return d.decodeFlush } func (d *ulReqDecoder) decodeDeepenReference() stateFn { d.line = bytes.TrimPrefix(d.line, deepenReference) d.data.Depth = DepthReference(string(d.line)) return d.decodeFlush } func (d *ulReqDecoder) decodeFlush() stateFn { if ok := d.nextLine(); !ok { return nil } if len(d.line) != 0 { d.err = fmt.Errorf("unexpected payload while expecting a flush-pkt: %q", d.line) } return nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/ulreq_decode_test.go000066400000000000000000000410761345605224300275040ustar00rootroot00000000000000package packp import ( "bytes" "io" "sort" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" . 
"gopkg.in/check.v1" ) type UlReqDecodeSuite struct{} var _ = Suite(&UlReqDecodeSuite{}) func (s *UlReqDecodeSuite) TestEmpty(c *C) { ur := NewUploadRequest() var buf bytes.Buffer d := newUlReqDecoder(&buf) err := d.Decode(ur) c.Assert(err, ErrorMatches, "pkt-line 1: EOF") } func (s *UlReqDecodeSuite) TestNoWant(c *C) { payloads := []string{ "foobar", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*missing 'want '.*") } func (s *UlReqDecodeSuite) testDecoderErrorMatches(c *C, input io.Reader, pattern string) { ur := NewUploadRequest() d := newUlReqDecoder(input) err := d.Decode(ur) c.Assert(err, ErrorMatches, pattern) } func (s *UlReqDecodeSuite) TestInvalidFirstHash(c *C) { payloads := []string{ "want 6ecf0ef2c2dffb796alberto2219af86ec6584e5\n", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*invalid hash.*") } func (s *UlReqDecodeSuite) TestWantOK(c *C) { payloads := []string{ "want 1111111111111111111111111111111111111111", pktline.FlushString, } ur := s.testDecodeOK(c, payloads) c.Assert(ur.Wants, DeepEquals, []plumbing.Hash{ plumbing.NewHash("1111111111111111111111111111111111111111"), }) } func (s *UlReqDecodeSuite) testDecodeOK(c *C, payloads []string) *UploadRequest { var buf bytes.Buffer e := pktline.NewEncoder(&buf) err := e.EncodeString(payloads...) 
c.Assert(err, IsNil) ur := NewUploadRequest() d := newUlReqDecoder(&buf) err = d.Decode(ur) c.Assert(err, IsNil) return ur } func (s *UlReqDecodeSuite) TestWantWithCapabilities(c *C) { payloads := []string{ "want 1111111111111111111111111111111111111111 ofs-delta multi_ack", pktline.FlushString, } ur := s.testDecodeOK(c, payloads) c.Assert(ur.Wants, DeepEquals, []plumbing.Hash{ plumbing.NewHash("1111111111111111111111111111111111111111")}) c.Assert(ur.Capabilities.Supports(capability.OFSDelta), Equals, true) c.Assert(ur.Capabilities.Supports(capability.MultiACK), Equals, true) } func (s *UlReqDecodeSuite) TestManyWantsNoCapabilities(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333", "want 4444444444444444444444444444444444444444", "want 1111111111111111111111111111111111111111", "want 2222222222222222222222222222222222222222", pktline.FlushString, } ur := s.testDecodeOK(c, payloads) expected := []plumbing.Hash{ plumbing.NewHash("1111111111111111111111111111111111111111"), plumbing.NewHash("2222222222222222222222222222222222222222"), plumbing.NewHash("3333333333333333333333333333333333333333"), plumbing.NewHash("4444444444444444444444444444444444444444"), } sort.Sort(byHash(ur.Wants)) sort.Sort(byHash(expected)) c.Assert(ur.Wants, DeepEquals, expected) } type byHash []plumbing.Hash func (a byHash) Len() int { return len(a) } func (a byHash) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byHash) Less(i, j int) bool { ii := [20]byte(a[i]) jj := [20]byte(a[j]) return bytes.Compare(ii[:], jj[:]) < 0 } func (s *UlReqDecodeSuite) TestManyWantsBadWant(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333", "want 4444444444444444444444444444444444444444", "foo", "want 2222222222222222222222222222222222222222", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") } func (s *UlReqDecodeSuite) TestManyWantsInvalidHash(c *C) { payloads := []string{ "want 
3333333333333333333333333333333333333333", "want 4444444444444444444444444444444444444444", "want 1234567890abcdef", "want 2222222222222222222222222222222222222222", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*malformed hash.*") } func (s *UlReqDecodeSuite) TestManyWantsWithCapabilities(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "want 4444444444444444444444444444444444444444", "want 1111111111111111111111111111111111111111", "want 2222222222222222222222222222222222222222", pktline.FlushString, } ur := s.testDecodeOK(c, payloads) expected := []plumbing.Hash{ plumbing.NewHash("1111111111111111111111111111111111111111"), plumbing.NewHash("2222222222222222222222222222222222222222"), plumbing.NewHash("3333333333333333333333333333333333333333"), plumbing.NewHash("4444444444444444444444444444444444444444"), } sort.Sort(byHash(ur.Wants)) sort.Sort(byHash(expected)) c.Assert(ur.Wants, DeepEquals, expected) c.Assert(ur.Capabilities.Supports(capability.OFSDelta), Equals, true) c.Assert(ur.Capabilities.Supports(capability.MultiACK), Equals, true) } func (s *UlReqDecodeSuite) TestSingleShallowSingleWant(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", pktline.FlushString, } ur := s.testDecodeOK(c, payloads) expectedWants := []plumbing.Hash{ plumbing.NewHash("3333333333333333333333333333333333333333"), } expectedShallows := []plumbing.Hash{ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), } c.Assert(ur.Wants, DeepEquals, expectedWants) c.Assert(ur.Capabilities.Supports(capability.OFSDelta), Equals, true) c.Assert(ur.Capabilities.Supports(capability.MultiACK), Equals, true) c.Assert(ur.Shallows, DeepEquals, expectedShallows) } func (s *UlReqDecodeSuite) TestSingleShallowManyWants(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta 
multi_ack", "want 4444444444444444444444444444444444444444", "want 1111111111111111111111111111111111111111", "want 2222222222222222222222222222222222222222", "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", pktline.FlushString, } ur := s.testDecodeOK(c, payloads) expectedWants := []plumbing.Hash{ plumbing.NewHash("1111111111111111111111111111111111111111"), plumbing.NewHash("2222222222222222222222222222222222222222"), plumbing.NewHash("3333333333333333333333333333333333333333"), plumbing.NewHash("4444444444444444444444444444444444444444"), } sort.Sort(byHash(expectedWants)) expectedShallows := []plumbing.Hash{ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), } sort.Sort(byHash(ur.Wants)) c.Assert(ur.Wants, DeepEquals, expectedWants) c.Assert(ur.Capabilities.Supports(capability.OFSDelta), Equals, true) c.Assert(ur.Capabilities.Supports(capability.MultiACK), Equals, true) c.Assert(ur.Shallows, DeepEquals, expectedShallows) } func (s *UlReqDecodeSuite) TestManyShallowSingleWant(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "shallow cccccccccccccccccccccccccccccccccccccccc", "shallow dddddddddddddddddddddddddddddddddddddddd", pktline.FlushString, } ur := s.testDecodeOK(c, payloads) expectedWants := []plumbing.Hash{ plumbing.NewHash("3333333333333333333333333333333333333333"), } expectedShallows := []plumbing.Hash{ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), plumbing.NewHash("cccccccccccccccccccccccccccccccccccccccc"), plumbing.NewHash("dddddddddddddddddddddddddddddddddddddddd"), } sort.Sort(byHash(expectedShallows)) c.Assert(ur.Wants, DeepEquals, expectedWants) c.Assert(ur.Capabilities.Supports(capability.OFSDelta), Equals, true) c.Assert(ur.Capabilities.Supports(capability.MultiACK), Equals, true) 
sort.Sort(byHash(ur.Shallows)) c.Assert(ur.Shallows, DeepEquals, expectedShallows) } func (s *UlReqDecodeSuite) TestManyShallowManyWants(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "want 4444444444444444444444444444444444444444", "want 1111111111111111111111111111111111111111", "want 2222222222222222222222222222222222222222", "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "shallow cccccccccccccccccccccccccccccccccccccccc", "shallow dddddddddddddddddddddddddddddddddddddddd", pktline.FlushString, } ur := s.testDecodeOK(c, payloads) expectedWants := []plumbing.Hash{ plumbing.NewHash("1111111111111111111111111111111111111111"), plumbing.NewHash("2222222222222222222222222222222222222222"), plumbing.NewHash("3333333333333333333333333333333333333333"), plumbing.NewHash("4444444444444444444444444444444444444444"), } sort.Sort(byHash(expectedWants)) expectedShallows := []plumbing.Hash{ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), plumbing.NewHash("cccccccccccccccccccccccccccccccccccccccc"), plumbing.NewHash("dddddddddddddddddddddddddddddddddddddddd"), } sort.Sort(byHash(expectedShallows)) sort.Sort(byHash(ur.Wants)) c.Assert(ur.Wants, DeepEquals, expectedWants) c.Assert(ur.Capabilities.Supports(capability.OFSDelta), Equals, true) c.Assert(ur.Capabilities.Supports(capability.MultiACK), Equals, true) sort.Sort(byHash(ur.Shallows)) c.Assert(ur.Shallows, DeepEquals, expectedShallows) } func (s *UlReqDecodeSuite) TestMalformedShallow(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "shalow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") } func (s *UlReqDecodeSuite) TestMalformedShallowHash(c *C) { payloads := []string{ "want 
3333333333333333333333333333333333333333 ofs-delta multi_ack", "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*malformed hash.*") } func (s *UlReqDecodeSuite) TestMalformedShallowManyShallows(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "shalow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "shallow cccccccccccccccccccccccccccccccccccccccc", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") } func (s *UlReqDecodeSuite) TestMalformedDeepenSpec(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen-foo 34", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected deepen.*") } func (s *UlReqDecodeSuite) TestMalformedDeepenSingleWant(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "depth 32", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") } func (s *UlReqDecodeSuite) TestMalformedDeepenMultiWant(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "want 2222222222222222222222222222222222222222", "depth 32", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") } func (s *UlReqDecodeSuite) TestMalformedDeepenWithSingleShallow(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "shallow 2222222222222222222222222222222222222222", "depth 32", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") } func (s *UlReqDecodeSuite) TestMalformedDeepenWithMultiShallow(c *C) { payloads := []string{ "want 
3333333333333333333333333333333333333333 ofs-delta multi_ack", "shallow 2222222222222222222222222222222222222222", "shallow 5555555555555555555555555555555555555555", "depth 32", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") } func (s *UlReqDecodeSuite) TestDeepenCommits(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen 1234", pktline.FlushString, } ur := s.testDecodeOK(c, payloads) c.Assert(ur.Depth, FitsTypeOf, DepthCommits(0)) commits, ok := ur.Depth.(DepthCommits) c.Assert(ok, Equals, true) c.Assert(int(commits), Equals, 1234) } func (s *UlReqDecodeSuite) TestDeepenCommitsInfiniteInplicit(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen 0", pktline.FlushString, } ur := s.testDecodeOK(c, payloads) c.Assert(ur.Depth, FitsTypeOf, DepthCommits(0)) commits, ok := ur.Depth.(DepthCommits) c.Assert(ok, Equals, true) c.Assert(int(commits), Equals, 0) } func (s *UlReqDecodeSuite) TestDeepenCommitsInfiniteExplicit(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", pktline.FlushString, } ur := s.testDecodeOK(c, payloads) c.Assert(ur.Depth, FitsTypeOf, DepthCommits(0)) commits, ok := ur.Depth.(DepthCommits) c.Assert(ok, Equals, true) c.Assert(int(commits), Equals, 0) } func (s *UlReqDecodeSuite) TestMalformedDeepenCommits(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen -32", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*negative depth.*") } func (s *UlReqDecodeSuite) TestDeepenCommitsEmpty(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen ", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*invalid syntax.*") } func (s *UlReqDecodeSuite) TestDeepenSince(c *C) { 
payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen-since 1420167845", // 2015-01-02T03:04:05+00:00 pktline.FlushString, } ur := s.testDecodeOK(c, payloads) expected := time.Date(2015, time.January, 2, 3, 4, 5, 0, time.UTC) c.Assert(ur.Depth, FitsTypeOf, DepthSince(time.Now())) since, ok := ur.Depth.(DepthSince) c.Assert(ok, Equals, true) c.Assert(time.Time(since).Equal(expected), Equals, true, Commentf("obtained=%s\nexpected=%s", time.Time(since), expected)) } func (s *UlReqDecodeSuite) TestDeepenReference(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen-not refs/heads/master", pktline.FlushString, } ur := s.testDecodeOK(c, payloads) expected := "refs/heads/master" c.Assert(ur.Depth, FitsTypeOf, DepthReference("")) reference, ok := ur.Depth.(DepthReference) c.Assert(ok, Equals, true) c.Assert(string(reference), Equals, expected) } func (s *UlReqDecodeSuite) TestAll(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "want 4444444444444444444444444444444444444444", "want 1111111111111111111111111111111111111111", "want 2222222222222222222222222222222222222222", "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "shallow cccccccccccccccccccccccccccccccccccccccc", "shallow dddddddddddddddddddddddddddddddddddddddd", "deepen 1234", pktline.FlushString, } ur := s.testDecodeOK(c, payloads) expectedWants := []plumbing.Hash{ plumbing.NewHash("1111111111111111111111111111111111111111"), plumbing.NewHash("2222222222222222222222222222222222222222"), plumbing.NewHash("3333333333333333333333333333333333333333"), plumbing.NewHash("4444444444444444444444444444444444444444"), } sort.Sort(byHash(expectedWants)) sort.Sort(byHash(ur.Wants)) c.Assert(ur.Wants, DeepEquals, expectedWants) c.Assert(ur.Capabilities.Supports(capability.OFSDelta), Equals, true) 
c.Assert(ur.Capabilities.Supports(capability.MultiACK), Equals, true) expectedShallows := []plumbing.Hash{ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), plumbing.NewHash("cccccccccccccccccccccccccccccccccccccccc"), plumbing.NewHash("dddddddddddddddddddddddddddddddddddddddd"), } sort.Sort(byHash(expectedShallows)) sort.Sort(byHash(ur.Shallows)) c.Assert(ur.Shallows, DeepEquals, expectedShallows) c.Assert(ur.Depth, FitsTypeOf, DepthCommits(0)) commits, ok := ur.Depth.(DepthCommits) c.Assert(ok, Equals, true) c.Assert(int(commits), Equals, 1234) } func (s *UlReqDecodeSuite) TestExtraData(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen 32", "foo", pktline.FlushString, } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/ulreq_encode.go000066400000000000000000000060041345605224300264470ustar00rootroot00000000000000package packp import ( "bytes" "fmt" "io" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" ) // Encode writes the UlReq encoding of u to the stream. // // All the payloads will end with a newline character. Wants and // shallows are sorted alphabetically. A depth of 0 means no depth // request is sent. 
func (u *UploadRequest) Encode(w io.Writer) error { e := newUlReqEncoder(w) return e.Encode(u) } type ulReqEncoder struct { pe *pktline.Encoder // where to write the encoded data data *UploadRequest // the data to encode err error // sticky error } func newUlReqEncoder(w io.Writer) *ulReqEncoder { return &ulReqEncoder{ pe: pktline.NewEncoder(w), } } func (e *ulReqEncoder) Encode(v *UploadRequest) error { e.data = v if len(v.Wants) == 0 { return fmt.Errorf("empty wants provided") } plumbing.HashesSort(e.data.Wants) for state := e.encodeFirstWant; state != nil; { state = state() } return e.err } func (e *ulReqEncoder) encodeFirstWant() stateFn { var err error if e.data.Capabilities.IsEmpty() { err = e.pe.Encodef("want %s\n", e.data.Wants[0]) } else { err = e.pe.Encodef( "want %s %s\n", e.data.Wants[0], e.data.Capabilities.String(), ) } if err != nil { e.err = fmt.Errorf("encoding first want line: %s", err) return nil } return e.encodeAditionalWants } func (e *ulReqEncoder) encodeAditionalWants() stateFn { last := e.data.Wants[0] for _, w := range e.data.Wants[1:] { if bytes.Equal(last[:], w[:]) { continue } if err := e.pe.Encodef("want %s\n", w); err != nil { e.err = fmt.Errorf("encoding want %q: %s", w, err) return nil } last = w } return e.encodeShallows } func (e *ulReqEncoder) encodeShallows() stateFn { plumbing.HashesSort(e.data.Shallows) var last plumbing.Hash for _, s := range e.data.Shallows { if bytes.Equal(last[:], s[:]) { continue } if err := e.pe.Encodef("shallow %s\n", s); err != nil { e.err = fmt.Errorf("encoding shallow %q: %s", s, err) return nil } last = s } return e.encodeDepth } func (e *ulReqEncoder) encodeDepth() stateFn { switch depth := e.data.Depth.(type) { case DepthCommits: if depth != 0 { commits := int(depth) if err := e.pe.Encodef("deepen %d\n", commits); err != nil { e.err = fmt.Errorf("encoding depth %d: %s", depth, err) return nil } } case DepthSince: when := time.Time(depth).UTC() if err := e.pe.Encodef("deepen-since %d\n", 
when.Unix()); err != nil { e.err = fmt.Errorf("encoding depth %s: %s", when, err) return nil } case DepthReference: reference := string(depth) if err := e.pe.Encodef("deepen-not %s\n", reference); err != nil { e.err = fmt.Errorf("encoding depth %s: %s", reference, err) return nil } default: e.err = fmt.Errorf("unsupported depth type") return nil } return e.encodeFlush } func (e *ulReqEncoder) encodeFlush() stateFn { if err := e.pe.Flush(); err != nil { e.err = fmt.Errorf("encoding flush-pkt: %s", err) return nil } return nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/ulreq_encode_test.go000066400000000000000000000244621345605224300275160ustar00rootroot00000000000000package packp import ( "bytes" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" . "gopkg.in/check.v1" ) type UlReqEncodeSuite struct{} var _ = Suite(&UlReqEncodeSuite{}) func testUlReqEncode(c *C, ur *UploadRequest, expectedPayloads []string) { var buf bytes.Buffer e := newUlReqEncoder(&buf) err := e.Encode(ur) c.Assert(err, IsNil) obtained := buf.Bytes() expected := pktlines(c, expectedPayloads...) 
comment := Commentf("\nobtained = %s\nexpected = %s\n", string(obtained), string(expected)) c.Assert(obtained, DeepEquals, expected, comment) } func testUlReqEncodeError(c *C, ur *UploadRequest, expectedErrorRegEx string) { var buf bytes.Buffer e := newUlReqEncoder(&buf) err := e.Encode(ur) c.Assert(err, ErrorMatches, expectedErrorRegEx) } func (s *UlReqEncodeSuite) TestZeroValue(c *C) { ur := NewUploadRequest() expectedErrorRegEx := ".*empty wants.*" testUlReqEncodeError(c, ur, expectedErrorRegEx) } func (s *UlReqEncodeSuite) TestOneWant(c *C) { ur := NewUploadRequest() ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) expected := []string{ "want 1111111111111111111111111111111111111111\n", pktline.FlushString, } testUlReqEncode(c, ur, expected) } func (s *UlReqEncodeSuite) TestOneWantWithCapabilities(c *C) { ur := NewUploadRequest() ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) ur.Capabilities.Add(capability.MultiACK) ur.Capabilities.Add(capability.OFSDelta) ur.Capabilities.Add(capability.Sideband) ur.Capabilities.Add(capability.SymRef, "HEAD:/refs/heads/master") ur.Capabilities.Add(capability.ThinPack) expected := []string{ "want 1111111111111111111111111111111111111111 multi_ack ofs-delta side-band symref=HEAD:/refs/heads/master thin-pack\n", pktline.FlushString, } testUlReqEncode(c, ur, expected) } func (s *UlReqEncodeSuite) TestWants(c *C) { ur := NewUploadRequest() ur.Wants = append(ur.Wants, plumbing.NewHash("4444444444444444444444444444444444444444"), plumbing.NewHash("1111111111111111111111111111111111111111"), plumbing.NewHash("3333333333333333333333333333333333333333"), plumbing.NewHash("2222222222222222222222222222222222222222"), plumbing.NewHash("5555555555555555555555555555555555555555"), ) expected := []string{ "want 1111111111111111111111111111111111111111\n", "want 2222222222222222222222222222222222222222\n", "want 3333333333333333333333333333333333333333\n", "want 
4444444444444444444444444444444444444444\n", "want 5555555555555555555555555555555555555555\n", pktline.FlushString, } testUlReqEncode(c, ur, expected) } func (s *UlReqEncodeSuite) TestWantsDuplicates(c *C) { ur := NewUploadRequest() ur.Wants = append(ur.Wants, plumbing.NewHash("4444444444444444444444444444444444444444"), plumbing.NewHash("1111111111111111111111111111111111111111"), plumbing.NewHash("3333333333333333333333333333333333333333"), plumbing.NewHash("1111111111111111111111111111111111111111"), plumbing.NewHash("2222222222222222222222222222222222222222"), plumbing.NewHash("1111111111111111111111111111111111111111"), ) expected := []string{ "want 1111111111111111111111111111111111111111\n", "want 2222222222222222222222222222222222222222\n", "want 3333333333333333333333333333333333333333\n", "want 4444444444444444444444444444444444444444\n", pktline.FlushString, } testUlReqEncode(c, ur, expected) } func (s *UlReqEncodeSuite) TestWantsWithCapabilities(c *C) { ur := NewUploadRequest() ur.Wants = append(ur.Wants, plumbing.NewHash("4444444444444444444444444444444444444444"), plumbing.NewHash("1111111111111111111111111111111111111111"), plumbing.NewHash("3333333333333333333333333333333333333333"), plumbing.NewHash("2222222222222222222222222222222222222222"), plumbing.NewHash("5555555555555555555555555555555555555555"), ) ur.Capabilities.Add(capability.MultiACK) ur.Capabilities.Add(capability.OFSDelta) ur.Capabilities.Add(capability.Sideband) ur.Capabilities.Add(capability.SymRef, "HEAD:/refs/heads/master") ur.Capabilities.Add(capability.ThinPack) expected := []string{ "want 1111111111111111111111111111111111111111 multi_ack ofs-delta side-band symref=HEAD:/refs/heads/master thin-pack\n", "want 2222222222222222222222222222222222222222\n", "want 3333333333333333333333333333333333333333\n", "want 4444444444444444444444444444444444444444\n", "want 5555555555555555555555555555555555555555\n", pktline.FlushString, } testUlReqEncode(c, ur, expected) } func (s 
*UlReqEncodeSuite) TestShallow(c *C) { ur := NewUploadRequest() ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) ur.Capabilities.Add(capability.MultiACK) ur.Shallows = append(ur.Shallows, plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")) expected := []string{ "want 1111111111111111111111111111111111111111 multi_ack\n", "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n", pktline.FlushString, } testUlReqEncode(c, ur, expected) } func (s *UlReqEncodeSuite) TestManyShallows(c *C) { ur := NewUploadRequest() ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) ur.Capabilities.Add(capability.MultiACK) ur.Shallows = append(ur.Shallows, plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), plumbing.NewHash("dddddddddddddddddddddddddddddddddddddddd"), plumbing.NewHash("cccccccccccccccccccccccccccccccccccccccc"), plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), ) expected := []string{ "want 1111111111111111111111111111111111111111 multi_ack\n", "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n", "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n", "shallow cccccccccccccccccccccccccccccccccccccccc\n", "shallow dddddddddddddddddddddddddddddddddddddddd\n", pktline.FlushString, } testUlReqEncode(c, ur, expected) } func (s *UlReqEncodeSuite) TestShallowsDuplicate(c *C) { ur := NewUploadRequest() ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) ur.Capabilities.Add(capability.MultiACK) ur.Shallows = append(ur.Shallows, plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), plumbing.NewHash("cccccccccccccccccccccccccccccccccccccccc"), plumbing.NewHash("cccccccccccccccccccccccccccccccccccccccc"), plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), ) expected := []string{ "want 1111111111111111111111111111111111111111 multi_ack\n", "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n", "shallow 
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n", "shallow cccccccccccccccccccccccccccccccccccccccc\n", pktline.FlushString, } testUlReqEncode(c, ur, expected) } func (s *UlReqEncodeSuite) TestDepthCommits(c *C) { ur := NewUploadRequest() ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) ur.Depth = DepthCommits(1234) expected := []string{ "want 1111111111111111111111111111111111111111\n", "deepen 1234\n", pktline.FlushString, } testUlReqEncode(c, ur, expected) } func (s *UlReqEncodeSuite) TestDepthSinceUTC(c *C) { ur := NewUploadRequest() ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) since := time.Date(2015, time.January, 2, 3, 4, 5, 0, time.UTC) ur.Depth = DepthSince(since) expected := []string{ "want 1111111111111111111111111111111111111111\n", "deepen-since 1420167845\n", pktline.FlushString, } testUlReqEncode(c, ur, expected) } func (s *UlReqEncodeSuite) TestDepthSinceNonUTC(c *C) { ur := NewUploadRequest() ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) berlin, err := time.LoadLocation("Europe/Berlin") c.Assert(err, IsNil) since := time.Date(2015, time.January, 2, 3, 4, 5, 0, berlin) // since value is 2015-01-02 03:04:05 +0100 UTC (Europe/Berlin) or // 2015-01-02 02:04:05 +0000 UTC, which is 1420164245 Unix seconds. 
ur.Depth = DepthSince(since) expected := []string{ "want 1111111111111111111111111111111111111111\n", "deepen-since 1420164245\n", pktline.FlushString, } testUlReqEncode(c, ur, expected) } func (s *UlReqEncodeSuite) TestDepthReference(c *C) { ur := NewUploadRequest() ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) ur.Depth = DepthReference("refs/heads/feature-foo") expected := []string{ "want 1111111111111111111111111111111111111111\n", "deepen-not refs/heads/feature-foo\n", pktline.FlushString, } testUlReqEncode(c, ur, expected) } func (s *UlReqEncodeSuite) TestAll(c *C) { ur := NewUploadRequest() ur.Wants = append(ur.Wants, plumbing.NewHash("4444444444444444444444444444444444444444"), plumbing.NewHash("1111111111111111111111111111111111111111"), plumbing.NewHash("3333333333333333333333333333333333333333"), plumbing.NewHash("2222222222222222222222222222222222222222"), plumbing.NewHash("5555555555555555555555555555555555555555"), ) ur.Capabilities.Add(capability.MultiACK) ur.Capabilities.Add(capability.OFSDelta) ur.Capabilities.Add(capability.Sideband) ur.Capabilities.Add(capability.SymRef, "HEAD:/refs/heads/master") ur.Capabilities.Add(capability.ThinPack) ur.Shallows = append(ur.Shallows, plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")) ur.Shallows = append(ur.Shallows, plumbing.NewHash("dddddddddddddddddddddddddddddddddddddddd")) ur.Shallows = append(ur.Shallows, plumbing.NewHash("cccccccccccccccccccccccccccccccccccccccc")) ur.Shallows = append(ur.Shallows, plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")) since := time.Date(2015, time.January, 2, 3, 4, 5, 0, time.UTC) ur.Depth = DepthSince(since) expected := []string{ "want 1111111111111111111111111111111111111111 multi_ack ofs-delta side-band symref=HEAD:/refs/heads/master thin-pack\n", "want 2222222222222222222222222222222222222222\n", "want 3333333333333333333333333333333333333333\n", "want 4444444444444444444444444444444444444444\n", "want 
5555555555555555555555555555555555555555\n", "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n", "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n", "shallow cccccccccccccccccccccccccccccccccccccccc\n", "shallow dddddddddddddddddddddddddddddddddddddddd\n", "deepen-since 1420167845\n", pktline.FlushString, } testUlReqEncode(c, ur, expected) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/ulreq_test.go000066400000000000000000000140511345605224300261720ustar00rootroot00000000000000package packp import ( "fmt" "os" "strings" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" . "gopkg.in/check.v1" ) type UlReqSuite struct{} var _ = Suite(&UlReqSuite{}) func (s *UlReqSuite) TestNewUploadRequestFromCapabilities(c *C) { cap := capability.NewList() cap.Set(capability.Sideband) cap.Set(capability.Sideband64k) cap.Set(capability.MultiACK) cap.Set(capability.MultiACKDetailed) cap.Set(capability.ThinPack) cap.Set(capability.OFSDelta) cap.Set(capability.Agent, "foo") r := NewUploadRequestFromCapabilities(cap) c.Assert(r.Capabilities.String(), Equals, "multi_ack_detailed side-band-64k thin-pack ofs-delta agent=go-git/4.x", ) } func (s *UlReqSuite) TestValidateWants(c *C) { r := NewUploadRequest() err := r.Validate() c.Assert(err, NotNil) r.Wants = append(r.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) err = r.Validate() c.Assert(err, IsNil) } func (s *UlReqSuite) TestValidateShallows(c *C) { r := NewUploadRequest() r.Wants = append(r.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) r.Shallows = append(r.Shallows, plumbing.NewHash("2222222222222222222222222222222222222222")) err := r.Validate() c.Assert(err, NotNil) r.Capabilities.Set(capability.Shallow) err = r.Validate() c.Assert(err, IsNil) } func (s *UlReqSuite) TestValidateDepthCommits(c *C) { r := NewUploadRequest() r.Wants = append(r.Wants, 
plumbing.NewHash("1111111111111111111111111111111111111111")) r.Depth = DepthCommits(42) err := r.Validate() c.Assert(err, NotNil) r.Capabilities.Set(capability.Shallow) err = r.Validate() c.Assert(err, IsNil) } func (s *UlReqSuite) TestValidateDepthReference(c *C) { r := NewUploadRequest() r.Wants = append(r.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) r.Depth = DepthReference("1111111111111111111111111111111111111111") err := r.Validate() c.Assert(err, NotNil) r.Capabilities.Set(capability.DeepenNot) err = r.Validate() c.Assert(err, IsNil) } func (s *UlReqSuite) TestValidateDepthSince(c *C) { r := NewUploadRequest() r.Wants = append(r.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) r.Depth = DepthSince(time.Now()) err := r.Validate() c.Assert(err, NotNil) r.Capabilities.Set(capability.DeepenSince) err = r.Validate() c.Assert(err, IsNil) } func (s *UlReqSuite) TestValidateConflictSideband(c *C) { r := NewUploadRequest() r.Wants = append(r.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) r.Capabilities.Set(capability.Sideband) r.Capabilities.Set(capability.Sideband64k) err := r.Validate() c.Assert(err, NotNil) } func (s *UlReqSuite) TestValidateConflictMultiACK(c *C) { r := NewUploadRequest() r.Wants = append(r.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) r.Capabilities.Set(capability.MultiACK) r.Capabilities.Set(capability.MultiACKDetailed) err := r.Validate() c.Assert(err, NotNil) } func ExampleUploadRequest_Encode() { // Create an empty UlReq with the contents you want... 
ur := NewUploadRequest() // Add a couple of wants ur.Wants = append(ur.Wants, plumbing.NewHash("3333333333333333333333333333333333333333")) ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) ur.Wants = append(ur.Wants, plumbing.NewHash("2222222222222222222222222222222222222222")) // And some capabilities you will like the server to use ur.Capabilities.Add(capability.OFSDelta) ur.Capabilities.Add(capability.SymRef, "HEAD:/refs/heads/master") // Add a couple of shallows ur.Shallows = append(ur.Shallows, plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")) ur.Shallows = append(ur.Shallows, plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")) // And retrict the answer of the server to commits newer than "2015-01-02 03:04:05 UTC" since := time.Date(2015, time.January, 2, 3, 4, 5, 0, time.UTC) ur.Depth = DepthSince(since) // Create a new Encode for the stdout... e := newUlReqEncoder(os.Stdout) // ...and encode the upload-request to it. _ = e.Encode(ur) // ignoring errors for brevity // Output: // 005bwant 1111111111111111111111111111111111111111 ofs-delta symref=HEAD:/refs/heads/master // 0032want 2222222222222222222222222222222222222222 // 0032want 3333333333333333333333333333333333333333 // 0035shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa // 0035shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb // 001cdeepen-since 1420167845 // 0000 } func ExampleUploadRequest_Decode() { // Here is a raw advertised-ref message. raw := "" + "005bwant 1111111111111111111111111111111111111111 ofs-delta symref=HEAD:/refs/heads/master\n" + "0032want 2222222222222222222222222222222222222222\n" + "0032want 3333333333333333333333333333333333333333\n" + "0035shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n" + "0035shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n" + "001cdeepen-since 1420167845\n" + // 2015-01-02 03:04:05 +0000 UTC pktline.FlushString // Use the raw message as our input. 
input := strings.NewReader(raw) // Create the Decoder reading from our input. d := newUlReqDecoder(input) // Decode the input into a newly allocated UlReq value. ur := NewUploadRequest() _ = d.Decode(ur) // error check ignored for brevity // Do something interesting with the UlReq, e.g. print its contents. fmt.Println("capabilities =", ur.Capabilities.String()) fmt.Println("wants =", ur.Wants) fmt.Println("shallows =", ur.Shallows) switch depth := ur.Depth.(type) { case DepthCommits: fmt.Println("depth =", int(depth)) case DepthSince: fmt.Println("depth =", time.Time(depth)) case DepthReference: fmt.Println("depth =", string(depth)) } // Output: // capabilities = ofs-delta symref=HEAD:/refs/heads/master // wants = [1111111111111111111111111111111111111111 2222222222222222222222222222222222222222 3333333333333333333333333333333333333333] // shallows = [aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb] // depth = 2015-01-02 03:04:05 +0000 UTC } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/updreq.go000066400000000000000000000053041345605224300253040ustar00rootroot00000000000000package packp import ( "errors" "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband" ) var ( ErrEmptyCommands = errors.New("commands cannot be empty") ErrMalformedCommand = errors.New("malformed command") ) // ReferenceUpdateRequest values represent reference upload requests. // Values from this type are not zero-value safe, use the New function instead. type ReferenceUpdateRequest struct { Capabilities *capability.List Commands []*Command Shallow *plumbing.Hash // Packfile contains an optional packfile reader. Packfile io.ReadCloser // Progress receives sideband progress messages from the server Progress sideband.Progress } // New returns a pointer to a new ReferenceUpdateRequest value. 
func NewReferenceUpdateRequest() *ReferenceUpdateRequest { return &ReferenceUpdateRequest{ // TODO: Add support for push-cert Capabilities: capability.NewList(), Commands: nil, } } // NewReferenceUpdateRequestFromCapabilities returns a pointer to a new // ReferenceUpdateRequest value, the request capabilities are filled with the // most optimal ones, based on the adv value (advertised capabilities), the // ReferenceUpdateRequest contains no commands // // It does set the following capabilities: // - agent // - report-status // - ofs-delta // - ref-delta // - delete-refs // It leaves up to the user to add the following capabilities later: // - atomic // - ofs-delta // - side-band // - side-band-64k // - quiet // - push-cert func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceUpdateRequest { r := NewReferenceUpdateRequest() if adv.Supports(capability.Agent) { r.Capabilities.Set(capability.Agent, capability.DefaultAgent) } if adv.Supports(capability.ReportStatus) { r.Capabilities.Set(capability.ReportStatus) } return r } func (r *ReferenceUpdateRequest) validate() error { if len(r.Commands) == 0 { return ErrEmptyCommands } for _, c := range r.Commands { if err := c.validate(); err != nil { return err } } return nil } type Action string const ( Create Action = "create" Update = "update" Delete = "delete" Invalid = "invalid" ) type Command struct { Name plumbing.ReferenceName Old plumbing.Hash New plumbing.Hash } func (c *Command) Action() Action { if c.Old == plumbing.ZeroHash && c.New == plumbing.ZeroHash { return Invalid } if c.Old == plumbing.ZeroHash { return Create } if c.New == plumbing.ZeroHash { return Delete } return Update } func (c *Command) validate() error { if c.Action() == Invalid { return ErrMalformedCommand } return nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/updreq_decode.go000066400000000000000000000120611345605224300266050ustar00rootroot00000000000000package packp import ( "bytes" "encoding/hex" 
"errors" "fmt" "io" "io/ioutil" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" ) var ( shallowLineLength = len(shallow) + hashSize minCommandLength = hashSize*2 + 2 + 1 minCommandAndCapsLenth = minCommandLength + 1 ) var ( ErrEmpty = errors.New("empty update-request message") errNoCommands = errors.New("unexpected EOF before any command") errMissingCapabilitiesDelimiter = errors.New("capabilities delimiter not found") ) func errMalformedRequest(reason string) error { return fmt.Errorf("malformed request: %s", reason) } func errInvalidHashSize(got int) error { return fmt.Errorf("invalid hash size: expected %d, got %d", hashSize, got) } func errInvalidHash(err error) error { return fmt.Errorf("invalid hash: %s", err.Error()) } func errInvalidShallowLineLength(got int) error { return errMalformedRequest(fmt.Sprintf( "invalid shallow line length: expected %d, got %d", shallowLineLength, got)) } func errInvalidCommandCapabilitiesLineLength(got int) error { return errMalformedRequest(fmt.Sprintf( "invalid command and capabilities line length: expected at least %d, got %d", minCommandAndCapsLenth, got)) } func errInvalidCommandLineLength(got int) error { return errMalformedRequest(fmt.Sprintf( "invalid command line length: expected at least %d, got %d", minCommandLength, got)) } func errInvalidShallowObjId(err error) error { return errMalformedRequest( fmt.Sprintf("invalid shallow object id: %s", err.Error())) } func errInvalidOldObjId(err error) error { return errMalformedRequest( fmt.Sprintf("invalid old object id: %s", err.Error())) } func errInvalidNewObjId(err error) error { return errMalformedRequest( fmt.Sprintf("invalid new object id: %s", err.Error())) } func errMalformedCommand(err error) error { return errMalformedRequest(fmt.Sprintf( "malformed command: %s", err.Error())) } // Decode reads the next update-request message form the reader and wr func (req *ReferenceUpdateRequest) Decode(r io.Reader) error { var rc 
io.ReadCloser var ok bool rc, ok = r.(io.ReadCloser) if !ok { rc = ioutil.NopCloser(r) } d := &updReqDecoder{r: rc, s: pktline.NewScanner(r)} return d.Decode(req) } type updReqDecoder struct { r io.ReadCloser s *pktline.Scanner req *ReferenceUpdateRequest } func (d *updReqDecoder) Decode(req *ReferenceUpdateRequest) error { d.req = req funcs := []func() error{ d.scanLine, d.decodeShallow, d.decodeCommandAndCapabilities, d.decodeCommands, d.setPackfile, req.validate, } for _, f := range funcs { if err := f(); err != nil { return err } } return nil } func (d *updReqDecoder) scanLine() error { if ok := d.s.Scan(); !ok { return d.scanErrorOr(ErrEmpty) } return nil } func (d *updReqDecoder) decodeShallow() error { b := d.s.Bytes() if !bytes.HasPrefix(b, shallowNoSp) { return nil } if len(b) != shallowLineLength { return errInvalidShallowLineLength(len(b)) } h, err := parseHash(string(b[len(shallow):])) if err != nil { return errInvalidShallowObjId(err) } if ok := d.s.Scan(); !ok { return d.scanErrorOr(errNoCommands) } d.req.Shallow = &h return nil } func (d *updReqDecoder) decodeCommands() error { for { b := d.s.Bytes() if bytes.Equal(b, pktline.Flush) { return nil } c, err := parseCommand(b) if err != nil { return err } d.req.Commands = append(d.req.Commands, c) if ok := d.s.Scan(); !ok { return d.s.Err() } } } func (d *updReqDecoder) decodeCommandAndCapabilities() error { b := d.s.Bytes() i := bytes.IndexByte(b, 0) if i == -1 { return errMissingCapabilitiesDelimiter } if len(b) < minCommandAndCapsLenth { return errInvalidCommandCapabilitiesLineLength(len(b)) } cmd, err := parseCommand(b[:i]) if err != nil { return err } d.req.Commands = append(d.req.Commands, cmd) if err := d.req.Capabilities.Decode(b[i+1:]); err != nil { return err } if err := d.scanLine(); err != nil { return err } return nil } func (d *updReqDecoder) setPackfile() error { d.req.Packfile = d.r return nil } func parseCommand(b []byte) (*Command, error) { if len(b) < minCommandLength { return nil, 
errInvalidCommandLineLength(len(b)) } var ( os, ns string n plumbing.ReferenceName ) if _, err := fmt.Sscanf(string(b), "%s %s %s", &os, &ns, &n); err != nil { return nil, errMalformedCommand(err) } oh, err := parseHash(os) if err != nil { return nil, errInvalidOldObjId(err) } nh, err := parseHash(ns) if err != nil { return nil, errInvalidNewObjId(err) } return &Command{Old: oh, New: nh, Name: plumbing.ReferenceName(n)}, nil } func parseHash(s string) (plumbing.Hash, error) { if len(s) != hashSize { return plumbing.ZeroHash, errInvalidHashSize(len(s)) } if _, err := hex.DecodeString(s); err != nil { return plumbing.ZeroHash, errInvalidHash(err) } h := plumbing.NewHash(s) return h, nil } func (d *updReqDecoder) scanErrorOr(origErr error) error { if err := d.s.Err(); err != nil { return err } return origErr } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/updreq_decode_test.go000066400000000000000000000273061345605224300276540ustar00rootroot00000000000000package packp import ( "bytes" "io" "io/ioutil" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" . 
"gopkg.in/check.v1" ) type UpdReqDecodeSuite struct{} var _ = Suite(&UpdReqDecodeSuite{}) func (s *UpdReqDecodeSuite) TestEmpty(c *C) { r := NewReferenceUpdateRequest() var buf bytes.Buffer c.Assert(r.Decode(&buf), Equals, ErrEmpty) c.Assert(r, DeepEquals, NewReferenceUpdateRequest()) } func (s *UpdReqDecodeSuite) TestInvalidPktlines(c *C) { r := NewReferenceUpdateRequest() input := bytes.NewReader([]byte("xxxxxxxxxx")) c.Assert(r.Decode(input), ErrorMatches, "invalid pkt-len found") } func (s *UpdReqDecodeSuite) TestInvalidShadow(c *C) { payloads := []string{ "shallow", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid shallow line length: expected 48, got 7$") payloads = []string{ "shallow ", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid shallow line length: expected 48, got 8$") payloads = []string{ "shallow 1ecf0ef2c2dffb796033e5a02219af86ec65", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid shallow line length: expected 48, got 44$") payloads = []string{ "shallow 1ecf0ef2c2dffb796033e5a02219af86ec6584e54", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid shallow line length: expected 48, got 49$") payloads = []string{ "shallow 1ecf0ef2c2dffb796033e5a02219af86ec6584eu", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: 
invalid shallow object id: invalid hash: .*") } func (s *UpdReqDecodeSuite) TestMalformedCommand(c *C) { payloads := []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5x2ecf0ef2c2dffb796033e5a02219af86ec6584e5xmyref\x00", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: malformed command: EOF$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5x2ecf0ef2c2dffb796033e5a02219af86ec6584e5xmyref", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: malformed command: EOF$") } func (s *UpdReqDecodeSuite) TestInvalidCommandInvalidHash(c *C) { payloads := []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid old object id: invalid hash size: expected 40, got 39$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e myref\x00", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid new object id: invalid hash size: expected 40, got 39$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86e 2ecf0ef2c2dffb796033e5a02219af86ec6 m\x00", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid command and capabilities line length: expected at least 84, got 72$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584eu 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid old object id: invalid hash: .*$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584eu myref\x00", pktline.FlushString, } 
s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid new object id: invalid hash: .*$") } func (s *UpdReqDecodeSuite) TestInvalidCommandMissingNullDelimiter(c *C) { payloads := []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "capabilities delimiter not found") } func (s *UpdReqDecodeSuite) TestInvalidCommandMissingName(c *C) { payloads := []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5\x00", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid command and capabilities line length: expected at least 84, got 82$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 \x00", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid command and capabilities line length: expected at least 84, got 83$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid command line length: expected at least 83, got 81$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 ", pktline.FlushString, } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid command line length: expected at least 83, got 82$") } func (s *UpdReqDecodeSuite) TestOneUpdateCommand(c *C) { hash1 := plumbing.NewHash("1ecf0ef2c2dffb796033e5a02219af86ec6584e5") hash2 := plumbing.NewHash("2ecf0ef2c2dffb796033e5a02219af86ec6584e5") name := 
plumbing.ReferenceName("myref") expected := NewReferenceUpdateRequest() expected.Commands = []*Command{ {Name: name, Old: hash1, New: hash2}, } expected.Packfile = ioutil.NopCloser(bytes.NewReader([]byte{})) payloads := []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", pktline.FlushString, } s.testDecodeOkExpected(c, expected, payloads) } func (s *UpdReqDecodeSuite) TestMultipleCommands(c *C) { hash1 := plumbing.NewHash("1ecf0ef2c2dffb796033e5a02219af86ec6584e5") hash2 := plumbing.NewHash("2ecf0ef2c2dffb796033e5a02219af86ec6584e5") expected := NewReferenceUpdateRequest() expected.Commands = []*Command{ {Name: plumbing.ReferenceName("myref1"), Old: hash1, New: hash2}, {Name: plumbing.ReferenceName("myref2"), Old: plumbing.ZeroHash, New: hash2}, {Name: plumbing.ReferenceName("myref3"), Old: hash1, New: plumbing.ZeroHash}, } expected.Packfile = ioutil.NopCloser(bytes.NewReader([]byte{})) payloads := []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref1\x00", "0000000000000000000000000000000000000000 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref2", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 0000000000000000000000000000000000000000 myref3", pktline.FlushString, } s.testDecodeOkExpected(c, expected, payloads) } func (s *UpdReqDecodeSuite) TestMultipleCommandsAndCapabilities(c *C) { hash1 := plumbing.NewHash("1ecf0ef2c2dffb796033e5a02219af86ec6584e5") hash2 := plumbing.NewHash("2ecf0ef2c2dffb796033e5a02219af86ec6584e5") expected := NewReferenceUpdateRequest() expected.Commands = []*Command{ {Name: plumbing.ReferenceName("myref1"), Old: hash1, New: hash2}, {Name: plumbing.ReferenceName("myref2"), Old: plumbing.ZeroHash, New: hash2}, {Name: plumbing.ReferenceName("myref3"), Old: hash1, New: plumbing.ZeroHash}, } expected.Capabilities.Add("shallow") expected.Packfile = ioutil.NopCloser(bytes.NewReader([]byte{})) payloads := []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 
2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref1\x00shallow", "0000000000000000000000000000000000000000 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref2", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 0000000000000000000000000000000000000000 myref3", pktline.FlushString, } s.testDecodeOkExpected(c, expected, payloads) } func (s *UpdReqDecodeSuite) TestMultipleCommandsAndCapabilitiesShallow(c *C) { hash1 := plumbing.NewHash("1ecf0ef2c2dffb796033e5a02219af86ec6584e5") hash2 := plumbing.NewHash("2ecf0ef2c2dffb796033e5a02219af86ec6584e5") expected := NewReferenceUpdateRequest() expected.Commands = []*Command{ {Name: plumbing.ReferenceName("myref1"), Old: hash1, New: hash2}, {Name: plumbing.ReferenceName("myref2"), Old: plumbing.ZeroHash, New: hash2}, {Name: plumbing.ReferenceName("myref3"), Old: hash1, New: plumbing.ZeroHash}, } expected.Capabilities.Add("shallow") expected.Shallow = &hash1 expected.Packfile = ioutil.NopCloser(bytes.NewReader([]byte{})) payloads := []string{ "shallow 1ecf0ef2c2dffb796033e5a02219af86ec6584e5", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref1\x00shallow", "0000000000000000000000000000000000000000 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref2", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 0000000000000000000000000000000000000000 myref3", pktline.FlushString, } s.testDecodeOkExpected(c, expected, payloads) } func (s *UpdReqDecodeSuite) TestWithPackfile(c *C) { hash1 := plumbing.NewHash("1ecf0ef2c2dffb796033e5a02219af86ec6584e5") hash2 := plumbing.NewHash("2ecf0ef2c2dffb796033e5a02219af86ec6584e5") name := plumbing.ReferenceName("myref") expected := NewReferenceUpdateRequest() expected.Commands = []*Command{ {Name: name, Old: hash1, New: hash2}, } packfileContent := []byte("PACKabc") expected.Packfile = ioutil.NopCloser(bytes.NewReader(packfileContent)) payloads := []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", pktline.FlushString, } var buf 
bytes.Buffer e := pktline.NewEncoder(&buf) c.Assert(e.EncodeString(payloads...), IsNil) buf.Write(packfileContent) s.testDecodeOkRaw(c, expected, buf.Bytes()) } func (s *UpdReqDecodeSuite) testDecoderErrorMatches(c *C, input io.Reader, pattern string) { r := NewReferenceUpdateRequest() c.Assert(r.Decode(input), ErrorMatches, pattern) } func (s *UpdReqDecodeSuite) testDecodeOK(c *C, payloads []string) *ReferenceUpdateRequest { var buf bytes.Buffer e := pktline.NewEncoder(&buf) err := e.EncodeString(payloads...) c.Assert(err, IsNil) r := NewReferenceUpdateRequest() c.Assert(r.Decode(&buf), IsNil) return r } func (s *UpdReqDecodeSuite) testDecodeOkRaw(c *C, expected *ReferenceUpdateRequest, raw []byte) { req := NewReferenceUpdateRequest() c.Assert(req.Decode(bytes.NewBuffer(raw)), IsNil) c.Assert(req.Packfile, NotNil) s.compareReaders(c, req.Packfile, expected.Packfile) req.Packfile = nil expected.Packfile = nil c.Assert(req, DeepEquals, expected) } func (s *UpdReqDecodeSuite) testDecodeOkExpected(c *C, expected *ReferenceUpdateRequest, payloads []string) { req := s.testDecodeOK(c, payloads) c.Assert(req.Packfile, NotNil) s.compareReaders(c, req.Packfile, expected.Packfile) req.Packfile = nil expected.Packfile = nil c.Assert(req, DeepEquals, expected) } func (s *UpdReqDecodeSuite) compareReaders(c *C, a io.ReadCloser, b io.ReadCloser) { pba, err := ioutil.ReadAll(a) c.Assert(err, IsNil) c.Assert(a.Close(), IsNil) pbb, err := ioutil.ReadAll(b) c.Assert(err, IsNil) c.Assert(b.Close(), IsNil) c.Assert(pba, DeepEquals, pbb) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/updreq_encode.go000066400000000000000000000026571345605224300266310ustar00rootroot00000000000000package packp import ( "fmt" "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" ) var ( zeroHashString = plumbing.ZeroHash.String() ) // Encode writes the ReferenceUpdateRequest encoding to the 
stream. func (r *ReferenceUpdateRequest) Encode(w io.Writer) error { if err := r.validate(); err != nil { return err } e := pktline.NewEncoder(w) if err := r.encodeShallow(e, r.Shallow); err != nil { return err } if err := r.encodeCommands(e, r.Commands, r.Capabilities); err != nil { return err } if r.Packfile != nil { if _, err := io.Copy(w, r.Packfile); err != nil { return err } return r.Packfile.Close() } return nil } func (r *ReferenceUpdateRequest) encodeShallow(e *pktline.Encoder, h *plumbing.Hash) error { if h == nil { return nil } objId := []byte(h.String()) return e.Encodef("%s%s", shallow, objId) } func (r *ReferenceUpdateRequest) encodeCommands(e *pktline.Encoder, cmds []*Command, cap *capability.List) error { if err := e.Encodef("%s\x00%s", formatCommand(cmds[0]), cap.String()); err != nil { return err } for _, cmd := range cmds[1:] { if err := e.Encodef(formatCommand(cmd)); err != nil { return err } } return e.Flush() } func formatCommand(cmd *Command) string { o := cmd.Old.String() n := cmd.New.String() return fmt.Sprintf("%s %s %s", o, n, cmd.Name) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/updreq_encode_test.go000066400000000000000000000113751345605224300276650ustar00rootroot00000000000000package packp import ( "bytes" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" . 
"gopkg.in/check.v1" "io/ioutil" ) type UpdReqEncodeSuite struct{} var _ = Suite(&UpdReqEncodeSuite{}) func (s *UpdReqEncodeSuite) testEncode(c *C, input *ReferenceUpdateRequest, expected []byte) { var buf bytes.Buffer c.Assert(input.Encode(&buf), IsNil) obtained := buf.Bytes() comment := Commentf("\nobtained = %s\nexpected = %s\n", string(obtained), string(expected)) c.Assert(obtained, DeepEquals, expected, comment) } func (s *UpdReqEncodeSuite) TestZeroValue(c *C) { r := &ReferenceUpdateRequest{} var buf bytes.Buffer c.Assert(r.Encode(&buf), Equals, ErrEmptyCommands) r = NewReferenceUpdateRequest() c.Assert(r.Encode(&buf), Equals, ErrEmptyCommands) } func (s *UpdReqEncodeSuite) TestOneUpdateCommand(c *C) { hash1 := plumbing.NewHash("1ecf0ef2c2dffb796033e5a02219af86ec6584e5") hash2 := plumbing.NewHash("2ecf0ef2c2dffb796033e5a02219af86ec6584e5") name := plumbing.ReferenceName("myref") r := NewReferenceUpdateRequest() r.Commands = []*Command{ {Name: name, Old: hash1, New: hash2}, } expected := pktlines(c, "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", pktline.FlushString, ) s.testEncode(c, r, expected) } func (s *UpdReqEncodeSuite) TestMultipleCommands(c *C) { hash1 := plumbing.NewHash("1ecf0ef2c2dffb796033e5a02219af86ec6584e5") hash2 := plumbing.NewHash("2ecf0ef2c2dffb796033e5a02219af86ec6584e5") r := NewReferenceUpdateRequest() r.Commands = []*Command{ {Name: plumbing.ReferenceName("myref1"), Old: hash1, New: hash2}, {Name: plumbing.ReferenceName("myref2"), Old: plumbing.ZeroHash, New: hash2}, {Name: plumbing.ReferenceName("myref3"), Old: hash1, New: plumbing.ZeroHash}, } expected := pktlines(c, "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref1\x00", "0000000000000000000000000000000000000000 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref2", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 0000000000000000000000000000000000000000 myref3", pktline.FlushString, ) s.testEncode(c, r, 
expected) } func (s *UpdReqEncodeSuite) TestMultipleCommandsAndCapabilities(c *C) { hash1 := plumbing.NewHash("1ecf0ef2c2dffb796033e5a02219af86ec6584e5") hash2 := plumbing.NewHash("2ecf0ef2c2dffb796033e5a02219af86ec6584e5") r := NewReferenceUpdateRequest() r.Commands = []*Command{ {Name: plumbing.ReferenceName("myref1"), Old: hash1, New: hash2}, {Name: plumbing.ReferenceName("myref2"), Old: plumbing.ZeroHash, New: hash2}, {Name: plumbing.ReferenceName("myref3"), Old: hash1, New: plumbing.ZeroHash}, } r.Capabilities.Add("shallow") expected := pktlines(c, "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref1\x00shallow", "0000000000000000000000000000000000000000 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref2", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 0000000000000000000000000000000000000000 myref3", pktline.FlushString, ) s.testEncode(c, r, expected) } func (s *UpdReqEncodeSuite) TestMultipleCommandsAndCapabilitiesShallow(c *C) { hash1 := plumbing.NewHash("1ecf0ef2c2dffb796033e5a02219af86ec6584e5") hash2 := plumbing.NewHash("2ecf0ef2c2dffb796033e5a02219af86ec6584e5") r := NewReferenceUpdateRequest() r.Commands = []*Command{ {Name: plumbing.ReferenceName("myref1"), Old: hash1, New: hash2}, {Name: plumbing.ReferenceName("myref2"), Old: plumbing.ZeroHash, New: hash2}, {Name: plumbing.ReferenceName("myref3"), Old: hash1, New: plumbing.ZeroHash}, } r.Capabilities.Add("shallow") r.Shallow = &hash1 expected := pktlines(c, "shallow 1ecf0ef2c2dffb796033e5a02219af86ec6584e5", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref1\x00shallow", "0000000000000000000000000000000000000000 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref2", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 0000000000000000000000000000000000000000 myref3", pktline.FlushString, ) s.testEncode(c, r, expected) } func (s *UpdReqEncodeSuite) TestWithPackfile(c *C) { hash1 := plumbing.NewHash("1ecf0ef2c2dffb796033e5a02219af86ec6584e5") hash2 := 
plumbing.NewHash("2ecf0ef2c2dffb796033e5a02219af86ec6584e5") name := plumbing.ReferenceName("myref") packfileContent := []byte("PACKabc") packfileReader := bytes.NewReader(packfileContent) packfileReadCloser := ioutil.NopCloser(packfileReader) r := NewReferenceUpdateRequest() r.Commands = []*Command{ {Name: name, Old: hash1, New: hash2}, } r.Packfile = packfileReadCloser expected := pktlines(c, "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", pktline.FlushString, ) expected = append(expected, packfileContent...) s.testEncode(c, r, expected) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/updreq_test.go000066400000000000000000000017221345605224300263430ustar00rootroot00000000000000package packp import ( "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" . "gopkg.in/check.v1" ) type UpdReqSuite struct{} var _ = Suite(&UpdReqSuite{}) func (s *UpdReqSuite) TestNewReferenceUpdateRequestFromCapabilities(c *C) { cap := capability.NewList() cap.Set(capability.Sideband) cap.Set(capability.Sideband64k) cap.Set(capability.Quiet) cap.Set(capability.ReportStatus) cap.Set(capability.DeleteRefs) cap.Set(capability.PushCert, "foo") cap.Set(capability.Atomic) cap.Set(capability.Agent, "foo") r := NewReferenceUpdateRequestFromCapabilities(cap) c.Assert(r.Capabilities.String(), Equals, "agent=go-git/4.x report-status", ) cap = capability.NewList() cap.Set(capability.Agent, "foo") r = NewReferenceUpdateRequestFromCapabilities(cap) c.Assert(r.Capabilities.String(), Equals, "agent=go-git/4.x") cap = capability.NewList() r = NewReferenceUpdateRequestFromCapabilities(cap) c.Assert(r.Capabilities.String(), Equals, "") } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/uppackreq.go000066400000000000000000000046161345605224300260040ustar00rootroot00000000000000package packp import ( "bytes" "fmt" "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" 
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" ) // UploadPackRequest represents a upload-pack request. // Zero-value is not safe, use NewUploadPackRequest instead. type UploadPackRequest struct { UploadRequest UploadHaves } // NewUploadPackRequest creates a new UploadPackRequest and returns a pointer. func NewUploadPackRequest() *UploadPackRequest { ur := NewUploadRequest() return &UploadPackRequest{ UploadHaves: UploadHaves{}, UploadRequest: *ur, } } // NewUploadPackRequestFromCapabilities creates a new UploadPackRequest and // returns a pointer. The request capabilities are filled with the most optiomal // ones, based on the adv value (advertaised capabilities), the UploadPackRequest // it has no wants, haves or shallows and an infinite depth func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackRequest { ur := NewUploadRequestFromCapabilities(adv) return &UploadPackRequest{ UploadHaves: UploadHaves{}, UploadRequest: *ur, } } // IsEmpty a request if empty if Haves are contained in the Wants, or if Wants // length is zero func (r *UploadPackRequest) IsEmpty() bool { return isSubset(r.Wants, r.Haves) } func isSubset(needle []plumbing.Hash, haystack []plumbing.Hash) bool { for _, h := range needle { found := false for _, oh := range haystack { if h == oh { found = true break } } if !found { return false } } return true } // UploadHaves is a message to signal the references that a client has in a // upload-pack. Do not use this directly. Use UploadPackRequest request instead. type UploadHaves struct { Haves []plumbing.Hash } // Encode encodes the UploadHaves into the Writer. If flush is true, a flush // command will be encoded at the end of the writer content. 
func (u *UploadHaves) Encode(w io.Writer, flush bool) error { e := pktline.NewEncoder(w) plumbing.HashesSort(u.Haves) var last plumbing.Hash for _, have := range u.Haves { if bytes.Equal(last[:], have[:]) { continue } if err := e.Encodef("have %s\n", have); err != nil { return fmt.Errorf("sending haves for %q: %s", have, err) } last = have } if flush && len(u.Haves) != 0 { if err := e.Flush(); err != nil { return fmt.Errorf("sending flush-pkt after haves: %s", err) } } return nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/uppackreq_test.go000066400000000000000000000043271345605224300270420ustar00rootroot00000000000000package packp import ( "bytes" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" . "gopkg.in/check.v1" ) type UploadPackRequestSuite struct{} var _ = Suite(&UploadPackRequestSuite{}) func (s *UploadPackRequestSuite) TestNewUploadPackRequestFromCapabilities(c *C) { cap := capability.NewList() cap.Set(capability.Agent, "foo") r := NewUploadPackRequestFromCapabilities(cap) c.Assert(r.Capabilities.String(), Equals, "agent=go-git/4.x") } func (s *UploadPackRequestSuite) TestIsEmpty(c *C) { r := NewUploadPackRequest() r.Wants = append(r.Wants, plumbing.NewHash("d82f291cde9987322c8a0c81a325e1ba6159684c")) r.Wants = append(r.Wants, plumbing.NewHash("2b41ef280fdb67a9b250678686a0c3e03b0a9989")) r.Haves = append(r.Haves, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) c.Assert(r.IsEmpty(), Equals, false) r = NewUploadPackRequest() r.Wants = append(r.Wants, plumbing.NewHash("d82f291cde9987322c8a0c81a325e1ba6159684c")) r.Wants = append(r.Wants, plumbing.NewHash("2b41ef280fdb67a9b250678686a0c3e03b0a9989")) r.Haves = append(r.Haves, plumbing.NewHash("d82f291cde9987322c8a0c81a325e1ba6159684c")) c.Assert(r.IsEmpty(), Equals, false) r = NewUploadPackRequest() r.Wants = append(r.Wants, plumbing.NewHash("d82f291cde9987322c8a0c81a325e1ba6159684c")) r.Haves = append(r.Haves, 
plumbing.NewHash("d82f291cde9987322c8a0c81a325e1ba6159684c")) c.Assert(r.IsEmpty(), Equals, true) } type UploadHavesSuite struct{} var _ = Suite(&UploadHavesSuite{}) func (s *UploadHavesSuite) TestEncode(c *C) { uh := &UploadHaves{} uh.Haves = append(uh.Haves, plumbing.NewHash("1111111111111111111111111111111111111111"), plumbing.NewHash("3333333333333333333333333333333333333333"), plumbing.NewHash("1111111111111111111111111111111111111111"), plumbing.NewHash("2222222222222222222222222222222222222222"), plumbing.NewHash("1111111111111111111111111111111111111111"), ) buf := bytes.NewBuffer(nil) err := uh.Encode(buf, true) c.Assert(err, IsNil) c.Assert(buf.String(), Equals, ""+ "0032have 1111111111111111111111111111111111111111\n"+ "0032have 2222222222222222222222222222222222222222\n"+ "0032have 3333333333333333333333333333333333333333\n"+ "0000", ) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/uppackresp.go000066400000000000000000000053231345605224300261620ustar00rootroot00000000000000package packp import ( "errors" "io" "bufio" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) // ErrUploadPackResponseNotDecoded is returned if Read is called without // decoding first var ErrUploadPackResponseNotDecoded = errors.New("upload-pack-response should be decoded") // UploadPackResponse contains all the information responded by the upload-pack // service, the response implements io.ReadCloser that allows to read the // packfile directly from it. type UploadPackResponse struct { ShallowUpdate ServerResponse r io.ReadCloser isShallow bool isMultiACK bool isOk bool } // NewUploadPackResponse create a new UploadPackResponse instance, the request // being responded by the response is required. 
func NewUploadPackResponse(req *UploadPackRequest) *UploadPackResponse { isShallow := !req.Depth.IsZero() isMultiACK := req.Capabilities.Supports(capability.MultiACK) || req.Capabilities.Supports(capability.MultiACKDetailed) return &UploadPackResponse{ isShallow: isShallow, isMultiACK: isMultiACK, } } // NewUploadPackResponseWithPackfile creates a new UploadPackResponse instance, // and sets its packfile reader. func NewUploadPackResponseWithPackfile(req *UploadPackRequest, pf io.ReadCloser) *UploadPackResponse { r := NewUploadPackResponse(req) r.r = pf return r } // Decode decodes all the responses sent by upload-pack service into the struct // and prepares it to read the packfile using the Read method func (r *UploadPackResponse) Decode(reader io.ReadCloser) error { buf := bufio.NewReader(reader) if r.isShallow { if err := r.ShallowUpdate.Decode(buf); err != nil { return err } } if err := r.ServerResponse.Decode(buf, r.isMultiACK); err != nil { return err } // now the reader is ready to read the packfile content r.r = ioutil.NewReadCloser(buf, reader) return nil } // Encode encodes an UploadPackResponse. func (r *UploadPackResponse) Encode(w io.Writer) (err error) { if r.isShallow { if err := r.ShallowUpdate.Encode(w); err != nil { return err } } if err := r.ServerResponse.Encode(w); err != nil { return err } defer ioutil.CheckClose(r.r, &err) _, err = io.Copy(w, r.r) return err } // Read reads the packfile data, if the request was done with any Sideband // capability the content read should be demultiplexed. 
If the methods wasn't // called before the ErrUploadPackResponseNotDecoded will be return func (r *UploadPackResponse) Read(p []byte) (int, error) { if r.r == nil { return 0, ErrUploadPackResponseNotDecoded } return r.r.Read(p) } // Close the underlying reader, if any func (r *UploadPackResponse) Close() error { if r.r == nil { return nil } return r.r.Close() } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/protocol/packp/uppackresp_test.go000066400000000000000000000061141345605224300272200ustar00rootroot00000000000000package packp import ( "bytes" "io/ioutil" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing" ) type UploadPackResponseSuite struct{} var _ = Suite(&UploadPackResponseSuite{}) func (s *UploadPackResponseSuite) TestDecodeNAK(c *C) { raw := "0008NAK\nPACK" req := NewUploadPackRequest() res := NewUploadPackResponse(req) defer res.Close() err := res.Decode(ioutil.NopCloser(bytes.NewBufferString(raw))) c.Assert(err, IsNil) pack, err := ioutil.ReadAll(res) c.Assert(err, IsNil) c.Assert(pack, DeepEquals, []byte("PACK")) } func (s *UploadPackResponseSuite) TestDecodeDepth(c *C) { raw := "00000008NAK\nPACK" req := NewUploadPackRequest() req.Depth = DepthCommits(1) res := NewUploadPackResponse(req) defer res.Close() err := res.Decode(ioutil.NopCloser(bytes.NewBufferString(raw))) c.Assert(err, IsNil) pack, err := ioutil.ReadAll(res) c.Assert(err, IsNil) c.Assert(pack, DeepEquals, []byte("PACK")) } func (s *UploadPackResponseSuite) TestDecodeMalformed(c *C) { raw := "00000008ACK\nPACK" req := NewUploadPackRequest() req.Depth = DepthCommits(1) res := NewUploadPackResponse(req) defer res.Close() err := res.Decode(ioutil.NopCloser(bytes.NewBufferString(raw))) c.Assert(err, NotNil) } func (s *UploadPackResponseSuite) TestDecodeMultiACK(c *C) { req := NewUploadPackRequest() req.Capabilities.Set(capability.MultiACK) res := NewUploadPackResponse(req) defer res.Close() err := 
res.Decode(ioutil.NopCloser(bytes.NewBuffer(nil))) c.Assert(err, NotNil) } func (s *UploadPackResponseSuite) TestReadNoDecode(c *C) { req := NewUploadPackRequest() req.Capabilities.Set(capability.MultiACK) res := NewUploadPackResponse(req) defer res.Close() n, err := res.Read(nil) c.Assert(err, Equals, ErrUploadPackResponseNotDecoded) c.Assert(n, Equals, 0) } func (s *UploadPackResponseSuite) TestEncodeNAK(c *C) { pf := ioutil.NopCloser(bytes.NewBuffer([]byte("[PACK]"))) req := NewUploadPackRequest() res := NewUploadPackResponseWithPackfile(req, pf) defer func() { c.Assert(res.Close(), IsNil) }() b := bytes.NewBuffer(nil) c.Assert(res.Encode(b), IsNil) expected := "0008NAK\n[PACK]" c.Assert(b.String(), Equals, expected) } func (s *UploadPackResponseSuite) TestEncodeDepth(c *C) { pf := ioutil.NopCloser(bytes.NewBuffer([]byte("PACK"))) req := NewUploadPackRequest() req.Depth = DepthCommits(1) res := NewUploadPackResponseWithPackfile(req, pf) defer func() { c.Assert(res.Close(), IsNil) }() b := bytes.NewBuffer(nil) c.Assert(res.Encode(b), IsNil) expected := "00000008NAK\nPACK" c.Assert(b.String(), Equals, expected) } func (s *UploadPackResponseSuite) TestEncodeMultiACK(c *C) { pf := ioutil.NopCloser(bytes.NewBuffer([]byte("[PACK]"))) req := NewUploadPackRequest() res := NewUploadPackResponseWithPackfile(req, pf) defer func() { c.Assert(res.Close(), IsNil) }() res.ACKs = []plumbing.Hash{ plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f81"), plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f82"), } b := bytes.NewBuffer(nil) c.Assert(res.Encode(b), NotNil) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/reference.go000066400000000000000000000116671345605224300230140ustar00rootroot00000000000000package plumbing import ( "errors" "fmt" "strings" ) const ( refPrefix = "refs/" refHeadPrefix = refPrefix + "heads/" refTagPrefix = refPrefix + "tags/" refRemotePrefix = refPrefix + "remotes/" refNotePrefix = refPrefix + "notes/" symrefPrefix = "ref: " ) // 
RefRevParseRules are a set of rules to parse references into short names. // These are the same rules as used by git in shorten_unambiguous_ref. // See: https://github.com/git/git/blob/e0aaa1b6532cfce93d87af9bc813fb2e7a7ce9d7/refs.c#L417 var RefRevParseRules = []string{ "refs/%s", "refs/tags/%s", "refs/heads/%s", "refs/remotes/%s", "refs/remotes/%s/HEAD", } var ( ErrReferenceNotFound = errors.New("reference not found") ) // ReferenceType reference type's type ReferenceType int8 const ( InvalidReference ReferenceType = 0 HashReference ReferenceType = 1 SymbolicReference ReferenceType = 2 ) func (r ReferenceType) String() string { switch r { case InvalidReference: return "invalid-reference" case HashReference: return "hash-reference" case SymbolicReference: return "symbolic-reference" } return "" } // ReferenceName reference name's type ReferenceName string // NewBranchReferenceName returns a reference name describing a branch based on // his short name. func NewBranchReferenceName(name string) ReferenceName { return ReferenceName(refHeadPrefix + name) } // NewNoteReferenceName returns a reference name describing a note based on his // short name. func NewNoteReferenceName(name string) ReferenceName { return ReferenceName(refNotePrefix + name) } // NewRemoteReferenceName returns a reference name describing a remote branch // based on his short name and the remote name. func NewRemoteReferenceName(remote, name string) ReferenceName { return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, name)) } // NewRemoteHEADReferenceName returns a reference name describing a the HEAD // branch of a remote. func NewRemoteHEADReferenceName(remote string) ReferenceName { return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, HEAD)) } // NewTagReferenceName returns a reference name describing a tag based on short // his name. 
func NewTagReferenceName(name string) ReferenceName { return ReferenceName(refTagPrefix + name) } // IsBranch check if a reference is a branch func (r ReferenceName) IsBranch() bool { return strings.HasPrefix(string(r), refHeadPrefix) } // IsNote check if a reference is a note func (r ReferenceName) IsNote() bool { return strings.HasPrefix(string(r), refNotePrefix) } // IsRemote check if a reference is a remote func (r ReferenceName) IsRemote() bool { return strings.HasPrefix(string(r), refRemotePrefix) } // IsTag check if a reference is a tag func (r ReferenceName) IsTag() bool { return strings.HasPrefix(string(r), refTagPrefix) } func (r ReferenceName) String() string { return string(r) } // Short returns the short name of a ReferenceName func (r ReferenceName) Short() string { s := string(r) res := s for _, format := range RefRevParseRules { _, err := fmt.Sscanf(s, format, &res) if err == nil { continue } } return res } const ( HEAD ReferenceName = "HEAD" Master ReferenceName = "refs/heads/master" ) // Reference is a representation of git reference type Reference struct { t ReferenceType n ReferenceName h Hash target ReferenceName } // NewReferenceFromStrings creates a reference from name and target as string, // the resulting reference can be a SymbolicReference or a HashReference base // on the target provided func NewReferenceFromStrings(name, target string) *Reference { n := ReferenceName(name) if strings.HasPrefix(target, symrefPrefix) { target := ReferenceName(target[len(symrefPrefix):]) return NewSymbolicReference(n, target) } return NewHashReference(n, NewHash(target)) } // NewSymbolicReference creates a new SymbolicReference reference func NewSymbolicReference(n, target ReferenceName) *Reference { return &Reference{ t: SymbolicReference, n: n, target: target, } } // NewHashReference creates a new HashReference reference func NewHashReference(n ReferenceName, h Hash) *Reference { return &Reference{ t: HashReference, n: n, h: h, } } // Type return the 
type of a reference func (r *Reference) Type() ReferenceType { return r.t } // Name return the name of a reference func (r *Reference) Name() ReferenceName { return r.n } // Hash return the hash of a hash reference func (r *Reference) Hash() Hash { return r.h } // Target return the target of a symbolic reference func (r *Reference) Target() ReferenceName { return r.target } // Strings dump a reference as a [2]string func (r *Reference) Strings() [2]string { var o [2]string o[0] = r.Name().String() switch r.Type() { case HashReference: o[1] = r.Hash().String() case SymbolicReference: o[1] = symrefPrefix + r.Target().String() } return o } func (r *Reference) String() string { s := r.Strings() return fmt.Sprintf("%s %s", s[1], s[0]) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/reference_test.go000066400000000000000000000060161345605224300240430ustar00rootroot00000000000000package plumbing import . "gopkg.in/check.v1" type ReferenceSuite struct{} var _ = Suite(&ReferenceSuite{}) const ( ExampleReferenceName ReferenceName = "refs/heads/v4" ) func (s *ReferenceSuite) TestReferenceTypeString(c *C) { c.Assert(SymbolicReference.String(), Equals, "symbolic-reference") } func (s *ReferenceSuite) TestReferenceNameShort(c *C) { c.Assert(ExampleReferenceName.Short(), Equals, "v4") } func (s *ReferenceSuite) TestReferenceNameWithSlash(c *C) { r := ReferenceName("refs/remotes/origin/feature/AllowSlashes") c.Assert(r.Short(), Equals, "origin/feature/AllowSlashes") } func (s *ReferenceSuite) TestReferenceNameNote(c *C) { r := ReferenceName("refs/notes/foo") c.Assert(r.Short(), Equals, "notes/foo") } func (s *ReferenceSuite) TestNewReferenceFromStrings(c *C) { r := NewReferenceFromStrings("refs/heads/v4", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5") c.Assert(r.Type(), Equals, HashReference) c.Assert(r.Name(), Equals, ExampleReferenceName) c.Assert(r.Hash(), Equals, NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) r = NewReferenceFromStrings("HEAD", "ref: refs/heads/v4") 
c.Assert(r.Type(), Equals, SymbolicReference) c.Assert(r.Name(), Equals, HEAD) c.Assert(r.Target(), Equals, ExampleReferenceName) } func (s *ReferenceSuite) TestNewSymbolicReference(c *C) { r := NewSymbolicReference(HEAD, ExampleReferenceName) c.Assert(r.Type(), Equals, SymbolicReference) c.Assert(r.Name(), Equals, HEAD) c.Assert(r.Target(), Equals, ExampleReferenceName) } func (s *ReferenceSuite) TestNewHashReference(c *C) { r := NewHashReference(ExampleReferenceName, NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) c.Assert(r.Type(), Equals, HashReference) c.Assert(r.Name(), Equals, ExampleReferenceName) c.Assert(r.Hash(), Equals, NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) } func (s *ReferenceSuite) TestNewBranchReferenceName(c *C) { r := NewBranchReferenceName("foo") c.Assert(r.String(), Equals, "refs/heads/foo") } func (s *ReferenceSuite) TestNewNoteReferenceName(c *C) { r := NewNoteReferenceName("foo") c.Assert(r.String(), Equals, "refs/notes/foo") } func (s *ReferenceSuite) TestNewRemoteReferenceName(c *C) { r := NewRemoteReferenceName("bar", "foo") c.Assert(r.String(), Equals, "refs/remotes/bar/foo") } func (s *ReferenceSuite) TestNewRemoteHEADReferenceName(c *C) { r := NewRemoteHEADReferenceName("foo") c.Assert(r.String(), Equals, "refs/remotes/foo/HEAD") } func (s *ReferenceSuite) TestNewTagReferenceName(c *C) { r := NewTagReferenceName("foo") c.Assert(r.String(), Equals, "refs/tags/foo") } func (s *ReferenceSuite) TestIsBranch(c *C) { r := ExampleReferenceName c.Assert(r.IsBranch(), Equals, true) } func (s *ReferenceSuite) TestIsNote(c *C) { r := ReferenceName("refs/notes/foo") c.Assert(r.IsNote(), Equals, true) } func (s *ReferenceSuite) TestIsRemote(c *C) { r := ReferenceName("refs/remotes/origin/master") c.Assert(r.IsRemote(), Equals, true) } func (s *ReferenceSuite) TestIsTag(c *C) { r := ReferenceName("refs/tags/v3.1.") c.Assert(r.IsTag(), Equals, true) } 
golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/revision.go000066400000000000000000000004311345605224300226770ustar00rootroot00000000000000package plumbing // Revision represents a git revision // to get more details about git revisions // please check git manual page : // https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html type Revision string func (r Revision) String() string { return string(r) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/revlist/000077500000000000000000000000001345605224300222045ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/revlist/revlist.go000066400000000000000000000116741345605224300242340ustar00rootroot00000000000000// Package revlist provides support to access the ancestors of commits, in a // similar way as the git-rev-list command. package revlist import ( "fmt" "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/plumbing/object" "gopkg.in/src-d/go-git.v4/plumbing/storer" ) // Objects applies a complementary set. It gets all the hashes from all // the reachable objects from the given objects. Ignore param are object hashes // that we want to ignore on the result. All that objects must be accessible // from the object storer. func Objects( s storer.EncodedObjectStorer, objs, ignore []plumbing.Hash, ) ([]plumbing.Hash, error) { return ObjectsWithStorageForIgnores(s, s, objs, ignore) } // ObjectsWithStorageForIgnores is the same as Objects, but a // secondary storage layer can be provided, to be used to finding the // full set of objects to be ignored while finding the reachable // objects. This is useful when the main `s` storage layer is slow // and/or remote, while the ignore list is available somewhere local. 
func ObjectsWithStorageForIgnores( s, ignoreStore storer.EncodedObjectStorer, objs, ignore []plumbing.Hash, ) ([]plumbing.Hash, error) { ignore, err := objects(ignoreStore, ignore, nil, true) if err != nil { return nil, err } return objects(s, objs, ignore, false) } func objects( s storer.EncodedObjectStorer, objects, ignore []plumbing.Hash, allowMissingObjects bool, ) ([]plumbing.Hash, error) { seen := hashListToSet(ignore) result := make(map[plumbing.Hash]bool) visited := make(map[plumbing.Hash]bool) walkerFunc := func(h plumbing.Hash) { if !seen[h] { result[h] = true seen[h] = true } } for _, h := range objects { if err := processObject(s, h, seen, visited, ignore, walkerFunc); err != nil { if allowMissingObjects && err == plumbing.ErrObjectNotFound { continue } return nil, err } } return hashSetToList(result), nil } // processObject obtains the object using the hash an process it depending of its type func processObject( s storer.EncodedObjectStorer, h plumbing.Hash, seen map[plumbing.Hash]bool, visited map[plumbing.Hash]bool, ignore []plumbing.Hash, walkerFunc func(h plumbing.Hash), ) error { if seen[h] { return nil } o, err := s.EncodedObject(plumbing.AnyObject, h) if err != nil { return err } do, err := object.DecodeObject(s, o) if err != nil { return err } switch do := do.(type) { case *object.Commit: return reachableObjects(do, seen, visited, ignore, walkerFunc) case *object.Tree: return iterateCommitTrees(seen, do, walkerFunc) case *object.Tag: walkerFunc(do.Hash) return processObject(s, do.Target, seen, visited, ignore, walkerFunc) case *object.Blob: walkerFunc(do.Hash) default: return fmt.Errorf("object type not valid: %s. "+ "Object reference: %s", o.Type(), o.Hash()) } return nil } // reachableObjects returns, using the callback function, all the reachable // objects from the specified commit. To avoid to iterate over seen commits, // if a commit hash is into the 'seen' set, we will not iterate all his trees // and blobs objects. 
func reachableObjects( commit *object.Commit, seen map[plumbing.Hash]bool, visited map[plumbing.Hash]bool, ignore []plumbing.Hash, cb func(h plumbing.Hash), ) error { i := object.NewCommitPreorderIter(commit, seen, ignore) pending := make(map[plumbing.Hash]bool) addPendingParents(pending, visited, commit) for { commit, err := i.Next() if err == io.EOF { break } if err != nil { return err } if pending[commit.Hash] { delete(pending, commit.Hash) } addPendingParents(pending, visited, commit) if visited[commit.Hash] && len(pending) == 0 { break } if seen[commit.Hash] { continue } cb(commit.Hash) tree, err := commit.Tree() if err != nil { return err } if err := iterateCommitTrees(seen, tree, cb); err != nil { return err } } return nil } func addPendingParents(pending, visited map[plumbing.Hash]bool, commit *object.Commit) { for _, p := range commit.ParentHashes { if !visited[p] { pending[p] = true } } } // iterateCommitTrees iterate all reachable trees from the given commit func iterateCommitTrees( seen map[plumbing.Hash]bool, tree *object.Tree, cb func(h plumbing.Hash), ) error { if seen[tree.Hash] { return nil } cb(tree.Hash) treeWalker := object.NewTreeWalker(tree, true, seen) for { _, e, err := treeWalker.Next() if err == io.EOF { break } if err != nil { return err } if e.Mode == filemode.Submodule { continue } if seen[e.Hash] { continue } cb(e.Hash) } return nil } func hashSetToList(hashes map[plumbing.Hash]bool) []plumbing.Hash { var result []plumbing.Hash for key := range hashes { result = append(result, key) } return result } func hashListToSet(hashes []plumbing.Hash) map[plumbing.Hash]bool { result := make(map[plumbing.Hash]bool) for _, h := range hashes { result[h] = true } return result } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/revlist/revlist_test.go000066400000000000000000000212261345605224300252650ustar00rootroot00000000000000package revlist import ( "testing" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" 
"gopkg.in/src-d/go-git.v4/plumbing/object" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/filesystem" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) func Test(t *testing.T) { TestingT(t) } type RevListSuite struct { fixtures.Suite Storer storer.EncodedObjectStorer } var _ = Suite(&RevListSuite{}) const ( initialCommit = "b029517f6300c2da0f4b651b8642506cd6aaf45d" secondCommit = "b8e471f58bcbca63b07bda20e428190409c2db47" someCommit = "918c48b83bd081e863dbe1b80f8998f058cd8294" someCommitBranch = "e8d3ffab552895c19b9fcf7aa264d277cde33881" someCommitOtherBranch = "6ecf0ef2c2dffb796033e5a02219af86ec6584e5" ) // Created using: git log --graph --oneline --all // // Basic fixture repository commits tree: // // * 6ecf0ef vendor stuff // | * e8d3ffa some code in a branch // |/ // * 918c48b some code // * af2d6a6 some json // * 1669dce Merge branch 'master' // |\ // | * a5b8b09 Merge pull request #1 // | |\ // | | * b8e471f Creating changelog // | |/ // * | 35e8510 binary file // |/ // * b029517 Initial commit func (s *RevListSuite) SetUpTest(c *C) { s.Suite.SetUpSuite(c) sto := filesystem.NewStorage(fixtures.Basic().One().DotGit(), cache.NewObjectLRUDefault()) s.Storer = sto } func (s *RevListSuite) commit(c *C, h plumbing.Hash) *object.Commit { commit, err := object.GetCommit(s.Storer, h) c.Assert(err, IsNil) return commit } func (s *RevListSuite) TestRevListObjects_Submodules(c *C) { submodules := map[string]bool{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5": true, } sto := filesystem.NewStorage(fixtures.ByTag("submodule").One().DotGit(), cache.NewObjectLRUDefault()) ref, err := storer.ResolveReference(sto, plumbing.HEAD) c.Assert(err, IsNil) revList, err := Objects(sto, []plumbing.Hash{ref.Hash()}, nil) c.Assert(err, IsNil) for _, h := range revList { c.Assert(submodules[h.String()], Equals, false) } } // --- // | |\ // | | * b8e471f Creating changelog // | |/ // * | 35e8510 binary file // |/ // * b029517 Initial commit func (s 
*RevListSuite) TestRevListObjects(c *C) { revList := map[string]bool{ "b8e471f58bcbca63b07bda20e428190409c2db47": true, // second commit "c2d30fa8ef288618f65f6eed6e168e0d514886f4": true, // init tree "d3ff53e0564a9f87d8e84b6e28e5060e517008aa": true, // CHANGELOG } localHist, err := Objects(s.Storer, []plumbing.Hash{plumbing.NewHash(initialCommit)}, nil) c.Assert(err, IsNil) remoteHist, err := Objects(s.Storer, []plumbing.Hash{plumbing.NewHash(secondCommit)}, localHist) c.Assert(err, IsNil) for _, h := range remoteHist { c.Assert(revList[h.String()], Equals, true) } c.Assert(len(remoteHist), Equals, len(revList)) } func (s *RevListSuite) TestRevListObjectsTagObject(c *C) { sto := filesystem.NewStorage( fixtures.ByTag("tags"). ByURL("https://github.com/git-fixtures/tags.git").One().DotGit(), cache.NewObjectLRUDefault()) expected := map[string]bool{ "70846e9a10ef7b41064b40f07713d5b8b9a8fc73": true, "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391": true, "ad7897c0fb8e7d9a9ba41fa66072cf06095a6cfc": true, "f7b877701fbf855b44c0a9e86f3fdce2c298b07f": true, } hist, err := Objects(sto, []plumbing.Hash{plumbing.NewHash("ad7897c0fb8e7d9a9ba41fa66072cf06095a6cfc")}, nil) c.Assert(err, IsNil) for _, h := range hist { c.Assert(expected[h.String()], Equals, true) } c.Assert(len(hist), Equals, len(expected)) } func (s *RevListSuite) TestRevListObjectsWithStorageForIgnores(c *C) { sto := filesystem.NewStorage( fixtures.ByTag("merge-conflict").One().DotGit(), cache.NewObjectLRUDefault()) // The "merge-conflict" repo has one extra commit in it, with a // two files modified in two different subdirs. 
expected := map[string]bool{ "1980fcf55330d9d94c34abee5ab734afecf96aba": true, // commit "73d9cf44e9045254346c73f6646b08f9302c8570": true, // root dir "e8435d512a98586bd2e4fcfcdf04101b0bb1b500": true, // go/ "257cc5642cb1a054f08cc83f2d943e56fd3ebe99": true, // haskal.hs "d499a1a0b79b7d87a35155afd0c1cce78b37a91c": true, // example.go "d108adc364fb6f21395d011ae2c8a11d96905b0d": true, // haskal/ } hist, err := ObjectsWithStorageForIgnores(sto, s.Storer, []plumbing.Hash{plumbing.NewHash("1980fcf55330d9d94c34abee5ab734afecf96aba")}, []plumbing.Hash{plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")}) c.Assert(err, IsNil) for _, h := range hist { c.Assert(expected[h.String()], Equals, true) } c.Assert(len(hist), Equals, len(expected)) } // --- // | |\ // | | * b8e471f Creating changelog // | |/ // * | 35e8510 binary file // |/ // * b029517 Initial commit func (s *RevListSuite) TestRevListObjectsWithBlobsAndTrees(c *C) { revList := map[string]bool{ "b8e471f58bcbca63b07bda20e428190409c2db47": true, // second commit } localHist, err := Objects(s.Storer, []plumbing.Hash{ plumbing.NewHash(initialCommit), plumbing.NewHash("c2d30fa8ef288618f65f6eed6e168e0d514886f4"), plumbing.NewHash("d3ff53e0564a9f87d8e84b6e28e5060e517008aa"), }, nil) c.Assert(err, IsNil) remoteHist, err := Objects(s.Storer, []plumbing.Hash{plumbing.NewHash(secondCommit)}, localHist) c.Assert(err, IsNil) for _, h := range remoteHist { c.Assert(revList[h.String()], Equals, true) } c.Assert(len(remoteHist), Equals, len(revList)) } func (s *RevListSuite) TestRevListObjectsReverse(c *C) { localHist, err := Objects(s.Storer, []plumbing.Hash{plumbing.NewHash(secondCommit)}, nil) c.Assert(err, IsNil) remoteHist, err := Objects(s.Storer, []plumbing.Hash{plumbing.NewHash(initialCommit)}, localHist) c.Assert(err, IsNil) c.Assert(len(remoteHist), Equals, 0) } func (s *RevListSuite) TestRevListObjectsSameCommit(c *C) { localHist, err := Objects(s.Storer, []plumbing.Hash{plumbing.NewHash(secondCommit)}, nil) 
c.Assert(err, IsNil) remoteHist, err := Objects(s.Storer, []plumbing.Hash{plumbing.NewHash(secondCommit)}, localHist) c.Assert(err, IsNil) c.Assert(len(remoteHist), Equals, 0) } // * 6ecf0ef vendor stuff // | * e8d3ffa some code in a branch // |/ // * 918c48b some code // ----- func (s *RevListSuite) TestRevListObjectsNewBranch(c *C) { localHist, err := Objects(s.Storer, []plumbing.Hash{plumbing.NewHash(someCommit)}, nil) c.Assert(err, IsNil) remoteHist, err := Objects( s.Storer, []plumbing.Hash{ plumbing.NewHash(someCommitBranch), plumbing.NewHash(someCommitOtherBranch)}, localHist) c.Assert(err, IsNil) revList := map[string]bool{ "a8d315b2b1c615d43042c3a62402b8a54288cf5c": true, // init tree "cf4aa3b38974fb7d81f367c0830f7d78d65ab86b": true, // vendor folder "9dea2395f5403188298c1dabe8bdafe562c491e3": true, // foo.go "e8d3ffab552895c19b9fcf7aa264d277cde33881": true, // branch commit "dbd3641b371024f44d0e469a9c8f5457b0660de1": true, // init tree "7e59600739c96546163833214c36459e324bad0a": true, // README "6ecf0ef2c2dffb796033e5a02219af86ec6584e5": true, // otherBranch commit } for _, h := range remoteHist { c.Assert(revList[h.String()], Equals, true) } c.Assert(len(remoteHist), Equals, len(revList)) } // This tests will ensure that a5b8b09 and b8e471f will be visited even if // 35e8510 has already been visited and will not stop iterating until they // have been as well. 
// // * af2d6a6 some json // * 1669dce Merge branch 'master' // |\ // | * a5b8b09 Merge pull request #1 // | |\ // | | * b8e471f Creating changelog // | |/ // * | 35e8510 binary file // |/ // * b029517 Initial commit func (s *RevListSuite) TestReachableObjectsNoRevisit(c *C) { obj, err := s.Storer.EncodedObject(plumbing.CommitObject, plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a")) c.Assert(err, IsNil) do, err := object.DecodeObject(s.Storer, obj) c.Assert(err, IsNil) commit, ok := do.(*object.Commit) c.Assert(ok, Equals, true) var visited []plumbing.Hash err = reachableObjects( commit, map[plumbing.Hash]bool{ plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"): true, }, map[plumbing.Hash]bool{ plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"): true, }, nil, func(h plumbing.Hash) { obj, err := s.Storer.EncodedObject(plumbing.AnyObject, h) c.Assert(err, IsNil) do, err := object.DecodeObject(s.Storer, obj) c.Assert(err, IsNil) if _, ok := do.(*object.Commit); ok { visited = append(visited, h) } }, ) c.Assert(err, IsNil) c.Assert(visited, DeepEquals, []plumbing.Hash{ plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"), plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea"), plumbing.NewHash("a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69"), plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"), plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"), }) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/storer/000077500000000000000000000000001345605224300220325ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/storer/doc.go000066400000000000000000000001331345605224300231230ustar00rootroot00000000000000// Package storer defines the interfaces to store objects, references, etc. 
package storer golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/storer/index.go000066400000000000000000000003231345605224300234660ustar00rootroot00000000000000package storer import "gopkg.in/src-d/go-git.v4/plumbing/format/index" // IndexStorer generic storage of index.Index type IndexStorer interface { SetIndex(*index.Index) error Index() (*index.Index, error) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/storer/object.go000066400000000000000000000232221345605224300236300ustar00rootroot00000000000000package storer import ( "errors" "io" "time" "gopkg.in/src-d/go-git.v4/plumbing" ) var ( //ErrStop is used to stop a ForEach function in an Iter ErrStop = errors.New("stop iter") ) // EncodedObjectStorer generic storage of objects type EncodedObjectStorer interface { // NewEncodedObject returns a new plumbing.EncodedObject, the real type // of the object can be a custom implementation or the default one, // plumbing.MemoryObject. NewEncodedObject() plumbing.EncodedObject // SetEncodedObject saves an object into the storage, the object should // be create with the NewEncodedObject, method, and file if the type is // not supported. SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error) // EncodedObject gets an object by hash with the given // plumbing.ObjectType. Implementors should return // (nil, plumbing.ErrObjectNotFound) if an object doesn't exist with // both the given hash and object type. // // Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject, // TreeObject and AnyObject. If plumbing.AnyObject is given, the object must // be looked up regardless of its type. EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error) // IterObjects returns a custom EncodedObjectStorer over all the object // on the storage. 
// // Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject, IterEncodedObjects(plumbing.ObjectType) (EncodedObjectIter, error) // HasEncodedObject returns ErrObjNotFound if the object doesn't // exist. If the object does exist, it returns nil. HasEncodedObject(plumbing.Hash) error // EncodedObjectSize returns the plaintext size of the encoded object. EncodedObjectSize(plumbing.Hash) (int64, error) } // DeltaObjectStorer is an EncodedObjectStorer that can return delta // objects. type DeltaObjectStorer interface { // DeltaObject is the same as EncodedObject but without resolving deltas. // Deltas will be returned as plumbing.DeltaObject instances. DeltaObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error) } // Transactioner is a optional method for ObjectStorer, it enable transaction // base write and read operations in the storage type Transactioner interface { // Begin starts a transaction. Begin() Transaction } // LooseObjectStorer is an optional interface for managing "loose" // objects, i.e. those not in packfiles. type LooseObjectStorer interface { // ForEachObjectHash iterates over all the (loose) object hashes // in the repository without necessarily having to read those objects. // Objects only inside pack files may be omitted. // If ErrStop is sent the iteration is stop but no error is returned. ForEachObjectHash(func(plumbing.Hash) error) error // LooseObjectTime looks up the (m)time associated with the // loose object (that is not in a pack file). Some // implementations (e.g. without loose objects) // always return an error. LooseObjectTime(plumbing.Hash) (time.Time, error) // DeleteLooseObject deletes a loose object if it exists. DeleteLooseObject(plumbing.Hash) error } // PackedObjectStorer is an optional interface for managing objects in // packfiles. type PackedObjectStorer interface { // ObjectPacks returns hashes of object packs if the underlying // implementation has pack files. 
ObjectPacks() ([]plumbing.Hash, error) // DeleteOldObjectPackAndIndex deletes an object pack and the corresponding index file if they exist. // Deletion is only performed if the pack is older than the supplied time (or the time is zero). DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error } // PackfileWriter is a optional method for ObjectStorer, it enable direct write // of packfile to the storage type PackfileWriter interface { // PackfileWriter returns a writer for writing a packfile to the storage // // If the Storer not implements PackfileWriter the objects should be written // using the Set method. PackfileWriter() (io.WriteCloser, error) } // EncodedObjectIter is a generic closable interface for iterating over objects. type EncodedObjectIter interface { Next() (plumbing.EncodedObject, error) ForEach(func(plumbing.EncodedObject) error) error Close() } // Transaction is an in-progress storage transaction. A transaction must end // with a call to Commit or Rollback. type Transaction interface { SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error) EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error) Commit() error Rollback() error } // EncodedObjectLookupIter implements EncodedObjectIter. It iterates over a // series of object hashes and yields their associated objects by retrieving // each one from object storage. The retrievals are lazy and only occur when the // iterator moves forward with a call to Next(). // // The EncodedObjectLookupIter must be closed with a call to Close() when it is // no longer needed. type EncodedObjectLookupIter struct { storage EncodedObjectStorer series []plumbing.Hash t plumbing.ObjectType pos int } // NewEncodedObjectLookupIter returns an object iterator given an object storage // and a slice of object hashes. 
func NewEncodedObjectLookupIter( storage EncodedObjectStorer, t plumbing.ObjectType, series []plumbing.Hash) *EncodedObjectLookupIter { return &EncodedObjectLookupIter{ storage: storage, series: series, t: t, } } // Next returns the next object from the iterator. If the iterator has reached // the end it will return io.EOF as an error. If the object can't be found in // the object storage, it will return plumbing.ErrObjectNotFound as an error. // If the object is retreieved successfully error will be nil. func (iter *EncodedObjectLookupIter) Next() (plumbing.EncodedObject, error) { if iter.pos >= len(iter.series) { return nil, io.EOF } hash := iter.series[iter.pos] obj, err := iter.storage.EncodedObject(iter.t, hash) if err == nil { iter.pos++ } return obj, err } // ForEach call the cb function for each object contained on this iter until // an error happens or the end of the iter is reached. If ErrStop is sent // the iteration is stop but no error is returned. The iterator is closed. func (iter *EncodedObjectLookupIter) ForEach(cb func(plumbing.EncodedObject) error) error { return ForEachIterator(iter, cb) } // Close releases any resources used by the iterator. func (iter *EncodedObjectLookupIter) Close() { iter.pos = len(iter.series) } // EncodedObjectSliceIter implements EncodedObjectIter. It iterates over a // series of objects stored in a slice and yields each one in turn when Next() // is called. // // The EncodedObjectSliceIter must be closed with a call to Close() when it is // no longer needed. type EncodedObjectSliceIter struct { series []plumbing.EncodedObject } // NewEncodedObjectSliceIter returns an object iterator for the given slice of // objects. func NewEncodedObjectSliceIter(series []plumbing.EncodedObject) *EncodedObjectSliceIter { return &EncodedObjectSliceIter{ series: series, } } // Next returns the next object from the iterator. If the iterator has reached // the end it will return io.EOF as an error. 
If the object is retreieved // successfully error will be nil. func (iter *EncodedObjectSliceIter) Next() (plumbing.EncodedObject, error) { if len(iter.series) == 0 { return nil, io.EOF } obj := iter.series[0] iter.series = iter.series[1:] return obj, nil } // ForEach call the cb function for each object contained on this iter until // an error happens or the end of the iter is reached. If ErrStop is sent // the iteration is stop but no error is returned. The iterator is closed. func (iter *EncodedObjectSliceIter) ForEach(cb func(plumbing.EncodedObject) error) error { return ForEachIterator(iter, cb) } // Close releases any resources used by the iterator. func (iter *EncodedObjectSliceIter) Close() { iter.series = []plumbing.EncodedObject{} } // MultiEncodedObjectIter implements EncodedObjectIter. It iterates over several // EncodedObjectIter, // // The MultiObjectIter must be closed with a call to Close() when it is no // longer needed. type MultiEncodedObjectIter struct { iters []EncodedObjectIter } // NewMultiEncodedObjectIter returns an object iterator for the given slice of // EncodedObjectIters. func NewMultiEncodedObjectIter(iters []EncodedObjectIter) EncodedObjectIter { return &MultiEncodedObjectIter{iters: iters} } // Next returns the next object from the iterator, if one iterator reach io.EOF // is removed and the next one is used. func (iter *MultiEncodedObjectIter) Next() (plumbing.EncodedObject, error) { if len(iter.iters) == 0 { return nil, io.EOF } obj, err := iter.iters[0].Next() if err == io.EOF { iter.iters[0].Close() iter.iters = iter.iters[1:] return iter.Next() } return obj, err } // ForEach call the cb function for each object contained on this iter until // an error happens or the end of the iter is reached. If ErrStop is sent // the iteration is stop but no error is returned. The iterator is closed. 
func (iter *MultiEncodedObjectIter) ForEach(cb func(plumbing.EncodedObject) error) error { return ForEachIterator(iter, cb) } // Close releases any resources used by the iterator. func (iter *MultiEncodedObjectIter) Close() { for _, i := range iter.iters { i.Close() } } type bareIterator interface { Next() (plumbing.EncodedObject, error) Close() } // ForEachIterator is a helper function to build iterators without need to // rewrite the same ForEach function each time. func ForEachIterator(iter bareIterator, cb func(plumbing.EncodedObject) error) error { defer iter.Close() for { obj, err := iter.Next() if err != nil { if err == io.EOF { return nil } return err } if err := cb(obj); err != nil { if err == ErrStop { return nil } return err } } } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/storer/object_test.go000066400000000000000000000071241345605224300246720ustar00rootroot00000000000000package storer import ( "fmt" "testing" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing" ) func Test(t *testing.T) { TestingT(t) } type ObjectSuite struct { Objects []plumbing.EncodedObject Hash []plumbing.Hash } var _ = Suite(&ObjectSuite{}) func (s *ObjectSuite) SetUpSuite(c *C) { s.Objects = []plumbing.EncodedObject{ s.buildObject([]byte("foo")), s.buildObject([]byte("bar")), } for _, o := range s.Objects { s.Hash = append(s.Hash, o.Hash()) } } func (s *ObjectSuite) TestMultiObjectIterNext(c *C) { expected := []plumbing.EncodedObject{ &plumbing.MemoryObject{}, &plumbing.MemoryObject{}, &plumbing.MemoryObject{}, &plumbing.MemoryObject{}, &plumbing.MemoryObject{}, &plumbing.MemoryObject{}, } iter := NewMultiEncodedObjectIter([]EncodedObjectIter{ NewEncodedObjectSliceIter(expected[0:2]), NewEncodedObjectSliceIter(expected[2:4]), NewEncodedObjectSliceIter(expected[4:5]), }) var i int iter.ForEach(func(o plumbing.EncodedObject) error { c.Assert(o, Equals, expected[i]) i++ return nil }) iter.Close() } func (s *ObjectSuite) buildObject(content []byte) plumbing.EncodedObject { o 
:= &plumbing.MemoryObject{} o.Write(content) return o } func (s *ObjectSuite) TestObjectLookupIter(c *C) { var count int storage := &MockObjectStorage{s.Objects} i := NewEncodedObjectLookupIter(storage, plumbing.CommitObject, s.Hash) err := i.ForEach(func(o plumbing.EncodedObject) error { c.Assert(o, NotNil) c.Assert(o.Hash().String(), Equals, s.Hash[count].String()) count++ return nil }) c.Assert(err, IsNil) i.Close() } func (s *ObjectSuite) TestObjectSliceIter(c *C) { var count int i := NewEncodedObjectSliceIter(s.Objects) err := i.ForEach(func(o plumbing.EncodedObject) error { c.Assert(o, NotNil) c.Assert(o.Hash().String(), Equals, s.Hash[count].String()) count++ return nil }) c.Assert(count, Equals, 2) c.Assert(err, IsNil) c.Assert(i.series, HasLen, 0) } func (s *ObjectSuite) TestObjectSliceIterStop(c *C) { i := NewEncodedObjectSliceIter(s.Objects) var count = 0 err := i.ForEach(func(o plumbing.EncodedObject) error { c.Assert(o, NotNil) c.Assert(o.Hash().String(), Equals, s.Hash[count].String()) count++ return ErrStop }) c.Assert(count, Equals, 1) c.Assert(err, IsNil) } func (s *ObjectSuite) TestObjectSliceIterError(c *C) { i := NewEncodedObjectSliceIter([]plumbing.EncodedObject{ s.buildObject([]byte("foo")), }) err := i.ForEach(func(plumbing.EncodedObject) error { return fmt.Errorf("a random error") }) c.Assert(err, NotNil) } type MockObjectStorage struct { db []plumbing.EncodedObject } func (o *MockObjectStorage) NewEncodedObject() plumbing.EncodedObject { return nil } func (o *MockObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) { return plumbing.ZeroHash, nil } func (o *MockObjectStorage) HasEncodedObject(h plumbing.Hash) error { for _, o := range o.db { if o.Hash() == h { return nil } } return plumbing.ErrObjectNotFound } func (o *MockObjectStorage) EncodedObjectSize(h plumbing.Hash) ( size int64, err error) { for _, o := range o.db { if o.Hash() == h { return o.Size(), nil } } return 0, plumbing.ErrObjectNotFound } func 
(o *MockObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { for _, o := range o.db { if o.Hash() == h { return o, nil } } return nil, plumbing.ErrObjectNotFound } func (o *MockObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (EncodedObjectIter, error) { return nil, nil } func (o *MockObjectStorage) Begin() Transaction { return nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/storer/reference.go000066400000000000000000000143421345605224300243230ustar00rootroot00000000000000package storer import ( "errors" "io" "gopkg.in/src-d/go-git.v4/plumbing" ) const MaxResolveRecursion = 1024 // ErrMaxResolveRecursion is returned by ResolveReference is MaxResolveRecursion // is exceeded var ErrMaxResolveRecursion = errors.New("max. recursion level reached") // ReferenceStorer is a generic storage of references. type ReferenceStorer interface { SetReference(*plumbing.Reference) error // CheckAndSetReference sets the reference `new`, but if `old` is // not `nil`, it first checks that the current stored value for // `old.Name()` matches the given reference value in `old`. If // not, it returns an error and doesn't update `new`. CheckAndSetReference(new, old *plumbing.Reference) error Reference(plumbing.ReferenceName) (*plumbing.Reference, error) IterReferences() (ReferenceIter, error) RemoveReference(plumbing.ReferenceName) error CountLooseRefs() (int, error) PackRefs() error } // ReferenceIter is a generic closable interface for iterating over references. type ReferenceIter interface { Next() (*plumbing.Reference, error) ForEach(func(*plumbing.Reference) error) error Close() } type referenceFilteredIter struct { ff func(r *plumbing.Reference) bool iter ReferenceIter } // NewReferenceFilteredIter returns a reference iterator for the given reference // Iterator. This iterator will iterate only references that accomplish the // provided function. 
func NewReferenceFilteredIter( ff func(r *plumbing.Reference) bool, iter ReferenceIter) ReferenceIter { return &referenceFilteredIter{ff, iter} } // Next returns the next reference from the iterator. If the iterator has reached // the end it will return io.EOF as an error. func (iter *referenceFilteredIter) Next() (*plumbing.Reference, error) { for { r, err := iter.iter.Next() if err != nil { return nil, err } if iter.ff(r) { return r, nil } continue } } // ForEach call the cb function for each reference contained on this iter until // an error happens or the end of the iter is reached. If ErrStop is sent // the iteration is stopped but no error is returned. The iterator is closed. func (iter *referenceFilteredIter) ForEach(cb func(*plumbing.Reference) error) error { defer iter.Close() for { r, err := iter.Next() if err == io.EOF { break } if err != nil { return err } if err := cb(r); err != nil { if err == ErrStop { break } return err } } return nil } // Close releases any resources used by the iterator. func (iter *referenceFilteredIter) Close() { iter.iter.Close() } // ReferenceSliceIter implements ReferenceIter. It iterates over a series of // references stored in a slice and yields each one in turn when Next() is // called. // // The ReferenceSliceIter must be closed with a call to Close() when it is no // longer needed. type ReferenceSliceIter struct { series []*plumbing.Reference pos int } // NewReferenceSliceIter returns a reference iterator for the given slice of // objects. func NewReferenceSliceIter(series []*plumbing.Reference) ReferenceIter { return &ReferenceSliceIter{ series: series, } } // Next returns the next reference from the iterator. If the iterator has // reached the end it will return io.EOF as an error. 
func (iter *ReferenceSliceIter) Next() (*plumbing.Reference, error) { if iter.pos >= len(iter.series) { return nil, io.EOF } obj := iter.series[iter.pos] iter.pos++ return obj, nil } // ForEach call the cb function for each reference contained on this iter until // an error happens or the end of the iter is reached. If ErrStop is sent // the iteration is stop but no error is returned. The iterator is closed. func (iter *ReferenceSliceIter) ForEach(cb func(*plumbing.Reference) error) error { return forEachReferenceIter(iter, cb) } type bareReferenceIterator interface { Next() (*plumbing.Reference, error) Close() } func forEachReferenceIter(iter bareReferenceIterator, cb func(*plumbing.Reference) error) error { defer iter.Close() for { obj, err := iter.Next() if err != nil { if err == io.EOF { return nil } return err } if err := cb(obj); err != nil { if err == ErrStop { return nil } return err } } } // Close releases any resources used by the iterator. func (iter *ReferenceSliceIter) Close() { iter.pos = len(iter.series) } // MultiReferenceIter implements ReferenceIter. It iterates over several // ReferenceIter, // // The MultiReferenceIter must be closed with a call to Close() when it is no // longer needed. type MultiReferenceIter struct { iters []ReferenceIter } // NewMultiReferenceIter returns an reference iterator for the given slice of // EncodedObjectIters. func NewMultiReferenceIter(iters []ReferenceIter) ReferenceIter { return &MultiReferenceIter{iters: iters} } // Next returns the next reference from the iterator, if one iterator reach // io.EOF is removed and the next one is used. 
func (iter *MultiReferenceIter) Next() (*plumbing.Reference, error) { if len(iter.iters) == 0 { return nil, io.EOF } obj, err := iter.iters[0].Next() if err == io.EOF { iter.iters[0].Close() iter.iters = iter.iters[1:] return iter.Next() } return obj, err } // ForEach call the cb function for each reference contained on this iter until // an error happens or the end of the iter is reached. If ErrStop is sent // the iteration is stop but no error is returned. The iterator is closed. func (iter *MultiReferenceIter) ForEach(cb func(*plumbing.Reference) error) error { return forEachReferenceIter(iter, cb) } // Close releases any resources used by the iterator. func (iter *MultiReferenceIter) Close() { for _, i := range iter.iters { i.Close() } } // ResolveReference resolves a SymbolicReference to a HashReference. func ResolveReference(s ReferenceStorer, n plumbing.ReferenceName) (*plumbing.Reference, error) { r, err := s.Reference(n) if err != nil || r == nil { return r, err } return resolveReference(s, r, 0) } func resolveReference(s ReferenceStorer, r *plumbing.Reference, recursion int) (*plumbing.Reference, error) { if r.Type() != plumbing.SymbolicReference { return r, nil } if recursion > MaxResolveRecursion { return nil, ErrMaxResolveRecursion } t, err := s.Reference(r.Target()) if err != nil { return nil, err } recursion++ return resolveReference(s, t, recursion) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/storer/reference_test.go000066400000000000000000000111551345605224300253610ustar00rootroot00000000000000package storer import ( "errors" "io" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing" ) type ReferenceSuite struct{} var _ = Suite(&ReferenceSuite{}) func (s *ReferenceSuite) TestReferenceSliceIterNext(c *C) { slice := []*plumbing.Reference{ plumbing.NewReferenceFromStrings("foo", "foo"), plumbing.NewReferenceFromStrings("bar", "bar"), } i := NewReferenceSliceIter(slice) foo, err := i.Next() c.Assert(err, IsNil) c.Assert(foo == slice[0], Equals, true) bar, err := i.Next() c.Assert(err, IsNil) c.Assert(bar == slice[1], Equals, true) empty, err := i.Next() c.Assert(err, Equals, io.EOF) c.Assert(empty, IsNil) } func (s *ReferenceSuite) TestReferenceSliceIterForEach(c *C) { slice := []*plumbing.Reference{ plumbing.NewReferenceFromStrings("foo", "foo"), plumbing.NewReferenceFromStrings("bar", "bar"), } i := NewReferenceSliceIter(slice) var count int i.ForEach(func(r *plumbing.Reference) error { c.Assert(r == slice[count], Equals, true) count++ return nil }) c.Assert(count, Equals, 2) } func (s *ReferenceSuite) TestReferenceSliceIterForEachError(c *C) { slice := []*plumbing.Reference{ plumbing.NewReferenceFromStrings("foo", "foo"), plumbing.NewReferenceFromStrings("bar", "bar"), } i := NewReferenceSliceIter(slice) var count int exampleErr := errors.New("SOME ERROR") err := i.ForEach(func(r *plumbing.Reference) error { c.Assert(r == slice[count], Equals, true) count++ if count == 2 { return exampleErr } return nil }) c.Assert(err, Equals, exampleErr) c.Assert(count, Equals, 2) } func (s *ReferenceSuite) TestReferenceSliceIterForEachStop(c *C) { slice := []*plumbing.Reference{ plumbing.NewReferenceFromStrings("foo", "foo"), plumbing.NewReferenceFromStrings("bar", "bar"), } i := NewReferenceSliceIter(slice) var count int i.ForEach(func(r *plumbing.Reference) error { c.Assert(r == slice[count], Equals, true) count++ return ErrStop }) c.Assert(count, Equals, 1) } func (s *ReferenceSuite) TestReferenceFilteredIterNext(c *C) { slice := []*plumbing.Reference{ plumbing.NewReferenceFromStrings("foo", "foo"), 
plumbing.NewReferenceFromStrings("bar", "bar"), } i := NewReferenceFilteredIter(func(r *plumbing.Reference) bool { return r.Name() == "bar" }, NewReferenceSliceIter(slice)) foo, err := i.Next() c.Assert(err, IsNil) c.Assert(foo == slice[0], Equals, false) c.Assert(foo == slice[1], Equals, true) empty, err := i.Next() c.Assert(err, Equals, io.EOF) c.Assert(empty, IsNil) } func (s *ReferenceSuite) TestReferenceFilteredIterForEach(c *C) { slice := []*plumbing.Reference{ plumbing.NewReferenceFromStrings("foo", "foo"), plumbing.NewReferenceFromStrings("bar", "bar"), } i := NewReferenceFilteredIter(func(r *plumbing.Reference) bool { return r.Name() == "bar" }, NewReferenceSliceIter(slice)) var count int i.ForEach(func(r *plumbing.Reference) error { c.Assert(r == slice[1], Equals, true) count++ return nil }) c.Assert(count, Equals, 1) } func (s *ReferenceSuite) TestReferenceFilteredIterError(c *C) { slice := []*plumbing.Reference{ plumbing.NewReferenceFromStrings("foo", "foo"), plumbing.NewReferenceFromStrings("bar", "bar"), } i := NewReferenceFilteredIter(func(r *plumbing.Reference) bool { return r.Name() == "bar" }, NewReferenceSliceIter(slice)) var count int exampleErr := errors.New("SOME ERROR") err := i.ForEach(func(r *plumbing.Reference) error { c.Assert(r == slice[1], Equals, true) count++ if count == 1 { return exampleErr } return nil }) c.Assert(err, Equals, exampleErr) c.Assert(count, Equals, 1) } func (s *ReferenceSuite) TestReferenceFilteredIterForEachStop(c *C) { slice := []*plumbing.Reference{ plumbing.NewReferenceFromStrings("foo", "foo"), plumbing.NewReferenceFromStrings("bar", "bar"), } i := NewReferenceFilteredIter(func(r *plumbing.Reference) bool { return r.Name() == "bar" }, NewReferenceSliceIter(slice)) var count int i.ForEach(func(r *plumbing.Reference) error { c.Assert(r == slice[1], Equals, true) count++ return ErrStop }) c.Assert(count, Equals, 1) } func (s *ReferenceSuite) TestMultiReferenceIterForEach(c *C) { i := NewMultiReferenceIter( 
[]ReferenceIter{ NewReferenceSliceIter([]*plumbing.Reference{ plumbing.NewReferenceFromStrings("foo", "foo"), }), NewReferenceSliceIter([]*plumbing.Reference{ plumbing.NewReferenceFromStrings("bar", "bar"), }), }, ) var result []string err := i.ForEach(func(r *plumbing.Reference) error { result = append(result, r.Name().String()) return nil }) c.Assert(err, IsNil) c.Assert(result, HasLen, 2) c.Assert(result, DeepEquals, []string{"foo", "bar"}) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/storer/shallow.go000066400000000000000000000004731345605224300240360ustar00rootroot00000000000000package storer import "gopkg.in/src-d/go-git.v4/plumbing" // ShallowStorer is a storage of references to shallow commits by hash, // meaning that these commits have missing parents because of a shallow fetch. type ShallowStorer interface { SetShallow([]plumbing.Hash) error Shallow() ([]plumbing.Hash, error) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/storer/storer.go000066400000000000000000000006311345605224300236770ustar00rootroot00000000000000package storer // Storer is a basic storer for encoded objects and references. type Storer interface { EncodedObjectStorer ReferenceStorer } // Initializer should be implemented by storers that require to perform any // operation when creating a new repository (i.e. git init). type Initializer interface { // Init performs initialization of the storer and returns the error, if // any. Init() error } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/000077500000000000000000000000001345605224300225505ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/client/000077500000000000000000000000001345605224300240265ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/client/client.go000066400000000000000000000025031345605224300256330ustar00rootroot00000000000000// Package client contains helper function to deal with the different client // protocols. 
package client import ( "fmt" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/plumbing/transport/file" "gopkg.in/src-d/go-git.v4/plumbing/transport/git" "gopkg.in/src-d/go-git.v4/plumbing/transport/http" "gopkg.in/src-d/go-git.v4/plumbing/transport/ssh" ) // Protocols are the protocols supported by default. var Protocols = map[string]transport.Transport{ "http": http.DefaultClient, "https": http.DefaultClient, "ssh": ssh.DefaultClient, "git": git.DefaultClient, "file": file.DefaultClient, } // InstallProtocol adds or modifies an existing protocol. func InstallProtocol(scheme string, c transport.Transport) { if c == nil { delete(Protocols, scheme) return } Protocols[scheme] = c } // NewClient returns the appropriate client among of the set of known protocols: // http://, https://, ssh:// and file://. // See `InstallProtocol` to add or modify protocols. func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) { f, ok := Protocols[endpoint.Protocol] if !ok { return nil, fmt.Errorf("unsupported scheme %q", endpoint.Protocol) } if f == nil { return nil, fmt.Errorf("malformed client for scheme %q, client is defined as nil", endpoint.Protocol) } return f, nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/client/client_test.go000066400000000000000000000031151345605224300266720ustar00rootroot00000000000000package client import ( "fmt" "net/http" "testing" "gopkg.in/src-d/go-git.v4/plumbing/transport" . 
"gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type ClientSuite struct{} var _ = Suite(&ClientSuite{}) func (s *ClientSuite) TestNewClientSSH(c *C) { e, err := transport.NewEndpoint("ssh://github.com/src-d/go-git") c.Assert(err, IsNil) output, err := NewClient(e) c.Assert(err, IsNil) c.Assert(output, NotNil) } func (s *ClientSuite) TestNewClientUnknown(c *C) { e, err := transport.NewEndpoint("unknown://github.com/src-d/go-git") c.Assert(err, IsNil) _, err = NewClient(e) c.Assert(err, NotNil) } func (s *ClientSuite) TestNewClientNil(c *C) { Protocols["newscheme"] = nil e, err := transport.NewEndpoint("newscheme://github.com/src-d/go-git") c.Assert(err, IsNil) _, err = NewClient(e) c.Assert(err, NotNil) } func (s *ClientSuite) TestInstallProtocol(c *C) { InstallProtocol("newscheme", &dummyClient{}) c.Assert(Protocols["newscheme"], NotNil) } func (s *ClientSuite) TestInstallProtocolNilValue(c *C) { InstallProtocol("newscheme", &dummyClient{}) InstallProtocol("newscheme", nil) _, ok := Protocols["newscheme"] c.Assert(ok, Equals, false) } type dummyClient struct { *http.Client } func (*dummyClient) NewUploadPackSession(*transport.Endpoint, transport.AuthMethod) ( transport.UploadPackSession, error) { return nil, nil } func (*dummyClient) NewReceivePackSession(*transport.Endpoint, transport.AuthMethod) ( transport.ReceivePackSession, error) { return nil, nil } func typeAsString(v interface{}) string { return fmt.Sprintf("%T", v) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/client/example_test.go000066400000000000000000000007501345605224300270510ustar00rootroot00000000000000package client_test import ( "crypto/tls" "net/http" "gopkg.in/src-d/go-git.v4/plumbing/transport/client" githttp "gopkg.in/src-d/go-git.v4/plumbing/transport/http" ) func ExampleInstallProtocol() { // Create custom net/http client that. 
httpClient := &http.Client{ Transport: &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, }, } // Install it as default client for https URLs. client.InstallProtocol("https", githttp.NewClient(httpClient)) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/common.go000066400000000000000000000157171345605224300244020ustar00rootroot00000000000000// Package transport includes the implementation for different transport // protocols. // // `Client` can be used to fetch and send packfiles to a git server. // The `client` package provides higher level functions to instantiate the // appropriate `Client` based on the repository URL. // // go-git supports HTTP and SSH (see `Protocols`), but you can also install // your own protocols (see the `client` package). // // Each protocol has its own implementation of `Client`, but you should // generally not use them directly, use `client.NewClient` instead. package transport import ( "bytes" "context" "errors" "fmt" "io" "net/url" "strconv" "strings" giturl "gopkg.in/src-d/go-git.v4/internal/url" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" ) var ( ErrRepositoryNotFound = errors.New("repository not found") ErrEmptyRemoteRepository = errors.New("remote repository is empty") ErrAuthenticationRequired = errors.New("authentication required") ErrAuthorizationFailed = errors.New("authorization failed") ErrEmptyUploadPackRequest = errors.New("empty git-upload-pack given") ErrInvalidAuthMethod = errors.New("invalid auth method") ErrAlreadyConnected = errors.New("session already established") ) const ( UploadPackServiceName = "git-upload-pack" ReceivePackServiceName = "git-receive-pack" ) // Transport can initiate git-upload-pack and git-receive-pack processes. // It is implemented both by the client and the server, making this a RPC. 
type Transport interface { // NewUploadPackSession starts a git-upload-pack session for an endpoint. NewUploadPackSession(*Endpoint, AuthMethod) (UploadPackSession, error) // NewReceivePackSession starts a git-receive-pack session for an endpoint. NewReceivePackSession(*Endpoint, AuthMethod) (ReceivePackSession, error) } type Session interface { // AdvertisedReferences retrieves the advertised references for a // repository. // If the repository does not exist, returns ErrRepositoryNotFound. // If the repository exists, but is empty, returns ErrEmptyRemoteRepository. AdvertisedReferences() (*packp.AdvRefs, error) io.Closer } type AuthMethod interface { fmt.Stringer Name() string } // UploadPackSession represents a git-upload-pack session. // A git-upload-pack session has two steps: reference discovery // (AdvertisedReferences) and uploading pack (UploadPack). type UploadPackSession interface { Session // UploadPack takes a git-upload-pack request and returns a response, // including a packfile. Don't be confused by terminology, the client // side of a git-upload-pack is called git-fetch-pack, although here // the same interface is used to make it RPC-like. UploadPack(context.Context, *packp.UploadPackRequest) (*packp.UploadPackResponse, error) } // ReceivePackSession represents a git-receive-pack session. // A git-receive-pack session has two steps: reference discovery // (AdvertisedReferences) and receiving pack (ReceivePack). // In that order. type ReceivePackSession interface { Session // ReceivePack sends an update references request and a packfile // reader and returns a ReportStatus and error. Don't be confused by // terminology, the client side of a git-receive-pack is called // git-send-pack, although here the same interface is used to make it // RPC-like. ReceivePack(context.Context, *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) } // Endpoint represents a Git URL in any supported protocol. 
type Endpoint struct { // Protocol is the protocol of the endpoint (e.g. git, https, file). Protocol string // User is the user. User string // Password is the password. Password string // Host is the host. Host string // Port is the port to connect, if 0 the default port for the given protocol // wil be used. Port int // Path is the repository path. Path string } var defaultPorts = map[string]int{ "http": 80, "https": 443, "git": 9418, "ssh": 22, } // String returns a string representation of the Git URL. func (u *Endpoint) String() string { var buf bytes.Buffer if u.Protocol != "" { buf.WriteString(u.Protocol) buf.WriteByte(':') } if u.Protocol != "" || u.Host != "" || u.User != "" || u.Password != "" { buf.WriteString("//") if u.User != "" || u.Password != "" { buf.WriteString(url.PathEscape(u.User)) if u.Password != "" { buf.WriteByte(':') buf.WriteString(url.PathEscape(u.Password)) } buf.WriteByte('@') } if u.Host != "" { buf.WriteString(u.Host) if u.Port != 0 { port, ok := defaultPorts[strings.ToLower(u.Protocol)] if !ok || ok && port != u.Port { fmt.Fprintf(&buf, ":%d", u.Port) } } } } if u.Path != "" && u.Path[0] != '/' && u.Host != "" { buf.WriteByte('/') } buf.WriteString(u.Path) return buf.String() } func NewEndpoint(endpoint string) (*Endpoint, error) { if e, ok := parseSCPLike(endpoint); ok { return e, nil } if e, ok := parseFile(endpoint); ok { return e, nil } return parseURL(endpoint) } func parseURL(endpoint string) (*Endpoint, error) { u, err := url.Parse(endpoint) if err != nil { return nil, err } if !u.IsAbs() { return nil, plumbing.NewPermanentError(fmt.Errorf( "invalid endpoint: %s", endpoint, )) } var user, pass string if u.User != nil { user = u.User.Username() pass, _ = u.User.Password() } return &Endpoint{ Protocol: u.Scheme, User: user, Password: pass, Host: u.Hostname(), Port: getPort(u), Path: getPath(u), }, nil } func getPort(u *url.URL) int { p := u.Port() if p == "" { return 0 } i, err := strconv.Atoi(p) if err != nil { return 0 } 
return i } func getPath(u *url.URL) string { var res string = u.Path if u.RawQuery != "" { res += "?" + u.RawQuery } if u.Fragment != "" { res += "#" + u.Fragment } return res } func parseSCPLike(endpoint string) (*Endpoint, bool) { if giturl.MatchesScheme(endpoint) || !giturl.MatchesScpLike(endpoint) { return nil, false } user, host, portStr, path := giturl.FindScpLikeComponents(endpoint) port, err := strconv.Atoi(portStr) if err != nil { port = 22 } return &Endpoint{ Protocol: "ssh", User: user, Host: host, Port: port, Path: path, }, true } func parseFile(endpoint string) (*Endpoint, bool) { if giturl.MatchesScheme(endpoint) { return nil, false } path := endpoint return &Endpoint{ Protocol: "file", Path: path, }, true } // UnsupportedCapabilities are the capabilities not supported by any client // implementation var UnsupportedCapabilities = []capability.Capability{ capability.MultiACK, capability.MultiACKDetailed, capability.ThinPack, } // FilterUnsupportedCapabilities it filter out all the UnsupportedCapabilities // from a capability.List, the intended usage is on the client implementation // to filter the capabilities from an AdvRefs message. func FilterUnsupportedCapabilities(list *capability.List) { for _, c := range UnsupportedCapabilities { list.Delete(c) } } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/common_test.go000066400000000000000000000141031345605224300254250ustar00rootroot00000000000000package transport import ( "fmt" "net/url" "testing" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" . 
"gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type SuiteCommon struct{} var _ = Suite(&SuiteCommon{}) func (s *SuiteCommon) TestNewEndpointHTTP(c *C) { e, err := NewEndpoint("http://git:pass@github.com/user/repository.git?foo#bar") c.Assert(err, IsNil) c.Assert(e.Protocol, Equals, "http") c.Assert(e.User, Equals, "git") c.Assert(e.Password, Equals, "pass") c.Assert(e.Host, Equals, "github.com") c.Assert(e.Port, Equals, 0) c.Assert(e.Path, Equals, "/user/repository.git?foo#bar") c.Assert(e.String(), Equals, "http://git:pass@github.com/user/repository.git?foo#bar") } func (s *SuiteCommon) TestNewEndpointPorts(c *C) { e, err := NewEndpoint("http://git:pass@github.com:8080/user/repository.git?foo#bar") c.Assert(err, IsNil) c.Assert(e.String(), Equals, "http://git:pass@github.com:8080/user/repository.git?foo#bar") e, err = NewEndpoint("https://git:pass@github.com:443/user/repository.git?foo#bar") c.Assert(err, IsNil) c.Assert(e.String(), Equals, "https://git:pass@github.com/user/repository.git?foo#bar") e, err = NewEndpoint("ssh://git:pass@github.com:22/user/repository.git?foo#bar") c.Assert(err, IsNil) c.Assert(e.String(), Equals, "ssh://git:pass@github.com/user/repository.git?foo#bar") e, err = NewEndpoint("git://github.com:9418/user/repository.git?foo#bar") c.Assert(err, IsNil) c.Assert(e.String(), Equals, "git://github.com/user/repository.git?foo#bar") } func (s *SuiteCommon) TestNewEndpointSSH(c *C) { e, err := NewEndpoint("ssh://git@github.com/user/repository.git") c.Assert(err, IsNil) c.Assert(e.Protocol, Equals, "ssh") c.Assert(e.User, Equals, "git") c.Assert(e.Password, Equals, "") c.Assert(e.Host, Equals, "github.com") c.Assert(e.Port, Equals, 0) c.Assert(e.Path, Equals, "/user/repository.git") c.Assert(e.String(), Equals, "ssh://git@github.com/user/repository.git") } func (s *SuiteCommon) TestNewEndpointSSHNoUser(c *C) { e, err := NewEndpoint("ssh://github.com/user/repository.git") c.Assert(err, IsNil) c.Assert(e.Protocol, Equals, "ssh") 
c.Assert(e.User, Equals, "") c.Assert(e.Password, Equals, "") c.Assert(e.Host, Equals, "github.com") c.Assert(e.Port, Equals, 0) c.Assert(e.Path, Equals, "/user/repository.git") c.Assert(e.String(), Equals, "ssh://github.com/user/repository.git") } func (s *SuiteCommon) TestNewEndpointSSHWithPort(c *C) { e, err := NewEndpoint("ssh://git@github.com:777/user/repository.git") c.Assert(err, IsNil) c.Assert(e.Protocol, Equals, "ssh") c.Assert(e.User, Equals, "git") c.Assert(e.Password, Equals, "") c.Assert(e.Host, Equals, "github.com") c.Assert(e.Port, Equals, 777) c.Assert(e.Path, Equals, "/user/repository.git") c.Assert(e.String(), Equals, "ssh://git@github.com:777/user/repository.git") } func (s *SuiteCommon) TestNewEndpointSCPLike(c *C) { e, err := NewEndpoint("git@github.com:user/repository.git") c.Assert(err, IsNil) c.Assert(e.Protocol, Equals, "ssh") c.Assert(e.User, Equals, "git") c.Assert(e.Password, Equals, "") c.Assert(e.Host, Equals, "github.com") c.Assert(e.Port, Equals, 22) c.Assert(e.Path, Equals, "user/repository.git") c.Assert(e.String(), Equals, "ssh://git@github.com/user/repository.git") } func (s *SuiteCommon) TestNewEndpointSCPLikeWithPort(c *C) { e, err := NewEndpoint("git@github.com:9999/user/repository.git") c.Assert(err, IsNil) c.Assert(e.Protocol, Equals, "ssh") c.Assert(e.User, Equals, "git") c.Assert(e.Password, Equals, "") c.Assert(e.Host, Equals, "github.com") c.Assert(e.Port, Equals, 9999) c.Assert(e.Path, Equals, "user/repository.git") c.Assert(e.String(), Equals, "ssh://git@github.com:9999/user/repository.git") } func (s *SuiteCommon) TestNewEndpointFileAbs(c *C) { e, err := NewEndpoint("/foo.git") c.Assert(err, IsNil) c.Assert(e.Protocol, Equals, "file") c.Assert(e.User, Equals, "") c.Assert(e.Password, Equals, "") c.Assert(e.Host, Equals, "") c.Assert(e.Port, Equals, 0) c.Assert(e.Path, Equals, "/foo.git") c.Assert(e.String(), Equals, "file:///foo.git") } func (s *SuiteCommon) TestNewEndpointFileRel(c *C) { e, err := 
NewEndpoint("foo.git") c.Assert(err, IsNil) c.Assert(e.Protocol, Equals, "file") c.Assert(e.User, Equals, "") c.Assert(e.Password, Equals, "") c.Assert(e.Host, Equals, "") c.Assert(e.Port, Equals, 0) c.Assert(e.Path, Equals, "foo.git") c.Assert(e.String(), Equals, "file://foo.git") } func (s *SuiteCommon) TestNewEndpointFileWindows(c *C) { e, err := NewEndpoint("C:\\foo.git") c.Assert(err, IsNil) c.Assert(e.Protocol, Equals, "file") c.Assert(e.User, Equals, "") c.Assert(e.Password, Equals, "") c.Assert(e.Host, Equals, "") c.Assert(e.Port, Equals, 0) c.Assert(e.Path, Equals, "C:\\foo.git") c.Assert(e.String(), Equals, "file://C:\\foo.git") } func (s *SuiteCommon) TestNewEndpointFileURL(c *C) { e, err := NewEndpoint("file:///foo.git") c.Assert(err, IsNil) c.Assert(e.Protocol, Equals, "file") c.Assert(e.User, Equals, "") c.Assert(e.Password, Equals, "") c.Assert(e.Host, Equals, "") c.Assert(e.Port, Equals, 0) c.Assert(e.Path, Equals, "/foo.git") c.Assert(e.String(), Equals, "file:///foo.git") } func (s *SuiteCommon) TestValidEndpoint(c *C) { user := "person@mail.com" pass := " !\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" e, err := NewEndpoint(fmt.Sprintf( "http://%s:%s@github.com/user/repository.git", url.PathEscape(user), url.PathEscape(pass), )) c.Assert(err, IsNil) c.Assert(e, NotNil) c.Assert(e.User, Equals, user) c.Assert(e.Password, Equals, pass) c.Assert(e.Host, Equals, "github.com") c.Assert(e.Path, Equals, "/user/repository.git") c.Assert(e.String(), Equals, "http://person@mail.com:%20%21%22%23$%25&%27%28%29%2A+%2C-.%2F:%3B%3C=%3E%3F@%5B%5C%5D%5E_%60%7B%7C%7D~@github.com/user/repository.git") } func (s *SuiteCommon) TestNewEndpointInvalidURL(c *C) { e, err := NewEndpoint("http://\\") c.Assert(err, NotNil) c.Assert(e, IsNil) } func (s *SuiteCommon) TestFilterUnsupportedCapabilities(c *C) { l := capability.NewList() l.Set(capability.MultiACK) FilterUnsupportedCapabilities(l) c.Assert(l.Supports(capability.MultiACK), Equals, false) } 
golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/file/000077500000000000000000000000001345605224300234675ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/file/client.go000066400000000000000000000061411345605224300252760ustar00rootroot00000000000000// Package file implements the file transport protocol. package file import ( "bufio" "errors" "io" "os" "os/exec" "path/filepath" "strings" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common" ) // DefaultClient is the default local client. var DefaultClient = NewClient( transport.UploadPackServiceName, transport.ReceivePackServiceName, ) type runner struct { UploadPackBin string ReceivePackBin string } // NewClient returns a new local client using the given git-upload-pack and // git-receive-pack binaries. func NewClient(uploadPackBin, receivePackBin string) transport.Transport { return common.NewClient(&runner{ UploadPackBin: uploadPackBin, ReceivePackBin: receivePackBin, }) } func prefixExecPath(cmd string) (string, error) { // Use `git --exec-path` to find the exec path. execCmd := exec.Command("git", "--exec-path") stdout, err := execCmd.StdoutPipe() if err != nil { return "", err } stdoutBuf := bufio.NewReader(stdout) err = execCmd.Start() if err != nil { return "", err } execPathBytes, isPrefix, err := stdoutBuf.ReadLine() if err != nil { return "", err } if isPrefix { return "", errors.New("Couldn't read exec-path line all at once") } err = execCmd.Wait() if err != nil { return "", err } execPath := string(execPathBytes) execPath = strings.TrimSpace(execPath) cmd = filepath.Join(execPath, cmd) // Make sure it actually exists. 
_, err = exec.LookPath(cmd) if err != nil { return "", err } return cmd, nil } func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod, ) (common.Command, error) { switch cmd { case transport.UploadPackServiceName: cmd = r.UploadPackBin case transport.ReceivePackServiceName: cmd = r.ReceivePackBin } _, err := exec.LookPath(cmd) if err != nil { if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { cmd, err = prefixExecPath(cmd) if err != nil { return nil, err } } else { return nil, err } } return &command{cmd: exec.Command(cmd, ep.Path)}, nil } type command struct { cmd *exec.Cmd stderrCloser io.Closer closed bool } func (c *command) Start() error { return c.cmd.Start() } func (c *command) StderrPipe() (io.Reader, error) { // Pipe returned by Command.StderrPipe has a race with Read + Command.Wait. // We use an io.Pipe and close it after the command finishes. r, w := io.Pipe() c.cmd.Stderr = w c.stderrCloser = r return r, nil } func (c *command) StdinPipe() (io.WriteCloser, error) { return c.cmd.StdinPipe() } func (c *command) StdoutPipe() (io.Reader, error) { return c.cmd.StdoutPipe() } func (c *command) Kill() error { c.cmd.Process.Kill() return c.Close() } // Close waits for the command to exit. func (c *command) Close() error { if c.closed { return nil } defer func() { c.closed = true _ = c.stderrCloser.Close() }() err := c.cmd.Wait() if _, ok := err.(*os.PathError); ok { return nil } // When a repository does not exist, the command exits with code 128. if _, ok := err.(*exec.ExitError); ok { return nil } return err } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/file/client_test.go000066400000000000000000000025761345605224300263450ustar00rootroot00000000000000package file import ( "io" "os" "path/filepath" "strings" "testing" "gopkg.in/src-d/go-git.v4/plumbing/transport" . 
"gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type ClientSuite struct { CommonSuite } var _ = Suite(&ClientSuite{}) func (s *ClientSuite) TestCommand(c *C) { runner := &runner{ UploadPackBin: transport.UploadPackServiceName, ReceivePackBin: transport.ReceivePackServiceName, } ep, err := transport.NewEndpoint(filepath.Join("fake", "repo")) c.Assert(err, IsNil) var emptyAuth transport.AuthMethod _, err = runner.Command("git-receive-pack", ep, emptyAuth) c.Assert(err, IsNil) // Make sure we get an error for one that doesn't exist. _, err = runner.Command("git-fake-command", ep, emptyAuth) c.Assert(err, NotNil) } const bareConfig = `[core] repositoryformatversion = 0 filemode = true bare = true` func prepareRepo(c *C, path string) *transport.Endpoint { ep, err := transport.NewEndpoint(path) c.Assert(err, IsNil) // git-receive-pack refuses to update refs/heads/master on non-bare repo // so we ensure bare repo config. config := filepath.Join(path, "config") if _, err := os.Stat(config); err == nil { f, err := os.OpenFile(config, os.O_TRUNC|os.O_WRONLY, 0) c.Assert(err, IsNil) content := strings.NewReader(bareConfig) _, err = io.Copy(f, content) c.Assert(err, IsNil) c.Assert(f.Close(), IsNil) } return ep } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/file/common_test.go000066400000000000000000000020311345605224300263410ustar00rootroot00000000000000package file import ( "io/ioutil" "os" "os/exec" "path/filepath" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type CommonSuite struct { fixtures.Suite ReceivePackBin string UploadPackBin string tmpDir string // to be removed at teardown } var _ = Suite(&CommonSuite{}) func (s *CommonSuite) SetUpSuite(c *C) { s.Suite.SetUpSuite(c) if err := exec.Command("git", "--version").Run(); err != nil { c.Skip("git command not found") } var err error s.tmpDir, err = ioutil.TempDir("", "") c.Assert(err, IsNil) s.ReceivePackBin = filepath.Join(s.tmpDir, "git-receive-pack") s.UploadPackBin = filepath.Join(s.tmpDir, "git-upload-pack") bin := filepath.Join(s.tmpDir, "go-git") cmd := exec.Command("go", "build", "-o", bin, "../../../cli/go-git/...") c.Assert(cmd.Run(), IsNil) c.Assert(os.Symlink(bin, s.ReceivePackBin), IsNil) c.Assert(os.Symlink(bin, s.UploadPackBin), IsNil) } func (s *CommonSuite) TearDownSuite(c *C) { defer s.Suite.TearDownSuite(c) c.Assert(os.RemoveAll(s.tmpDir), IsNil) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/file/receive_pack_test.go000066400000000000000000000035121345605224300274760ustar00rootroot00000000000000package file import ( "os" "gopkg.in/src-d/go-git.v4/plumbing/transport/test" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type ReceivePackSuite struct { CommonSuite test.ReceivePackSuite } var _ = Suite(&ReceivePackSuite{}) func (s *ReceivePackSuite) SetUpSuite(c *C) { s.CommonSuite.SetUpSuite(c) s.ReceivePackSuite.Client = DefaultClient } func (s *ReceivePackSuite) SetUpTest(c *C) { fixture := fixtures.Basic().One() path := fixture.DotGit().Root() s.Endpoint = prepareRepo(c, path) fixture = fixtures.ByTag("empty").One() path = fixture.DotGit().Root() s.EmptyEndpoint = prepareRepo(c, path) s.NonExistentEndpoint = prepareRepo(c, "/non-existent") } func (s *ReceivePackSuite) TearDownTest(c *C) { s.Suite.TearDownSuite(c) } // TODO: fix test func (s *ReceivePackSuite) TestCommandNoOutput(c *C) { c.Skip("failing test") if _, err := os.Stat("/bin/true"); os.IsNotExist(err) { c.Skip("/bin/true not found") } client := NewClient("true", "true") session, err := client.NewReceivePackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) ar, err := session.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(ar, IsNil) } func (s *ReceivePackSuite) TestMalformedInputNoErrors(c *C) { if _, err := os.Stat("/usr/bin/yes"); os.IsNotExist(err) { c.Skip("/usr/bin/yes not found") } client := NewClient("yes", "yes") session, err := client.NewReceivePackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) ar, err := session.AdvertisedReferences() c.Assert(err, NotNil) c.Assert(ar, IsNil) } func (s *ReceivePackSuite) TestNonExistentCommand(c *C) { cmd := "/non-existent-git" client := NewClient(cmd, cmd) session, err := client.NewReceivePackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, ErrorMatches, ".*(no such file or directory.*|.*file does not exist)*.") c.Assert(session, IsNil) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/file/server.go000066400000000000000000000026101345605224300253230ustar00rootroot00000000000000package file import ( "fmt" "os" "gopkg.in/src-d/go-git.v4/plumbing/transport" 
"gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common" "gopkg.in/src-d/go-git.v4/plumbing/transport/server" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) // ServeUploadPack serves a git-upload-pack request using standard output, input // and error. This is meant to be used when implementing a git-upload-pack // command. func ServeUploadPack(path string) error { ep, err := transport.NewEndpoint(path) if err != nil { return err } // TODO: define and implement a server-side AuthMethod s, err := server.DefaultServer.NewUploadPackSession(ep, nil) if err != nil { return fmt.Errorf("error creating session: %s", err) } return common.ServeUploadPack(srvCmd, s) } // ServeReceivePack serves a git-receive-pack request using standard output, // input and error. This is meant to be used when implementing a // git-receive-pack command. func ServeReceivePack(path string) error { ep, err := transport.NewEndpoint(path) if err != nil { return err } // TODO: define and implement a server-side AuthMethod s, err := server.DefaultServer.NewReceivePackSession(ep, nil) if err != nil { return fmt.Errorf("error creating session: %s", err) } return common.ServeReceivePack(srvCmd, s) } var srvCmd = common.ServerCommand{ Stdin: os.Stdin, Stdout: ioutil.WriteNopCloser(os.Stdout), Stderr: os.Stderr, } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/file/server_test.go000066400000000000000000000034271345605224300263710ustar00rootroot00000000000000package file import ( "os" "os/exec" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type ServerSuite struct { CommonSuite RemoteName string SrcPath string DstPath string } var _ = Suite(&ServerSuite{}) func (s *ServerSuite) SetUpSuite(c *C) { s.CommonSuite.SetUpSuite(c) s.RemoteName = "test" fixture := fixtures.Basic().One() s.SrcPath = fixture.DotGit().Root() fixture = fixtures.ByTag("empty").One() s.DstPath = fixture.DotGit().Root() cmd := exec.Command("git", "remote", "add", s.RemoteName, s.DstPath) cmd.Dir = s.SrcPath c.Assert(cmd.Run(), IsNil) } func (s *ServerSuite) TestPush(c *C) { if !s.checkExecPerm(c) { c.Skip("go-git binary has not execution permissions") } // git <2.0 cannot push to an empty repository without a refspec. cmd := exec.Command("git", "push", "--receive-pack", s.ReceivePackBin, s.RemoteName, "refs/heads/*:refs/heads/*", ) cmd.Dir = s.SrcPath cmd.Env = os.Environ() cmd.Env = append(cmd.Env, "GIT_TRACE=true", "GIT_TRACE_PACKET=true") out, err := cmd.CombinedOutput() c.Assert(err, IsNil, Commentf("combined stdout and stderr:\n%s\n", out)) } func (s *ServerSuite) TestClone(c *C) { if !s.checkExecPerm(c) { c.Skip("go-git binary has not execution permissions") } pathToClone := c.MkDir() cmd := exec.Command("git", "clone", "--upload-pack", s.UploadPackBin, s.SrcPath, pathToClone, ) cmd.Env = os.Environ() cmd.Env = append(cmd.Env, "GIT_TRACE=true", "GIT_TRACE_PACKET=true") out, err := cmd.CombinedOutput() c.Assert(err, IsNil, Commentf("combined stdout and stderr:\n%s\n", out)) } func (s *ServerSuite) checkExecPerm(c *C) bool { const userExecPermMask = 0100 info, err := os.Stat(s.ReceivePackBin) c.Assert(err, IsNil) return (info.Mode().Perm() & userExecPermMask) == userExecPermMask } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/file/upload_pack_test.go000066400000000000000000000043131345605224300273400ustar00rootroot00000000000000package file import ( "os" "path/filepath" "gopkg.in/src-d/go-git.v4/plumbing/transport" 
"gopkg.in/src-d/go-git.v4/plumbing/transport/test" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type UploadPackSuite struct { CommonSuite test.UploadPackSuite } var _ = Suite(&UploadPackSuite{}) func (s *UploadPackSuite) SetUpSuite(c *C) { s.CommonSuite.SetUpSuite(c) s.UploadPackSuite.Client = DefaultClient fixture := fixtures.Basic().One() path := fixture.DotGit().Root() ep, err := transport.NewEndpoint(path) c.Assert(err, IsNil) s.Endpoint = ep fixture = fixtures.ByTag("empty").One() path = fixture.DotGit().Root() ep, err = transport.NewEndpoint(path) c.Assert(err, IsNil) s.EmptyEndpoint = ep path = filepath.Join(fixtures.DataFolder, "non-existent") ep, err = transport.NewEndpoint(path) c.Assert(err, IsNil) s.NonExistentEndpoint = ep } // TODO: fix test func (s *UploadPackSuite) TestCommandNoOutput(c *C) { c.Skip("failing test") if _, err := os.Stat("/bin/true"); os.IsNotExist(err) { c.Skip("/bin/true not found") } client := NewClient("true", "true") session, err := client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) ar, err := session.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(ar, IsNil) } func (s *UploadPackSuite) TestMalformedInputNoErrors(c *C) { if _, err := os.Stat("/usr/bin/yes"); os.IsNotExist(err) { c.Skip("/usr/bin/yes not found") } client := NewClient("yes", "yes") session, err := client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) ar, err := session.AdvertisedReferences() c.Assert(err, NotNil) c.Assert(ar, IsNil) } func (s *UploadPackSuite) TestNonExistentCommand(c *C) { cmd := "/non-existent-git" client := NewClient(cmd, cmd) session, err := client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) // Error message is OS-dependant, so do a broad check c.Assert(err, ErrorMatches, ".*file.*") c.Assert(session, IsNil) } func (s *UploadPackSuite) TestUploadPackWithContextOnRead(c *C) { // TODO: Fix race condition when Session.Close and the read failed due to a // canceled context when the 
packfile is being read. c.Skip("UploadPack has a race condition when we Close the session") } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/git/000077500000000000000000000000001345605224300233335ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/git/common.go000066400000000000000000000050661345605224300251610ustar00rootroot00000000000000// Package git implements the git transport protocol. package git import ( "fmt" "io" "net" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) // DefaultClient is the default git client. var DefaultClient = common.NewClient(&runner{}) const DefaultPort = 9418 type runner struct{} // Command returns a new Command for the given cmd in the given Endpoint func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { // auth not allowed since git protocol doesn't support authentication if auth != nil { return nil, transport.ErrInvalidAuthMethod } c := &command{command: cmd, endpoint: ep} if err := c.connect(); err != nil { return nil, err } return c, nil } type command struct { conn net.Conn connected bool command string endpoint *transport.Endpoint } // Start executes the command sending the required message to the TCP connection func (c *command) Start() error { cmd := endpointToCommand(c.command, c.endpoint) e := pktline.NewEncoder(c.conn) return e.Encode([]byte(cmd)) } func (c *command) connect() error { if c.connected { return transport.ErrAlreadyConnected } var err error c.conn, err = net.Dial("tcp", c.getHostWithPort()) if err != nil { return err } c.connected = true return nil } func (c *command) getHostWithPort() string { host := c.endpoint.Host port := c.endpoint.Port if port <= 0 { port = DefaultPort } return fmt.Sprintf("%s:%d", host, port) } // StderrPipe git protocol doesn't have any 
dedicated error channel func (c *command) StderrPipe() (io.Reader, error) { return nil, nil } // StdinPipe return the underlying connection as WriteCloser, wrapped to prevent // call to the Close function from the connection, a command execution in git // protocol can't be closed or killed func (c *command) StdinPipe() (io.WriteCloser, error) { return ioutil.WriteNopCloser(c.conn), nil } // StdoutPipe return the underlying connection as Reader func (c *command) StdoutPipe() (io.Reader, error) { return c.conn, nil } func endpointToCommand(cmd string, ep *transport.Endpoint) string { host := ep.Host if ep.Port != DefaultPort { host = fmt.Sprintf("%s:%d", ep.Host, ep.Port) } return fmt.Sprintf("%s %s%chost=%s%c", cmd, ep.Path, 0, host, 0) } // Close closes the TCP connection and connection. func (c *command) Close() error { if !c.connected { return nil } c.connected = false return c.conn.Close() } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/git/common_test.go000066400000000000000000000043601345605224300262140ustar00rootroot00000000000000package git import ( "fmt" "io/ioutil" "net" "os" "os/exec" "path/filepath" "runtime" "testing" "time" "gopkg.in/src-d/go-git.v4/plumbing/transport" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) func Test(t *testing.T) { TestingT(t) } type BaseSuite struct { fixtures.Suite base string port int daemon *exec.Cmd } func (s *BaseSuite) SetUpTest(c *C) { if runtime.GOOS == "windows" { c.Skip(`git for windows has issues with write operations through git:// protocol. 
See https://github.com/git-for-windows/git/issues/907`) } var err error s.port, err = freePort() c.Assert(err, IsNil) s.base, err = ioutil.TempDir(os.TempDir(), fmt.Sprintf("go-git-protocol-%d", s.port)) c.Assert(err, IsNil) } func (s *BaseSuite) StartDaemon(c *C) { s.daemon = exec.Command( "git", "daemon", fmt.Sprintf("--base-path=%s", s.base), "--export-all", "--enable=receive-pack", "--reuseaddr", fmt.Sprintf("--port=%d", s.port), // Unless max-connections is limited to 1, a git-receive-pack // might not be seen by a subsequent operation. "--max-connections=1", ) // Environment must be inherited in order to acknowledge GIT_EXEC_PATH if set. s.daemon.Env = os.Environ() err := s.daemon.Start() c.Assert(err, IsNil) // Connections might be refused if we start sending request too early. time.Sleep(time.Millisecond * 500) } func (s *BaseSuite) newEndpoint(c *C, name string) *transport.Endpoint { ep, err := transport.NewEndpoint(fmt.Sprintf("git://localhost:%d/%s", s.port, name)) c.Assert(err, IsNil) return ep } func (s *BaseSuite) prepareRepository(c *C, f *fixtures.Fixture, name string) *transport.Endpoint { fs := f.DotGit() err := fixtures.EnsureIsBare(fs) c.Assert(err, IsNil) path := filepath.Join(s.base, name) err = os.Rename(fs.Root(), path) c.Assert(err, IsNil) return s.newEndpoint(c, name) } func (s *BaseSuite) TearDownTest(c *C) { _ = s.daemon.Process.Signal(os.Kill) _ = s.daemon.Wait() err := os.RemoveAll(s.base) c.Assert(err, IsNil) } func freePort() (int, error) { addr, err := net.ResolveTCPAddr("tcp", "localhost:0") if err != nil { return 0, err } l, err := net.ListenTCP("tcp", addr) if err != nil { return 0, err } return l.Addr().(*net.TCPAddr).Port, l.Close() } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/git/receive_pack_test.go000066400000000000000000000012131345605224300273360ustar00rootroot00000000000000package git import ( "gopkg.in/src-d/go-git.v4/plumbing/transport/test" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type ReceivePackSuite struct { test.ReceivePackSuite BaseSuite } var _ = Suite(&ReceivePackSuite{}) func (s *ReceivePackSuite) SetUpTest(c *C) { s.BaseSuite.SetUpTest(c) s.ReceivePackSuite.Client = DefaultClient s.ReceivePackSuite.Endpoint = s.prepareRepository(c, fixtures.Basic().One(), "basic.git") s.ReceivePackSuite.EmptyEndpoint = s.prepareRepository(c, fixtures.ByTag("empty").One(), "empty.git") s.ReceivePackSuite.NonExistentEndpoint = s.newEndpoint(c, "non-existent.git") s.StartDaemon(c) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/git/upload_pack_test.go000066400000000000000000000012041345605224300272000ustar00rootroot00000000000000package git import ( "gopkg.in/src-d/go-git.v4/plumbing/transport/test" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type UploadPackSuite struct { test.UploadPackSuite BaseSuite } var _ = Suite(&UploadPackSuite{}) func (s *UploadPackSuite) SetUpSuite(c *C) { s.BaseSuite.SetUpTest(c) s.UploadPackSuite.Client = DefaultClient s.UploadPackSuite.Endpoint = s.prepareRepository(c, fixtures.Basic().One(), "basic.git") s.UploadPackSuite.EmptyEndpoint = s.prepareRepository(c, fixtures.ByTag("empty").One(), "empty.git") s.UploadPackSuite.NonExistentEndpoint = s.newEndpoint(c, "non-existent.git") s.StartDaemon(c) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/http/000077500000000000000000000000001345605224300235275ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/http/common.go000066400000000000000000000145051345605224300253530ustar00rootroot00000000000000// Package http implements the HTTP transport protocol. 
package http import ( "bytes" "fmt" "net" "net/http" "strconv" "strings" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) // it requires a bytes.Buffer, because we need to know the length func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string, requestType string) { req.Header.Add("User-Agent", "git/1.0") req.Header.Add("Host", host) // host:port if content == nil { req.Header.Add("Accept", "*/*") return } req.Header.Add("Accept", fmt.Sprintf("application/x-%s-result", requestType)) req.Header.Add("Content-Type", fmt.Sprintf("application/x-%s-request", requestType)) req.Header.Add("Content-Length", strconv.Itoa(content.Len())) } const infoRefsPath = "/info/refs" func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, err error) { url := fmt.Sprintf( "%s%s?service=%s", s.endpoint.String(), infoRefsPath, serviceName, ) req, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { return nil, err } s.ApplyAuthToRequest(req) applyHeadersToRequest(req, nil, s.endpoint.Host, serviceName) res, err := s.client.Do(req) if err != nil { return nil, err } s.ModifyEndpointIfRedirect(res) defer ioutil.CheckClose(res.Body, &err) if err = NewErr(res); err != nil { return nil, err } ar := packp.NewAdvRefs() if err = ar.Decode(res.Body); err != nil { if err == packp.ErrEmptyAdvRefs { err = transport.ErrEmptyRemoteRepository } return nil, err } transport.FilterUnsupportedCapabilities(ar.Capabilities) s.advRefs = ar return ar, nil } type client struct { c *http.Client } // DefaultClient is the default HTTP client, which uses `http.DefaultClient`. var DefaultClient = NewClient(nil) // NewClient creates a new client with a custom net/http client. // See `InstallProtocol` to install and override default http client. 
// Unless a properly initialized client is given, it will fall back into // `http.DefaultClient`. // // Note that for HTTP client cannot distinguist between private repositories and // unexistent repositories on GitHub. So it returns `ErrAuthorizationRequired` // for both. func NewClient(c *http.Client) transport.Transport { if c == nil { return &client{http.DefaultClient} } return &client{ c: c, } } func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( transport.UploadPackSession, error) { return newUploadPackSession(c.c, ep, auth) } func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( transport.ReceivePackSession, error) { return newReceivePackSession(c.c, ep, auth) } type session struct { auth AuthMethod client *http.Client endpoint *transport.Endpoint advRefs *packp.AdvRefs } func newSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) { s := &session{ auth: basicAuthFromEndpoint(ep), client: c, endpoint: ep, } if auth != nil { a, ok := auth.(AuthMethod) if !ok { return nil, transport.ErrInvalidAuthMethod } s.auth = a } return s, nil } func (s *session) ApplyAuthToRequest(req *http.Request) { if s.auth == nil { return } s.auth.setAuth(req) } func (s *session) ModifyEndpointIfRedirect(res *http.Response) { if res.Request == nil { return } r := res.Request if !strings.HasSuffix(r.URL.Path, infoRefsPath) { return } h, p, err := net.SplitHostPort(r.URL.Host) if err != nil { h = r.URL.Host } if p != "" { port, err := strconv.Atoi(p) if err == nil { s.endpoint.Port = port } } s.endpoint.Host = h s.endpoint.Protocol = r.URL.Scheme s.endpoint.Path = r.URL.Path[:len(r.URL.Path)-len(infoRefsPath)] } func (*session) Close() error { return nil } // AuthMethod is concrete implementation of common.AuthMethod for HTTP services type AuthMethod interface { transport.AuthMethod setAuth(r *http.Request) } func basicAuthFromEndpoint(ep *transport.Endpoint) *BasicAuth 
{ u := ep.User if u == "" { return nil } return &BasicAuth{u, ep.Password} } // BasicAuth represent a HTTP basic auth type BasicAuth struct { Username, Password string } func (a *BasicAuth) setAuth(r *http.Request) { if a == nil { return } r.SetBasicAuth(a.Username, a.Password) } // Name is name of the auth func (a *BasicAuth) Name() string { return "http-basic-auth" } func (a *BasicAuth) String() string { masked := "*******" if a.Password == "" { masked = "" } return fmt.Sprintf("%s - %s:%s", a.Name(), a.Username, masked) } // TokenAuth implements an http.AuthMethod that can be used with http transport // to authenticate with HTTP token authentication (also known as bearer // authentication). // // IMPORTANT: If you are looking to use OAuth tokens with popular servers (e.g. // GitHub, Bitbucket, GitLab) you should use BasicAuth instead. These servers // use basic HTTP authentication, with the OAuth token as user or password. // Check the documentation of your git server for details. type TokenAuth struct { Token string } func (a *TokenAuth) setAuth(r *http.Request) { if a == nil { return } r.Header.Add("Authorization", fmt.Sprintf("Bearer %s", a.Token)) } // Name is name of the auth func (a *TokenAuth) Name() string { return "http-token-auth" } func (a *TokenAuth) String() string { masked := "*******" if a.Token == "" { masked = "" } return fmt.Sprintf("%s - %s", a.Name(), masked) } // Err is a dedicated error to return errors based on status code type Err struct { Response *http.Response } // NewErr returns a new Err based on a http response func NewErr(r *http.Response) error { if r.StatusCode >= http.StatusOK && r.StatusCode < http.StatusMultipleChoices { return nil } switch r.StatusCode { case http.StatusUnauthorized: return transport.ErrAuthenticationRequired case http.StatusForbidden: return transport.ErrAuthorizationFailed case http.StatusNotFound: return transport.ErrRepositoryNotFound } return plumbing.NewUnexpectedError(&Err{r}) } // StatusCode returns 
the status code of the response func (e *Err) StatusCode() int { return e.Response.StatusCode } func (e *Err) Error() string { return fmt.Sprintf("unexpected requesting %q status code: %d", e.Response.Request.URL, e.Response.StatusCode, ) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/http/common_test.go000066400000000000000000000125161345605224300264120ustar00rootroot00000000000000package http import ( "crypto/tls" "fmt" "io/ioutil" "log" "net" "net/http" "net/http/cgi" "net/url" "os" "os/exec" "path/filepath" "strings" "testing" "gopkg.in/src-d/go-git.v4/plumbing/transport" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) func Test(t *testing.T) { TestingT(t) } type ClientSuite struct { Endpoint *transport.Endpoint EmptyAuth transport.AuthMethod } var _ = Suite(&ClientSuite{}) func (s *ClientSuite) SetUpSuite(c *C) { var err error s.Endpoint, err = transport.NewEndpoint( "https://github.com/git-fixtures/basic", ) c.Assert(err, IsNil) } func (s *UploadPackSuite) TestNewClient(c *C) { roundTripper := &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, } cl := &http.Client{Transport: roundTripper} r, ok := NewClient(cl).(*client) c.Assert(ok, Equals, true) c.Assert(r.c, Equals, cl) } func (s *ClientSuite) TestNewBasicAuth(c *C) { a := &BasicAuth{"foo", "qux"} c.Assert(a.Name(), Equals, "http-basic-auth") c.Assert(a.String(), Equals, "http-basic-auth - foo:*******") } func (s *ClientSuite) TestNewTokenAuth(c *C) { a := &TokenAuth{"OAUTH-TOKEN-TEXT"} c.Assert(a.Name(), Equals, "http-token-auth") c.Assert(a.String(), Equals, "http-token-auth - *******") // Check header is set correctly req, err := http.NewRequest("GET", "https://github.com/git-fixtures/basic", nil) c.Assert(err, Equals, nil) a.setAuth(req) c.Assert(req.Header.Get("Authorization"), Equals, "Bearer OAUTH-TOKEN-TEXT") } func (s *ClientSuite) TestNewErrOK(c *C) { res := &http.Response{StatusCode: http.StatusOK} err := NewErr(res) c.Assert(err, IsNil) } func (s 
*ClientSuite) TestNewErrUnauthorized(c *C) { s.testNewHTTPError(c, http.StatusUnauthorized, "authentication required") } func (s *ClientSuite) TestNewErrForbidden(c *C) { s.testNewHTTPError(c, http.StatusForbidden, "authorization failed") } func (s *ClientSuite) TestNewErrNotFound(c *C) { s.testNewHTTPError(c, http.StatusNotFound, "repository not found") } func (s *ClientSuite) TestNewHTTPError40x(c *C) { s.testNewHTTPError(c, http.StatusPaymentRequired, "unexpected client error.*") } func (s *ClientSuite) testNewHTTPError(c *C, code int, msg string) { req, _ := http.NewRequest("GET", "foo", nil) res := &http.Response{ StatusCode: code, Request: req, } err := NewErr(res) c.Assert(err, NotNil) c.Assert(err, ErrorMatches, msg) } func (s *ClientSuite) TestSetAuth(c *C) { auth := &BasicAuth{} r, err := DefaultClient.NewUploadPackSession(s.Endpoint, auth) c.Assert(err, IsNil) c.Assert(auth, Equals, r.(*upSession).auth) } type mockAuth struct{} func (*mockAuth) Name() string { return "" } func (*mockAuth) String() string { return "" } func (s *ClientSuite) TestSetAuthWrongType(c *C) { _, err := DefaultClient.NewUploadPackSession(s.Endpoint, &mockAuth{}) c.Assert(err, Equals, transport.ErrInvalidAuthMethod) } func (s *ClientSuite) TestModifyEndpointIfRedirect(c *C) { sess := &session{endpoint: nil} u, _ := url.Parse("https://example.com/info/refs") res := &http.Response{Request: &http.Request{URL: u}} c.Assert(func() { sess.ModifyEndpointIfRedirect(res) }, PanicMatches, ".*nil pointer dereference.*") sess = &session{endpoint: nil} // no-op - should return and not panic sess.ModifyEndpointIfRedirect(&http.Response{}) data := []struct { url string endpoint *transport.Endpoint expected *transport.Endpoint }{ {"https://example.com/foo/bar", nil, nil}, {"https://example.com/foo.git/info/refs", &transport.Endpoint{}, &transport.Endpoint{Protocol: "https", Host: "example.com", Path: "/foo.git"}}, {"https://example.com:8080/foo.git/info/refs", &transport.Endpoint{}, 
&transport.Endpoint{Protocol: "https", Host: "example.com", Port: 8080, Path: "/foo.git"}}, } for _, d := range data { u, _ := url.Parse(d.url) sess := &session{endpoint: d.endpoint} sess.ModifyEndpointIfRedirect(&http.Response{ Request: &http.Request{URL: u}, }) c.Assert(d.endpoint, DeepEquals, d.expected) } } type BaseSuite struct { fixtures.Suite base string host string port int } func (s *BaseSuite) SetUpTest(c *C) { l, err := net.Listen("tcp", "localhost:0") c.Assert(err, IsNil) base, err := ioutil.TempDir(os.TempDir(), fmt.Sprintf("go-git-http-%d", s.port)) c.Assert(err, IsNil) s.port = l.Addr().(*net.TCPAddr).Port s.base = filepath.Join(base, s.host) err = os.MkdirAll(s.base, 0755) c.Assert(err, IsNil) cmd := exec.Command("git", "--exec-path") out, err := cmd.CombinedOutput() c.Assert(err, IsNil) server := &http.Server{ Handler: &cgi.Handler{ Path: filepath.Join(strings.Trim(string(out), "\n"), "git-http-backend"), Env: []string{"GIT_HTTP_EXPORT_ALL=true", fmt.Sprintf("GIT_PROJECT_ROOT=%s", s.base)}, }, } go func() { log.Fatal(server.Serve(l)) }() } func (s *BaseSuite) prepareRepository(c *C, f *fixtures.Fixture, name string) *transport.Endpoint { fs := f.DotGit() err := fixtures.EnsureIsBare(fs) c.Assert(err, IsNil) path := filepath.Join(s.base, name) err = os.Rename(fs.Root(), path) c.Assert(err, IsNil) return s.newEndpoint(c, name) } func (s *BaseSuite) newEndpoint(c *C, name string) *transport.Endpoint { ep, err := transport.NewEndpoint(fmt.Sprintf("http://localhost:%d/%s", s.port, name)) c.Assert(err, IsNil) return ep } func (s *BaseSuite) TearDownTest(c *C) { err := os.RemoveAll(s.base) c.Assert(err, IsNil) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/http/receive_pack.go000066400000000000000000000045441345605224300265050ustar00rootroot00000000000000package http import ( "bytes" "context" "fmt" "io" "net/http" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" 
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) type rpSession struct { *session } func newReceivePackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) { s, err := newSession(c, ep, auth) return &rpSession{s}, err } func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) { return advertisedReferences(s.session, transport.ReceivePackServiceName) } func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) ( *packp.ReportStatus, error) { url := fmt.Sprintf( "%s/%s", s.endpoint.String(), transport.ReceivePackServiceName, ) buf := bytes.NewBuffer(nil) if err := req.Encode(buf); err != nil { return nil, err } res, err := s.doRequest(ctx, http.MethodPost, url, buf) if err != nil { return nil, err } r, err := ioutil.NonEmptyReader(res.Body) if err == ioutil.ErrEmptyReader { return nil, nil } if err != nil { return nil, err } var d *sideband.Demuxer if req.Capabilities.Supports(capability.Sideband64k) { d = sideband.NewDemuxer(sideband.Sideband64k, r) } else if req.Capabilities.Supports(capability.Sideband) { d = sideband.NewDemuxer(sideband.Sideband, r) } if d != nil { d.Progress = req.Progress r = d } rc := ioutil.NewReadCloser(r, res.Body) report := packp.NewReportStatus() if err := report.Decode(rc); err != nil { return nil, err } return report, report.Error() } func (s *rpSession) doRequest( ctx context.Context, method, url string, content *bytes.Buffer, ) (*http.Response, error) { var body io.Reader if content != nil { body = content } req, err := http.NewRequest(method, url, body) if err != nil { return nil, plumbing.NewPermanentError(err) } applyHeadersToRequest(req, content, s.endpoint.Host, transport.ReceivePackServiceName) s.ApplyAuthToRequest(req) res, err := s.client.Do(req.WithContext(ctx)) if err 
!= nil { return nil, plumbing.NewUnexpectedError(err) } if err := NewErr(res); err != nil { _ = res.Body.Close() return nil, err } return res, nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/http/receive_pack_test.go000066400000000000000000000011711345605224300275350ustar00rootroot00000000000000package http import ( "gopkg.in/src-d/go-git.v4/plumbing/transport/test" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type ReceivePackSuite struct { test.ReceivePackSuite BaseSuite } var _ = Suite(&ReceivePackSuite{}) func (s *ReceivePackSuite) SetUpTest(c *C) { s.BaseSuite.SetUpTest(c) s.ReceivePackSuite.Client = DefaultClient s.ReceivePackSuite.Endpoint = s.prepareRepository(c, fixtures.Basic().One(), "basic.git") s.ReceivePackSuite.EmptyEndpoint = s.prepareRepository(c, fixtures.ByTag("empty").One(), "empty.git") s.ReceivePackSuite.NonExistentEndpoint = s.newEndpoint(c, "non-existent.git") } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/http/upload_pack.go000066400000000000000000000053221345605224300263420ustar00rootroot00000000000000package http import ( "bytes" "context" "fmt" "io" "net/http" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) type upSession struct { *session } func newUploadPackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) { s, err := newSession(c, ep, auth) return &upSession{s}, err } func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { return advertisedReferences(s.session, transport.UploadPackServiceName) } func (s *upSession) UploadPack( ctx context.Context, req *packp.UploadPackRequest, ) (*packp.UploadPackResponse, error) { if req.IsEmpty() { return nil, transport.ErrEmptyUploadPackRequest } if err 
:= req.Validate(); err != nil { return nil, err } url := fmt.Sprintf( "%s/%s", s.endpoint.String(), transport.UploadPackServiceName, ) content, err := uploadPackRequestToReader(req) if err != nil { return nil, err } res, err := s.doRequest(ctx, http.MethodPost, url, content) if err != nil { return nil, err } r, err := ioutil.NonEmptyReader(res.Body) if err != nil { if err == ioutil.ErrEmptyReader || err == io.ErrUnexpectedEOF { return nil, transport.ErrEmptyUploadPackRequest } return nil, err } rc := ioutil.NewReadCloser(r, res.Body) return common.DecodeUploadPackResponse(rc, req) } // Close does nothing. func (s *upSession) Close() error { return nil } func (s *upSession) doRequest( ctx context.Context, method, url string, content *bytes.Buffer, ) (*http.Response, error) { var body io.Reader if content != nil { body = content } req, err := http.NewRequest(method, url, body) if err != nil { return nil, plumbing.NewPermanentError(err) } applyHeadersToRequest(req, content, s.endpoint.Host, transport.UploadPackServiceName) s.ApplyAuthToRequest(req) res, err := s.client.Do(req.WithContext(ctx)) if err != nil { return nil, plumbing.NewUnexpectedError(err) } if err := NewErr(res); err != nil { _ = res.Body.Close() return nil, err } return res, nil } func uploadPackRequestToReader(req *packp.UploadPackRequest) (*bytes.Buffer, error) { buf := bytes.NewBuffer(nil) e := pktline.NewEncoder(buf) if err := req.UploadRequest.Encode(buf); err != nil { return nil, fmt.Errorf("sending upload-req message: %s", err) } if err := req.UploadHaves.Encode(buf, false); err != nil { return nil, fmt.Errorf("sending haves message: %s", err) } if err := e.EncodeString("done\n"); err != nil { return nil, err } return buf, nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/http/upload_pack_test.go000066400000000000000000000062701345605224300274040ustar00rootroot00000000000000package http import ( "fmt" "io/ioutil" "os" "path/filepath" "gopkg.in/src-d/go-git.v4/plumbing" 
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/plumbing/transport/test" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type UploadPackSuite struct { test.UploadPackSuite BaseSuite } var _ = Suite(&UploadPackSuite{}) func (s *UploadPackSuite) SetUpSuite(c *C) { s.BaseSuite.SetUpTest(c) s.UploadPackSuite.Client = DefaultClient s.UploadPackSuite.Endpoint = s.prepareRepository(c, fixtures.Basic().One(), "basic.git") s.UploadPackSuite.EmptyEndpoint = s.prepareRepository(c, fixtures.ByTag("empty").One(), "empty.git") s.UploadPackSuite.NonExistentEndpoint = s.newEndpoint(c, "non-existent.git") } // Overwritten, different behaviour for HTTP. func (s *UploadPackSuite) TestAdvertisedReferencesNotExists(c *C) { r, err := s.Client.NewUploadPackSession(s.NonExistentEndpoint, s.EmptyAuth) c.Assert(err, IsNil) info, err := r.AdvertisedReferences() c.Assert(err, Equals, transport.ErrRepositoryNotFound) c.Assert(info, IsNil) } func (s *UploadPackSuite) TestuploadPackRequestToReader(c *C) { r := packp.NewUploadPackRequest() r.Wants = append(r.Wants, plumbing.NewHash("d82f291cde9987322c8a0c81a325e1ba6159684c")) r.Wants = append(r.Wants, plumbing.NewHash("2b41ef280fdb67a9b250678686a0c3e03b0a9989")) r.Haves = append(r.Haves, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) sr, err := uploadPackRequestToReader(r) c.Assert(err, IsNil) b, _ := ioutil.ReadAll(sr) c.Assert(string(b), Equals, "0032want 2b41ef280fdb67a9b250678686a0c3e03b0a9989\n"+ "0032want d82f291cde9987322c8a0c81a325e1ba6159684c\n0000"+ "0032have 6ecf0ef2c2dffb796033e5a02219af86ec6584e5\n"+ "0009done\n", ) } func (s *UploadPackSuite) prepareRepository(c *C, f *fixtures.Fixture, name string) *transport.Endpoint { fs := f.DotGit() err := fixtures.EnsureIsBare(fs) c.Assert(err, IsNil) path := filepath.Join(s.base, name) err = os.Rename(fs.Root(), path) c.Assert(err, IsNil) return s.newEndpoint(c, name) } func (s 
*UploadPackSuite) newEndpoint(c *C, name string) *transport.Endpoint { ep, err := transport.NewEndpoint(fmt.Sprintf("http://localhost:%d/%s", s.port, name)) c.Assert(err, IsNil) return ep } func (s *UploadPackSuite) TestAdvertisedReferencesRedirectPath(c *C) { endpoint, _ := transport.NewEndpoint("https://gitlab.com/gitlab-org/gitter/webapp") session, err := s.Client.NewUploadPackSession(endpoint, s.EmptyAuth) c.Assert(err, IsNil) info, err := session.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(info, NotNil) url := session.(*upSession).endpoint.String() c.Assert(url, Equals, "https://gitlab.com/gitlab-org/gitter/webapp.git") } func (s *UploadPackSuite) TestAdvertisedReferencesRedirectSchema(c *C) { endpoint, _ := transport.NewEndpoint("http://github.com/git-fixtures/basic") session, err := s.Client.NewUploadPackSession(endpoint, s.EmptyAuth) c.Assert(err, IsNil) info, err := session.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(info, NotNil) url := session.(*upSession).endpoint.String() c.Assert(url, Equals, "https://github.com/git-fixtures/basic") } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/internal/000077500000000000000000000000001345605224300243645ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/internal/common/000077500000000000000000000000001345605224300256545ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/internal/common/common.go000066400000000000000000000263551345605224300275060ustar00rootroot00000000000000// Package common implements the git pack protocol with a pluggable transport. // This is a low-level package to implement new transports. Use a concrete // implementation instead (e.g. http, file, ssh). // // A simple example of usage can be found in the file package. 
package common import ( "bufio" "context" "errors" "fmt" "io" stdioutil "io/ioutil" "strings" "time" "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) const ( readErrorSecondsTimeout = 10 ) var ( ErrTimeoutExceeded = errors.New("timeout exceeded") ) // Commander creates Command instances. This is the main entry point for // transport implementations. type Commander interface { // Command creates a new Command for the given git command and // endpoint. cmd can be git-upload-pack or git-receive-pack. An // error should be returned if the endpoint is not supported or the // command cannot be created (e.g. binary does not exist, connection // cannot be established). Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (Command, error) } // Command is used for a single command execution. // This interface is modeled after exec.Cmd and ssh.Session in the standard // library. type Command interface { // StderrPipe returns a pipe that will be connected to the command's // standard error when the command starts. It should not be called after // Start. StderrPipe() (io.Reader, error) // StdinPipe returns a pipe that will be connected to the command's // standard input when the command starts. It should not be called after // Start. The pipe should be closed when no more input is expected. StdinPipe() (io.WriteCloser, error) // StdoutPipe returns a pipe that will be connected to the command's // standard output when the command starts. It should not be called after // Start. StdoutPipe() (io.Reader, error) // Start starts the specified command. It does not wait for it to // complete. Start() error // Close closes the command and releases any resources used by it. 
It // will block until the command exits. Close() error } // CommandKiller expands the Command interface, enableing it for being killed. type CommandKiller interface { // Kill and close the session whatever the state it is. It will block until // the command is terminated. Kill() error } type client struct { cmdr Commander } // NewClient creates a new client using the given Commander. func NewClient(runner Commander) transport.Transport { return &client{runner} } // NewUploadPackSession creates a new UploadPackSession. func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( transport.UploadPackSession, error) { return c.newSession(transport.UploadPackServiceName, ep, auth) } // NewReceivePackSession creates a new ReceivePackSession. func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( transport.ReceivePackSession, error) { return c.newSession(transport.ReceivePackServiceName, ep, auth) } type session struct { Stdin io.WriteCloser Stdout io.Reader Command Command isReceivePack bool advRefs *packp.AdvRefs packRun bool finished bool firstErrLine chan string } func (c *client) newSession(s string, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) { cmd, err := c.cmdr.Command(s, ep, auth) if err != nil { return nil, err } stdin, err := cmd.StdinPipe() if err != nil { return nil, err } stdout, err := cmd.StdoutPipe() if err != nil { return nil, err } stderr, err := cmd.StderrPipe() if err != nil { return nil, err } if err := cmd.Start(); err != nil { return nil, err } return &session{ Stdin: stdin, Stdout: stdout, Command: cmd, firstErrLine: c.listenFirstError(stderr), isReceivePack: s == transport.ReceivePackServiceName, }, nil } func (c *client) listenFirstError(r io.Reader) chan string { if r == nil { return nil } errLine := make(chan string, 1) go func() { s := bufio.NewScanner(r) if s.Scan() { errLine <- s.Text() } else { close(errLine) } _, _ = io.Copy(stdioutil.Discard, r) 
}() return errLine } // AdvertisedReferences retrieves the advertised references from the server. func (s *session) AdvertisedReferences() (*packp.AdvRefs, error) { if s.advRefs != nil { return s.advRefs, nil } ar := packp.NewAdvRefs() if err := ar.Decode(s.Stdout); err != nil { if err := s.handleAdvRefDecodeError(err); err != nil { return nil, err } } transport.FilterUnsupportedCapabilities(ar.Capabilities) s.advRefs = ar return ar, nil } func (s *session) handleAdvRefDecodeError(err error) error { // If repository is not found, we get empty stdout and server writes an // error to stderr. if err == packp.ErrEmptyInput { s.finished = true if err := s.checkNotFoundError(); err != nil { return err } return io.ErrUnexpectedEOF } // For empty (but existing) repositories, we get empty advertised-references // message. But valid. That is, it includes at least a flush. if err == packp.ErrEmptyAdvRefs { // Empty repositories are valid for git-receive-pack. if s.isReceivePack { return nil } if err := s.finish(); err != nil { return err } return transport.ErrEmptyRemoteRepository } // Some server sends the errors as normal content (git protocol), so when // we try to decode it fails, we need to check the content of it, to detect // not found errors if uerr, ok := err.(*packp.ErrUnexpectedData); ok { if isRepoNotFoundError(string(uerr.Data)) { return transport.ErrRepositoryNotFound } } return err } // UploadPack performs a request to the server to fetch a packfile. A reader is // returned with the packfile content. The reader must be closed after reading. 
func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { if req.IsEmpty() { return nil, transport.ErrEmptyUploadPackRequest } if err := req.Validate(); err != nil { return nil, err } if _, err := s.AdvertisedReferences(); err != nil { return nil, err } s.packRun = true in := s.StdinContext(ctx) out := s.StdoutContext(ctx) if err := uploadPack(in, out, req); err != nil { return nil, err } r, err := ioutil.NonEmptyReader(out) if err == ioutil.ErrEmptyReader { if c, ok := s.Stdout.(io.Closer); ok { _ = c.Close() } return nil, transport.ErrEmptyUploadPackRequest } if err != nil { return nil, err } rc := ioutil.NewReadCloser(r, s) return DecodeUploadPackResponse(rc, req) } func (s *session) StdinContext(ctx context.Context) io.WriteCloser { return ioutil.NewWriteCloserOnError( ioutil.NewContextWriteCloser(ctx, s.Stdin), s.onError, ) } func (s *session) StdoutContext(ctx context.Context) io.Reader { return ioutil.NewReaderOnError( ioutil.NewContextReader(ctx, s.Stdout), s.onError, ) } func (s *session) onError(err error) { if k, ok := s.Command.(CommandKiller); ok { _ = k.Kill() } _ = s.Close() } func (s *session) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) { if _, err := s.AdvertisedReferences(); err != nil { return nil, err } s.packRun = true w := s.StdinContext(ctx) if err := req.Encode(w); err != nil { return nil, err } if err := w.Close(); err != nil { return nil, err } if !req.Capabilities.Supports(capability.ReportStatus) { // If we don't have report-status, we can only // check return value error. 
return nil, s.Command.Close() } r := s.StdoutContext(ctx) var d *sideband.Demuxer if req.Capabilities.Supports(capability.Sideband64k) { d = sideband.NewDemuxer(sideband.Sideband64k, r) } else if req.Capabilities.Supports(capability.Sideband) { d = sideband.NewDemuxer(sideband.Sideband, r) } if d != nil { d.Progress = req.Progress r = d } report := packp.NewReportStatus() if err := report.Decode(r); err != nil { return nil, err } if err := report.Error(); err != nil { defer s.Close() return report, err } return report, s.Command.Close() } func (s *session) finish() error { if s.finished { return nil } s.finished = true // If we did not run a upload/receive-pack, we close the connection // gracefully by sending a flush packet to the server. If the server // operates correctly, it will exit with status 0. if !s.packRun { _, err := s.Stdin.Write(pktline.FlushPkt) return err } return nil } func (s *session) Close() (err error) { err = s.finish() defer ioutil.CheckClose(s.Command, &err) return } func (s *session) checkNotFoundError() error { t := time.NewTicker(time.Second * readErrorSecondsTimeout) defer t.Stop() select { case <-t.C: return ErrTimeoutExceeded case line, ok := <-s.firstErrLine: if !ok { return nil } if isRepoNotFoundError(line) { return transport.ErrRepositoryNotFound } return fmt.Errorf("unknown error: %s", line) } } var ( githubRepoNotFoundErr = "ERROR: Repository not found." bitbucketRepoNotFoundErr = "conq: repository does not exist." localRepoNotFoundErr = "does not appear to be a git repository" gitProtocolNotFoundErr = "ERR \n Repository not found." 
gitProtocolNoSuchErr = "ERR no such repository" gitProtocolAccessDeniedErr = "ERR access denied" gogsAccessDeniedErr = "Gogs: Repository does not exist or you do not have access" ) func isRepoNotFoundError(s string) bool { if strings.HasPrefix(s, githubRepoNotFoundErr) { return true } if strings.HasPrefix(s, bitbucketRepoNotFoundErr) { return true } if strings.HasSuffix(s, localRepoNotFoundErr) { return true } if strings.HasPrefix(s, gitProtocolNotFoundErr) { return true } if strings.HasPrefix(s, gitProtocolNoSuchErr) { return true } if strings.HasPrefix(s, gitProtocolAccessDeniedErr) { return true } if strings.HasPrefix(s, gogsAccessDeniedErr) { return true } return false } var ( nak = []byte("NAK") eol = []byte("\n") ) // uploadPack implements the git-upload-pack protocol. func uploadPack(w io.WriteCloser, r io.Reader, req *packp.UploadPackRequest) error { // TODO support multi_ack mode // TODO support multi_ack_detailed mode // TODO support acks for common objects // TODO build a proper state machine for all these processing options if err := req.UploadRequest.Encode(w); err != nil { return fmt.Errorf("sending upload-req message: %s", err) } if err := req.UploadHaves.Encode(w, true); err != nil { return fmt.Errorf("sending haves message: %s", err) } if err := sendDone(w); err != nil { return fmt.Errorf("sending done message: %s", err) } if err := w.Close(); err != nil { return fmt.Errorf("closing input: %s", err) } return nil } func sendDone(w io.Writer) error { e := pktline.NewEncoder(w) return e.Encodef("done\n") } // DecodeUploadPackResponse decodes r into a new packp.UploadPackResponse func DecodeUploadPackResponse(r io.ReadCloser, req *packp.UploadPackRequest) ( *packp.UploadPackResponse, error, ) { res := packp.NewUploadPackResponse(req) if err := res.Decode(r); err != nil { return nil, fmt.Errorf("error decoding upload-pack response: %s", err) } return res, nil } 
golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/internal/common/common_test.go000066400000000000000000000036771345605224300305470ustar00rootroot00000000000000package common import ( "fmt" "testing" . "gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type CommonSuite struct{} var _ = Suite(&CommonSuite{}) func (s *CommonSuite) TestIsRepoNotFoundErrorForUnknowSource(c *C) { msg := "unknown system is complaining of something very sad :(" isRepoNotFound := isRepoNotFoundError(msg) c.Assert(isRepoNotFound, Equals, false) } func (s *CommonSuite) TestIsRepoNotFoundErrorForGithub(c *C) { msg := fmt.Sprintf("%s : some error stuf", githubRepoNotFoundErr) isRepoNotFound := isRepoNotFoundError(msg) c.Assert(isRepoNotFound, Equals, true) } func (s *CommonSuite) TestIsRepoNotFoundErrorForBitBucket(c *C) { msg := fmt.Sprintf("%s : some error stuf", bitbucketRepoNotFoundErr) isRepoNotFound := isRepoNotFoundError(msg) c.Assert(isRepoNotFound, Equals, true) } func (s *CommonSuite) TestIsRepoNotFoundErrorForLocal(c *C) { msg := fmt.Sprintf("some error stuf : %s", localRepoNotFoundErr) isRepoNotFound := isRepoNotFoundError(msg) c.Assert(isRepoNotFound, Equals, true) } func (s *CommonSuite) TestIsRepoNotFoundErrorForGitProtocolNotFound(c *C) { msg := fmt.Sprintf("%s : some error stuf", gitProtocolNotFoundErr) isRepoNotFound := isRepoNotFoundError(msg) c.Assert(isRepoNotFound, Equals, true) } func (s *CommonSuite) TestIsRepoNotFoundErrorForGitProtocolNoSuch(c *C) { msg := fmt.Sprintf("%s : some error stuf", gitProtocolNoSuchErr) isRepoNotFound := isRepoNotFoundError(msg) c.Assert(isRepoNotFound, Equals, true) } func (s *CommonSuite) TestIsRepoNotFoundErrorForGitProtocolAccessDenied(c *C) { msg := fmt.Sprintf("%s : some error stuf", gitProtocolAccessDeniedErr) isRepoNotFound := isRepoNotFoundError(msg) c.Assert(isRepoNotFound, Equals, true) } func (s *CommonSuite) TestIsRepoNotFoundErrorForGogsAccessDenied(c *C) { msg := fmt.Sprintf("%s : some error stuf", 
gogsAccessDeniedErr) isRepoNotFound := isRepoNotFoundError(msg) c.Assert(isRepoNotFound, Equals, true) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/internal/common/server.go000066400000000000000000000031041345605224300275070ustar00rootroot00000000000000package common import ( "context" "fmt" "io" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) // ServerCommand is used for a single server command execution. type ServerCommand struct { Stderr io.Writer Stdout io.WriteCloser Stdin io.Reader } func ServeUploadPack(cmd ServerCommand, s transport.UploadPackSession) (err error) { ioutil.CheckClose(cmd.Stdout, &err) ar, err := s.AdvertisedReferences() if err != nil { return err } if err := ar.Encode(cmd.Stdout); err != nil { return err } req := packp.NewUploadPackRequest() if err := req.Decode(cmd.Stdin); err != nil { return err } var resp *packp.UploadPackResponse resp, err = s.UploadPack(context.TODO(), req) if err != nil { return err } return resp.Encode(cmd.Stdout) } func ServeReceivePack(cmd ServerCommand, s transport.ReceivePackSession) error { ar, err := s.AdvertisedReferences() if err != nil { return fmt.Errorf("internal error in advertised references: %s", err) } if err := ar.Encode(cmd.Stdout); err != nil { return fmt.Errorf("error in advertised references encoding: %s", err) } req := packp.NewReferenceUpdateRequest() if err := req.Decode(cmd.Stdin); err != nil { return fmt.Errorf("error decoding: %s", err) } rs, err := s.ReceivePack(context.TODO(), req) if rs != nil { if err := rs.Encode(cmd.Stdout); err != nil { return fmt.Errorf("error in encoding report status %s", err) } } if err != nil { return fmt.Errorf("error in receive pack: %s", err) } return nil } 
golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/server/000077500000000000000000000000001345605224300240565ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/server/loader.go000066400000000000000000000036141345605224300256570ustar00rootroot00000000000000package server import ( "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/storage/filesystem" "gopkg.in/src-d/go-billy.v4" "gopkg.in/src-d/go-billy.v4/osfs" ) // DefaultLoader is a filesystem loader ignoring host and resolving paths to /. var DefaultLoader = NewFilesystemLoader(osfs.New("")) // Loader loads repository's storer.Storer based on an optional host and a path. type Loader interface { // Load loads a storer.Storer given a transport.Endpoint. // Returns transport.ErrRepositoryNotFound if the repository does not // exist. Load(ep *transport.Endpoint) (storer.Storer, error) } type fsLoader struct { base billy.Filesystem } // NewFilesystemLoader creates a Loader that ignores host and resolves paths // with a given base filesystem. func NewFilesystemLoader(base billy.Filesystem) Loader { return &fsLoader{base} } // Load looks up the endpoint's path in the base file system and returns a // storer for it. Returns transport.ErrRepositoryNotFound if a repository does // not exist in the given path. func (l *fsLoader) Load(ep *transport.Endpoint) (storer.Storer, error) { fs, err := l.base.Chroot(ep.Path) if err != nil { return nil, err } if _, err := fs.Stat("config"); err != nil { return nil, transport.ErrRepositoryNotFound } return filesystem.NewStorage(fs, cache.NewObjectLRUDefault()), nil } // MapLoader is a Loader that uses a lookup map of storer.Storer by // transport.Endpoint. type MapLoader map[string]storer.Storer // Load returns a storer.Storer for given a transport.Endpoint by looking it up // in the map. 
Returns transport.ErrRepositoryNotFound if the endpoint does not // exist. func (l MapLoader) Load(ep *transport.Endpoint) (storer.Storer, error) { s, ok := l[ep.String()] if !ok { return nil, transport.ErrRepositoryNotFound } return s, nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/server/loader_test.go000066400000000000000000000033241345605224300267140ustar00rootroot00000000000000package server import ( "os/exec" "path/filepath" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/storage/memory" . "gopkg.in/check.v1" ) type LoaderSuite struct { RepoPath string } var _ = Suite(&LoaderSuite{}) func (s *LoaderSuite) SetUpSuite(c *C) { if err := exec.Command("git", "--version").Run(); err != nil { c.Skip("git command not found") } dir := c.MkDir() s.RepoPath = filepath.Join(dir, "repo.git") c.Assert(exec.Command("git", "init", "--bare", s.RepoPath).Run(), IsNil) } func (s *LoaderSuite) endpoint(c *C, url string) *transport.Endpoint { ep, err := transport.NewEndpoint(url) c.Assert(err, IsNil) return ep } func (s *LoaderSuite) TestLoadNonExistent(c *C) { sto, err := DefaultLoader.Load(s.endpoint(c, "does-not-exist")) c.Assert(err, Equals, transport.ErrRepositoryNotFound) c.Assert(sto, IsNil) } func (s *LoaderSuite) TestLoadNonExistentIgnoreHost(c *C) { sto, err := DefaultLoader.Load(s.endpoint(c, "https://github.com/does-not-exist")) c.Assert(err, Equals, transport.ErrRepositoryNotFound) c.Assert(sto, IsNil) } func (s *LoaderSuite) TestLoad(c *C) { sto, err := DefaultLoader.Load(s.endpoint(c, s.RepoPath)) c.Assert(err, IsNil) c.Assert(sto, NotNil) } func (s *LoaderSuite) TestLoadIgnoreHost(c *C) { sto, err := DefaultLoader.Load(s.endpoint(c, s.RepoPath)) c.Assert(err, IsNil) c.Assert(sto, NotNil) } func (s *LoaderSuite) TestMapLoader(c *C) { ep, err := transport.NewEndpoint("file://test") sto := memory.NewStorage() c.Assert(err, IsNil) loader := MapLoader{ep.String(): sto} ep, err = transport.NewEndpoint("file://test") c.Assert(err, 
IsNil) loaderSto, err := loader.Load(ep) c.Assert(err, IsNil) c.Assert(sto, Equals, loaderSto) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/server/receive_pack_test.go000066400000000000000000000013211345605224300300610ustar00rootroot00000000000000package server_test import ( "gopkg.in/src-d/go-git.v4/plumbing/transport" . "gopkg.in/check.v1" ) type ReceivePackSuite struct { BaseSuite } var _ = Suite(&ReceivePackSuite{}) func (s *ReceivePackSuite) SetUpSuite(c *C) { s.BaseSuite.SetUpSuite(c) s.ReceivePackSuite.Client = s.client } func (s *ReceivePackSuite) SetUpTest(c *C) { s.prepareRepositories(c) } func (s *ReceivePackSuite) TearDownTest(c *C) { s.Suite.TearDownSuite(c) } // Overwritten, server returns error earlier. func (s *ReceivePackSuite) TestAdvertisedReferencesNotExists(c *C) { r, err := s.Client.NewReceivePackSession(s.NonExistentEndpoint, s.EmptyAuth) c.Assert(err, Equals, transport.ErrRepositoryNotFound) c.Assert(r, IsNil) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/server/server.go000066400000000000000000000215451345605224300257220ustar00rootroot00000000000000// Package server implements the git server protocol. For most use cases, the // transport-specific implementations should be used. package server import ( "context" "errors" "fmt" "io" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" "gopkg.in/src-d/go-git.v4/plumbing/revlist" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) var DefaultServer = NewServer(DefaultLoader) type server struct { loader Loader handler *handler } // NewServer returns a transport.Transport implementing a git server, // independent of transport. Each transport must wrap this. 
func NewServer(loader Loader) transport.Transport { return &server{ loader, &handler{asClient: false}, } } // NewClient returns a transport.Transport implementing a client with an // embedded server. func NewClient(loader Loader) transport.Transport { return &server{ loader, &handler{asClient: true}, } } func (s *server) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) { sto, err := s.loader.Load(ep) if err != nil { return nil, err } return s.handler.NewUploadPackSession(sto) } func (s *server) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) { sto, err := s.loader.Load(ep) if err != nil { return nil, err } return s.handler.NewReceivePackSession(sto) } type handler struct { asClient bool } func (h *handler) NewUploadPackSession(s storer.Storer) (transport.UploadPackSession, error) { return &upSession{ session: session{storer: s, asClient: h.asClient}, }, nil } func (h *handler) NewReceivePackSession(s storer.Storer) (transport.ReceivePackSession, error) { return &rpSession{ session: session{storer: s, asClient: h.asClient}, cmdStatus: map[plumbing.ReferenceName]error{}, }, nil } type session struct { storer storer.Storer caps *capability.List asClient bool } func (s *session) Close() error { return nil } func (s *session) SetAuth(transport.AuthMethod) error { //TODO: deprecate return nil } func (s *session) checkSupportedCapabilities(cl *capability.List) error { for _, c := range cl.All() { if !s.caps.Supports(c) { return fmt.Errorf("unsupported capability: %s", c) } } return nil } type upSession struct { session } func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { ar := packp.NewAdvRefs() if err := s.setSupportedCapabilities(ar.Capabilities); err != nil { return nil, err } s.caps = ar.Capabilities if err := setReferences(s.storer, ar); err != nil { return nil, err } if err := setHEAD(s.storer, ar); err != nil { return nil, err 
} if s.asClient && len(ar.References) == 0 { return nil, transport.ErrEmptyRemoteRepository } return ar, nil } func (s *upSession) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { if req.IsEmpty() { return nil, transport.ErrEmptyUploadPackRequest } if err := req.Validate(); err != nil { return nil, err } if s.caps == nil { s.caps = capability.NewList() if err := s.setSupportedCapabilities(s.caps); err != nil { return nil, err } } if err := s.checkSupportedCapabilities(req.Capabilities); err != nil { return nil, err } s.caps = req.Capabilities if len(req.Shallows) > 0 { return nil, fmt.Errorf("shallow not supported") } objs, err := s.objectsToUpload(req) if err != nil { return nil, err } pr, pw := io.Pipe() e := packfile.NewEncoder(pw, s.storer, false) go func() { // TODO: plumb through a pack window. _, err := e.Encode(objs, 10) pw.CloseWithError(err) }() return packp.NewUploadPackResponseWithPackfile(req, ioutil.NewContextReadCloser(ctx, pr), ), nil } func (s *upSession) objectsToUpload(req *packp.UploadPackRequest) ([]plumbing.Hash, error) { haves, err := revlist.Objects(s.storer, req.Haves, nil) if err != nil { return nil, err } return revlist.Objects(s.storer, req.Wants, haves) } func (*upSession) setSupportedCapabilities(c *capability.List) error { if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil { return err } if err := c.Set(capability.OFSDelta); err != nil { return err } return nil } type rpSession struct { session cmdStatus map[plumbing.ReferenceName]error firstErr error unpackErr error } func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) { ar := packp.NewAdvRefs() if err := s.setSupportedCapabilities(ar.Capabilities); err != nil { return nil, err } s.caps = ar.Capabilities if err := setReferences(s.storer, ar); err != nil { return nil, err } if err := setHEAD(s.storer, ar); err != nil { return nil, err } return ar, nil } var ( ErrUpdateReference = errors.New("failed to 
update ref") ) func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) { if s.caps == nil { s.caps = capability.NewList() if err := s.setSupportedCapabilities(s.caps); err != nil { return nil, err } } if err := s.checkSupportedCapabilities(req.Capabilities); err != nil { return nil, err } s.caps = req.Capabilities //TODO: Implement 'atomic' update of references. r := ioutil.NewContextReadCloser(ctx, req.Packfile) if err := s.writePackfile(r); err != nil { s.unpackErr = err s.firstErr = err return s.reportStatus(), err } s.updateReferences(req) return s.reportStatus(), s.firstErr } func (s *rpSession) updateReferences(req *packp.ReferenceUpdateRequest) { for _, cmd := range req.Commands { exists, err := referenceExists(s.storer, cmd.Name) if err != nil { s.setStatus(cmd.Name, err) continue } switch cmd.Action() { case packp.Create: if exists { s.setStatus(cmd.Name, ErrUpdateReference) continue } ref := plumbing.NewHashReference(cmd.Name, cmd.New) err := s.storer.SetReference(ref) s.setStatus(cmd.Name, err) case packp.Delete: if !exists { s.setStatus(cmd.Name, ErrUpdateReference) continue } err := s.storer.RemoveReference(cmd.Name) s.setStatus(cmd.Name, err) case packp.Update: if !exists { s.setStatus(cmd.Name, ErrUpdateReference) continue } if err != nil { s.setStatus(cmd.Name, err) continue } ref := plumbing.NewHashReference(cmd.Name, cmd.New) err := s.storer.SetReference(ref) s.setStatus(cmd.Name, err) } } } func (s *rpSession) writePackfile(r io.ReadCloser) error { if r == nil { return nil } if err := packfile.UpdateObjectStorage(s.storer, r); err != nil { _ = r.Close() return err } return r.Close() } func (s *rpSession) setStatus(ref plumbing.ReferenceName, err error) { s.cmdStatus[ref] = err if s.firstErr == nil && err != nil { s.firstErr = err } } func (s *rpSession) reportStatus() *packp.ReportStatus { if !s.caps.Supports(capability.ReportStatus) { return nil } rs := packp.NewReportStatus() 
rs.UnpackStatus = "ok" if s.unpackErr != nil { rs.UnpackStatus = s.unpackErr.Error() } if s.cmdStatus == nil { return rs } for ref, err := range s.cmdStatus { msg := "ok" if err != nil { msg = err.Error() } status := &packp.CommandStatus{ ReferenceName: ref, Status: msg, } rs.CommandStatuses = append(rs.CommandStatuses, status) } return rs } func (*rpSession) setSupportedCapabilities(c *capability.List) error { if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil { return err } if err := c.Set(capability.OFSDelta); err != nil { return err } if err := c.Set(capability.DeleteRefs); err != nil { return err } return c.Set(capability.ReportStatus) } func setHEAD(s storer.Storer, ar *packp.AdvRefs) error { ref, err := s.Reference(plumbing.HEAD) if err == plumbing.ErrReferenceNotFound { return nil } if err != nil { return err } if ref.Type() == plumbing.SymbolicReference { if err := ar.AddReference(ref); err != nil { return nil } ref, err = storer.ResolveReference(s, ref.Target()) if err == plumbing.ErrReferenceNotFound { return nil } if err != nil { return err } } if ref.Type() != plumbing.HashReference { return plumbing.ErrInvalidType } h := ref.Hash() ar.Head = &h return nil } func setReferences(s storer.Storer, ar *packp.AdvRefs) error { //TODO: add peeled references. 
iter, err := s.IterReferences() if err != nil { return err } return iter.ForEach(func(ref *plumbing.Reference) error { if ref.Type() != plumbing.HashReference { return nil } ar.References[ref.Name().String()] = ref.Hash() return nil }) } func referenceExists(s storer.ReferenceStorer, n plumbing.ReferenceName) (bool, error) { _, err := s.Reference(n) if err == plumbing.ErrReferenceNotFound { return false, nil } return err == nil, err } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/server/server_test.go000066400000000000000000000031341345605224300267530ustar00rootroot00000000000000package server_test import ( "testing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/plumbing/transport/client" "gopkg.in/src-d/go-git.v4/plumbing/transport/server" "gopkg.in/src-d/go-git.v4/plumbing/transport/test" "gopkg.in/src-d/go-git.v4/storage/filesystem" "gopkg.in/src-d/go-git.v4/storage/memory" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) func Test(t *testing.T) { TestingT(t) } type BaseSuite struct { fixtures.Suite test.ReceivePackSuite loader server.MapLoader client transport.Transport clientBackup transport.Transport asClient bool } func (s *BaseSuite) SetUpSuite(c *C) { s.Suite.SetUpSuite(c) s.loader = server.MapLoader{} if s.asClient { s.client = server.NewClient(s.loader) } else { s.client = server.NewServer(s.loader) } s.clientBackup = client.Protocols["file"] client.Protocols["file"] = s.client } func (s *BaseSuite) TearDownSuite(c *C) { if s.clientBackup == nil { delete(client.Protocols, "file") } else { client.Protocols["file"] = s.clientBackup } } func (s *BaseSuite) prepareRepositories(c *C) { var err error fs := fixtures.Basic().One().DotGit() s.Endpoint, err = transport.NewEndpoint(fs.Root()) c.Assert(err, IsNil) s.loader[s.Endpoint.String()] = filesystem.NewStorage(fs, cache.NewObjectLRUDefault()) s.EmptyEndpoint, err = transport.NewEndpoint("/empty.git") c.Assert(err, IsNil) 
s.loader[s.EmptyEndpoint.String()] = memory.NewStorage() s.NonExistentEndpoint, err = transport.NewEndpoint("/non-existent.git") c.Assert(err, IsNil) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/server/upload_pack_test.go000066400000000000000000000022751345605224300277340ustar00rootroot00000000000000package server_test import ( "gopkg.in/src-d/go-git.v4/plumbing/transport" . "gopkg.in/check.v1" ) type UploadPackSuite struct { BaseSuite } var _ = Suite(&UploadPackSuite{}) func (s *UploadPackSuite) SetUpSuite(c *C) { s.BaseSuite.SetUpSuite(c) s.Client = s.client } func (s *UploadPackSuite) SetUpTest(c *C) { s.prepareRepositories(c) } // Overwritten, server returns error earlier. func (s *UploadPackSuite) TestAdvertisedReferencesNotExists(c *C) { r, err := s.Client.NewUploadPackSession(s.NonExistentEndpoint, s.EmptyAuth) c.Assert(err, Equals, transport.ErrRepositoryNotFound) c.Assert(r, IsNil) } func (s *UploadPackSuite) TestUploadPackWithContext(c *C) { c.Skip("UploadPack cannot be canceled on server") } // Tests server with `asClient = true`. This is recommended when using a server // registered directly with `client.InstallProtocol`. 
type ClientLikeUploadPackSuite struct { UploadPackSuite } var _ = Suite(&ClientLikeUploadPackSuite{}) func (s *ClientLikeUploadPackSuite) SetUpSuite(c *C) { s.asClient = true s.UploadPackSuite.SetUpSuite(c) } func (s *ClientLikeUploadPackSuite) TestAdvertisedReferencesEmpty(c *C) { s.UploadPackSuite.TestAdvertisedReferencesEmpty(c) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/ssh/000077500000000000000000000000001345605224300233455ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/ssh/auth_method.go000066400000000000000000000204141345605224300261760ustar00rootroot00000000000000package ssh import ( "crypto/x509" "encoding/pem" "errors" "fmt" "io/ioutil" "os" "os/user" "path/filepath" "gopkg.in/src-d/go-git.v4/plumbing/transport" "github.com/mitchellh/go-homedir" "github.com/xanzy/ssh-agent" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/knownhosts" ) const DefaultUsername = "git" // AuthMethod is the interface all auth methods for the ssh client // must implement. The clientConfig method returns the ssh client // configuration needed to establish an ssh connection. type AuthMethod interface { transport.AuthMethod // ClientConfig should return a valid ssh.ClientConfig to be used to create // a connection to the SSH server. ClientConfig() (*ssh.ClientConfig, error) } // The names of the AuthMethod implementations. To be returned by the // Name() method. Most git servers only allow PublicKeysName and // PublicKeysCallbackName. const ( KeyboardInteractiveName = "ssh-keyboard-interactive" PasswordName = "ssh-password" PasswordCallbackName = "ssh-password-callback" PublicKeysName = "ssh-public-keys" PublicKeysCallbackName = "ssh-public-key-callback" ) // KeyboardInteractive implements AuthMethod by using a // prompt/response sequence controlled by the server. 
type KeyboardInteractive struct { User string Challenge ssh.KeyboardInteractiveChallenge HostKeyCallbackHelper } func (a *KeyboardInteractive) Name() string { return KeyboardInteractiveName } func (a *KeyboardInteractive) String() string { return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) } func (a *KeyboardInteractive) ClientConfig() (*ssh.ClientConfig, error) { return a.SetHostKeyCallback(&ssh.ClientConfig{ User: a.User, Auth: []ssh.AuthMethod{ ssh.KeyboardInteractiveChallenge(a.Challenge), }, }) } // Password implements AuthMethod by using the given password. type Password struct { User string Password string HostKeyCallbackHelper } func (a *Password) Name() string { return PasswordName } func (a *Password) String() string { return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) } func (a *Password) ClientConfig() (*ssh.ClientConfig, error) { return a.SetHostKeyCallback(&ssh.ClientConfig{ User: a.User, Auth: []ssh.AuthMethod{ssh.Password(a.Password)}, }) } // PasswordCallback implements AuthMethod by using a callback // to fetch the password. type PasswordCallback struct { User string Callback func() (pass string, err error) HostKeyCallbackHelper } func (a *PasswordCallback) Name() string { return PasswordCallbackName } func (a *PasswordCallback) String() string { return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) } func (a *PasswordCallback) ClientConfig() (*ssh.ClientConfig, error) { return a.SetHostKeyCallback(&ssh.ClientConfig{ User: a.User, Auth: []ssh.AuthMethod{ssh.PasswordCallback(a.Callback)}, }) } // PublicKeys implements AuthMethod by using the given key pairs. type PublicKeys struct { User string Signer ssh.Signer HostKeyCallbackHelper } // NewPublicKeys returns a PublicKeys from a PEM encoded private key. An // encryption password should be given if the pemBytes contains a password // encrypted PEM block otherwise password should be empty. It supports RSA // (PKCS#1), DSA (OpenSSL), and ECDSA private keys. 
func NewPublicKeys(user string, pemBytes []byte, password string) (*PublicKeys, error) { block, _ := pem.Decode(pemBytes) if block == nil { return nil, errors.New("invalid PEM data") } if x509.IsEncryptedPEMBlock(block) { key, err := x509.DecryptPEMBlock(block, []byte(password)) if err != nil { return nil, err } block = &pem.Block{Type: block.Type, Bytes: key} pemBytes = pem.EncodeToMemory(block) } signer, err := ssh.ParsePrivateKey(pemBytes) if err != nil { return nil, err } return &PublicKeys{User: user, Signer: signer}, nil } // NewPublicKeysFromFile returns a PublicKeys from a file containing a PEM // encoded private key. An encryption password should be given if the pemBytes // contains a password encrypted PEM block otherwise password should be empty. func NewPublicKeysFromFile(user, pemFile, password string) (*PublicKeys, error) { bytes, err := ioutil.ReadFile(pemFile) if err != nil { return nil, err } return NewPublicKeys(user, bytes, password) } func (a *PublicKeys) Name() string { return PublicKeysName } func (a *PublicKeys) String() string { return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) } func (a *PublicKeys) ClientConfig() (*ssh.ClientConfig, error) { return a.SetHostKeyCallback(&ssh.ClientConfig{ User: a.User, Auth: []ssh.AuthMethod{ssh.PublicKeys(a.Signer)}, }) } func username() (string, error) { var username string if user, err := user.Current(); err == nil { username = user.Username } else { username = os.Getenv("USER") } if username == "" { return "", errors.New("failed to get username") } return username, nil } // PublicKeysCallback implements AuthMethod by asking a // ssh.agent.Agent to act as a signer. type PublicKeysCallback struct { User string Callback func() (signers []ssh.Signer, err error) HostKeyCallbackHelper } // NewSSHAgentAuth returns a PublicKeysCallback based on a SSH agent, it opens // a pipe with the SSH agent and uses the pipe as the implementer of the public // key callback function. 
func NewSSHAgentAuth(u string) (*PublicKeysCallback, error) { var err error if u == "" { u, err = username() if err != nil { return nil, err } } a, _, err := sshagent.New() if err != nil { return nil, fmt.Errorf("error creating SSH agent: %q", err) } return &PublicKeysCallback{ User: u, Callback: a.Signers, }, nil } func (a *PublicKeysCallback) Name() string { return PublicKeysCallbackName } func (a *PublicKeysCallback) String() string { return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) } func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) { return a.SetHostKeyCallback(&ssh.ClientConfig{ User: a.User, Auth: []ssh.AuthMethod{ssh.PublicKeysCallback(a.Callback)}, }) } // NewKnownHostsCallback returns ssh.HostKeyCallback based on a file based on a // known_hosts file. http://man.openbsd.org/sshd#SSH_KNOWN_HOSTS_FILE_FORMAT // // If list of files is empty, then it will be read from the SSH_KNOWN_HOSTS // environment variable, example: // /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file // // If SSH_KNOWN_HOSTS is not set the following file locations will be used: // ~/.ssh/known_hosts // /etc/ssh/ssh_known_hosts func NewKnownHostsCallback(files ...string) (ssh.HostKeyCallback, error) { var err error if len(files) == 0 { if files, err = getDefaultKnownHostsFiles(); err != nil { return nil, err } } if files, err = filterKnownHostsFiles(files...); err != nil { return nil, err } return knownhosts.New(files...) 
} func getDefaultKnownHostsFiles() ([]string, error) { files := filepath.SplitList(os.Getenv("SSH_KNOWN_HOSTS")) if len(files) != 0 { return files, nil } homeDirPath, err := homedir.Dir() if err != nil { return nil, err } return []string{ filepath.Join(homeDirPath, "/.ssh/known_hosts"), "/etc/ssh/ssh_known_hosts", }, nil } func filterKnownHostsFiles(files ...string) ([]string, error) { var out []string for _, file := range files { _, err := os.Stat(file) if err == nil { out = append(out, file) continue } if !os.IsNotExist(err) { return nil, err } } if len(out) == 0 { return nil, fmt.Errorf("unable to find any valid known_hosts file, set SSH_KNOWN_HOSTS env variable") } return out, nil } // HostKeyCallbackHelper is a helper that provides common functionality to // configure HostKeyCallback into a ssh.ClientConfig. type HostKeyCallbackHelper struct { // HostKeyCallback is the function type used for verifying server keys. // If nil default callback will be create using NewKnownHostsCallback // without argument. HostKeyCallback ssh.HostKeyCallback } // SetHostKeyCallback sets the field HostKeyCallback in the given cfg. If // HostKeyCallback is empty a default callback is created using // NewKnownHostsCallback. func (m *HostKeyCallbackHelper) SetHostKeyCallback(cfg *ssh.ClientConfig) (*ssh.ClientConfig, error) { var err error if m.HostKeyCallback == nil { if m.HostKeyCallback, err = NewKnownHostsCallback(); err != nil { return cfg, err } } cfg.HostKeyCallback = m.HostKeyCallback return cfg, nil } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/ssh/auth_method_test.go000066400000000000000000000121331345605224300272340ustar00rootroot00000000000000package ssh import ( "bufio" "fmt" "io/ioutil" "os" "strings" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/testdata" . 
"gopkg.in/check.v1" ) type ( SuiteCommon struct{} mockKnownHosts struct{} ) func (mockKnownHosts) host() string { return "github.com" } func (mockKnownHosts) knownHosts() []byte { return []byte(`github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==`) } func (mockKnownHosts) Network() string { return "tcp" } func (mockKnownHosts) String() string { return "github.com:22" } var _ = Suite(&SuiteCommon{}) func (s *SuiteCommon) TestKeyboardInteractiveName(c *C) { a := &KeyboardInteractive{ User: "test", Challenge: nil, } c.Assert(a.Name(), Equals, KeyboardInteractiveName) } func (s *SuiteCommon) TestKeyboardInteractiveString(c *C) { a := &KeyboardInteractive{ User: "test", Challenge: nil, } c.Assert(a.String(), Equals, fmt.Sprintf("user: test, name: %s", KeyboardInteractiveName)) } func (s *SuiteCommon) TestPasswordName(c *C) { a := &Password{ User: "test", Password: "", } c.Assert(a.Name(), Equals, PasswordName) } func (s *SuiteCommon) TestPasswordString(c *C) { a := &Password{ User: "test", Password: "", } c.Assert(a.String(), Equals, fmt.Sprintf("user: test, name: %s", PasswordName)) } func (s *SuiteCommon) TestPasswordCallbackName(c *C) { a := &PasswordCallback{ User: "test", Callback: nil, } c.Assert(a.Name(), Equals, PasswordCallbackName) } func (s *SuiteCommon) TestPasswordCallbackString(c *C) { a := &PasswordCallback{ User: "test", Callback: nil, } c.Assert(a.String(), Equals, fmt.Sprintf("user: test, name: %s", PasswordCallbackName)) } func (s *SuiteCommon) TestPublicKeysName(c *C) { a := &PublicKeys{ User: "test", Signer: nil, } c.Assert(a.Name(), Equals, PublicKeysName) } func (s *SuiteCommon) TestPublicKeysString(c *C) { a := 
&PublicKeys{ User: "test", Signer: nil, } c.Assert(a.String(), Equals, fmt.Sprintf("user: test, name: %s", PublicKeysName)) } func (s *SuiteCommon) TestPublicKeysCallbackName(c *C) { a := &PublicKeysCallback{ User: "test", Callback: nil, } c.Assert(a.Name(), Equals, PublicKeysCallbackName) } func (s *SuiteCommon) TestPublicKeysCallbackString(c *C) { a := &PublicKeysCallback{ User: "test", Callback: nil, } c.Assert(a.String(), Equals, fmt.Sprintf("user: test, name: %s", PublicKeysCallbackName)) } func (s *SuiteCommon) TestNewSSHAgentAuth(c *C) { if os.Getenv("SSH_AUTH_SOCK") == "" { c.Skip("SSH_AUTH_SOCK or SSH_TEST_PRIVATE_KEY are required") } auth, err := NewSSHAgentAuth("foo") c.Assert(err, IsNil) c.Assert(auth, NotNil) } func (s *SuiteCommon) TestNewSSHAgentAuthNoAgent(c *C) { addr := os.Getenv("SSH_AUTH_SOCK") err := os.Unsetenv("SSH_AUTH_SOCK") c.Assert(err, IsNil) defer func() { err := os.Setenv("SSH_AUTH_SOCK", addr) c.Assert(err, IsNil) }() k, err := NewSSHAgentAuth("foo") c.Assert(k, IsNil) c.Assert(err, ErrorMatches, ".*SSH_AUTH_SOCK.*|.*SSH agent .* not running.*") } func (*SuiteCommon) TestNewPublicKeys(c *C) { auth, err := NewPublicKeys("foo", testdata.PEMBytes["rsa"], "") c.Assert(err, IsNil) c.Assert(auth, NotNil) } func (*SuiteCommon) TestNewPublicKeysWithEncryptedPEM(c *C) { f := testdata.PEMEncryptedKeys[0] auth, err := NewPublicKeys("foo", f.PEMBytes, f.EncryptionKey) c.Assert(err, IsNil) c.Assert(auth, NotNil) } func (*SuiteCommon) TestNewPublicKeysFromFile(c *C) { f, err := ioutil.TempFile("", "ssh-test") c.Assert(err, IsNil) _, err = f.Write(testdata.PEMBytes["rsa"]) c.Assert(err, IsNil) c.Assert(f.Close(), IsNil) defer os.RemoveAll(f.Name()) auth, err := NewPublicKeysFromFile("foo", f.Name(), "") c.Assert(err, IsNil) c.Assert(auth, NotNil) } func (*SuiteCommon) TestNewPublicKeysWithInvalidPEM(c *C) { auth, err := NewPublicKeys("foo", []byte("bar"), "") c.Assert(err, NotNil) c.Assert(auth, IsNil) } func (*SuiteCommon) 
TestNewKnownHostsCallback(c *C) { var mock = mockKnownHosts{} f, err := ioutil.TempFile("", "known-hosts") c.Assert(err, IsNil) _, err = f.Write(mock.knownHosts()) c.Assert(err, IsNil) err = f.Close() c.Assert(err, IsNil) defer os.RemoveAll(f.Name()) f, err = os.Open(f.Name()) c.Assert(err, IsNil) defer f.Close() var hostKey ssh.PublicKey scanner := bufio.NewScanner(f) for scanner.Scan() { fields := strings.Split(scanner.Text(), " ") if len(fields) != 3 { continue } if strings.Contains(fields[0], mock.host()) { var err error hostKey, _, _, _, err = ssh.ParseAuthorizedKey(scanner.Bytes()) if err != nil { c.Fatalf("error parsing %q: %v", fields[2], err) } break } } if hostKey == nil { c.Fatalf("no hostkey for %s", mock.host()) } clb, err := NewKnownHostsCallback(f.Name()) c.Assert(err, IsNil) err = clb(mock.String(), mock, hostKey) c.Assert(err, IsNil) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/ssh/common.go000066400000000000000000000103731345605224300251700ustar00rootroot00000000000000// Package ssh implements the SSH transport protocol. package ssh import ( "fmt" "reflect" "strconv" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common" "github.com/kevinburke/ssh_config" "golang.org/x/crypto/ssh" ) // DefaultClient is the default SSH client. var DefaultClient = NewClient(nil) // DefaultSSHConfig is the reader used to access parameters stored in the // system's ssh_config files. If nil all the ssh_config are ignored. var DefaultSSHConfig sshConfig = ssh_config.DefaultUserSettings type sshConfig interface { Get(alias, key string) string } // NewClient creates a new SSH client with an optional *ssh.ClientConfig. func NewClient(config *ssh.ClientConfig) transport.Transport { return common.NewClient(&runner{config: config}) } // DefaultAuthBuilder is the function used to create a default AuthMethod, when // the user doesn't provide any. 
var DefaultAuthBuilder = func(user string) (AuthMethod, error) { return NewSSHAgentAuth(user) } const DefaultPort = 22 type runner struct { config *ssh.ClientConfig } func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { c := &command{command: cmd, endpoint: ep, config: r.config} if auth != nil { c.setAuth(auth) } if err := c.connect(); err != nil { return nil, err } return c, nil } type command struct { *ssh.Session connected bool command string endpoint *transport.Endpoint client *ssh.Client auth AuthMethod config *ssh.ClientConfig } func (c *command) setAuth(auth transport.AuthMethod) error { a, ok := auth.(AuthMethod) if !ok { return transport.ErrInvalidAuthMethod } c.auth = a return nil } func (c *command) Start() error { return c.Session.Start(endpointToCommand(c.command, c.endpoint)) } // Close closes the SSH session and connection. func (c *command) Close() error { if !c.connected { return nil } c.connected = false //XXX: If did read the full packfile, then the session might be already // closed. _ = c.Session.Close() return c.client.Close() } // connect connects to the SSH server, unless a AuthMethod was set with // SetAuth method, by default uses an auth method based on PublicKeysCallback, // it connects to a SSH agent, using the address stored in the SSH_AUTH_SOCK // environment var. 
func (c *command) connect() error { if c.connected { return transport.ErrAlreadyConnected } if c.auth == nil { if err := c.setAuthFromEndpoint(); err != nil { return err } } var err error config, err := c.auth.ClientConfig() if err != nil { return err } overrideConfig(c.config, config) c.client, err = ssh.Dial("tcp", c.getHostWithPort(), config) if err != nil { return err } c.Session, err = c.client.NewSession() if err != nil { _ = c.client.Close() return err } c.connected = true return nil } func (c *command) getHostWithPort() string { if addr, found := c.doGetHostWithPortFromSSHConfig(); found { return addr } host := c.endpoint.Host port := c.endpoint.Port if port <= 0 { port = DefaultPort } return fmt.Sprintf("%s:%d", host, port) } func (c *command) doGetHostWithPortFromSSHConfig() (addr string, found bool) { if DefaultSSHConfig == nil { return } host := c.endpoint.Host port := c.endpoint.Port configHost := DefaultSSHConfig.Get(c.endpoint.Host, "Hostname") if configHost != "" { host = configHost found = true } if !found { return } configPort := DefaultSSHConfig.Get(c.endpoint.Host, "Port") if configPort != "" { if i, err := strconv.Atoi(configPort); err == nil { port = i } } addr = fmt.Sprintf("%s:%d", host, port) return } func (c *command) setAuthFromEndpoint() error { var err error c.auth, err = DefaultAuthBuilder(c.endpoint.User) return err } func endpointToCommand(cmd string, ep *transport.Endpoint) string { return fmt.Sprintf("%s '%s'", cmd, ep.Path) } func overrideConfig(overrides *ssh.ClientConfig, c *ssh.ClientConfig) { if overrides == nil { return } t := reflect.TypeOf(*c) vc := reflect.ValueOf(c).Elem() vo := reflect.ValueOf(overrides).Elem() for i := 0; i < t.NumField(); i++ { f := t.Field(i) vcf := vc.FieldByName(f.Name) vof := vo.FieldByName(f.Name) vcf.Set(vof) } *c = vc.Interface().(ssh.ClientConfig) } 
golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/ssh/common_test.go000066400000000000000000000042651345605224300262320ustar00rootroot00000000000000package ssh import ( "testing" "github.com/kevinburke/ssh_config" "golang.org/x/crypto/ssh" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing/transport" ) func Test(t *testing.T) { TestingT(t) } func (s *SuiteCommon) TestOverrideConfig(c *C) { config := &ssh.ClientConfig{ User: "foo", Auth: []ssh.AuthMethod{ ssh.Password("yourpassword"), }, HostKeyCallback: ssh.FixedHostKey(nil), } target := &ssh.ClientConfig{} overrideConfig(config, target) c.Assert(target.User, Equals, "foo") c.Assert(target.Auth, HasLen, 1) c.Assert(target.HostKeyCallback, NotNil) } func (s *SuiteCommon) TestOverrideConfigKeep(c *C) { config := &ssh.ClientConfig{ User: "foo", } target := &ssh.ClientConfig{ User: "bar", } overrideConfig(config, target) c.Assert(target.User, Equals, "foo") } func (s *SuiteCommon) TestDefaultSSHConfig(c *C) { defer func() { DefaultSSHConfig = ssh_config.DefaultUserSettings }() DefaultSSHConfig = &mockSSHConfig{map[string]map[string]string{ "github.com": { "Hostname": "foo.local", "Port": "42", }, }} ep, err := transport.NewEndpoint("git@github.com:foo/bar.git") c.Assert(err, IsNil) cmd := &command{endpoint: ep} c.Assert(cmd.getHostWithPort(), Equals, "foo.local:42") } func (s *SuiteCommon) TestDefaultSSHConfigNil(c *C) { defer func() { DefaultSSHConfig = ssh_config.DefaultUserSettings }() DefaultSSHConfig = nil ep, err := transport.NewEndpoint("git@github.com:foo/bar.git") c.Assert(err, IsNil) cmd := &command{endpoint: ep} c.Assert(cmd.getHostWithPort(), Equals, "github.com:22") } func (s *SuiteCommon) TestDefaultSSHConfigWildcard(c *C) { defer func() { DefaultSSHConfig = ssh_config.DefaultUserSettings }() DefaultSSHConfig = &mockSSHConfig{Values: map[string]map[string]string{ "*": { "Port": "42", }, }} ep, err := transport.NewEndpoint("git@github.com:foo/bar.git") c.Assert(err, IsNil) cmd := 
&command{endpoint: ep} c.Assert(cmd.getHostWithPort(), Equals, "github.com:22") } type mockSSHConfig struct { Values map[string]map[string]string } func (c *mockSSHConfig) Get(alias, key string) string { a, ok := c.Values[alias] if !ok { return c.Values["*"][key] } return a[key] } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/ssh/upload_pack_test.go000066400000000000000000000054701345605224300272230ustar00rootroot00000000000000package ssh import ( "fmt" "io" "io/ioutil" "log" "net" "os" "os/exec" "path/filepath" "strings" "sync" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/plumbing/transport/test" "github.com/gliderlabs/ssh" stdssh "golang.org/x/crypto/ssh" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type UploadPackSuite struct { test.UploadPackSuite fixtures.Suite port int base string } var _ = Suite(&UploadPackSuite{}) func (s *UploadPackSuite) SetUpSuite(c *C) { s.Suite.SetUpSuite(c) l, err := net.Listen("tcp", "localhost:0") c.Assert(err, IsNil) s.port = l.Addr().(*net.TCPAddr).Port s.base, err = ioutil.TempDir(os.TempDir(), fmt.Sprintf("go-git-ssh-%d", s.port)) c.Assert(err, IsNil) DefaultAuthBuilder = func(user string) (AuthMethod, error) { return &Password{User: user}, nil } s.UploadPackSuite.Client = NewClient(&stdssh.ClientConfig{ HostKeyCallback: stdssh.InsecureIgnoreHostKey(), }) s.UploadPackSuite.Endpoint = s.prepareRepository(c, fixtures.Basic().One(), "basic.git") s.UploadPackSuite.EmptyEndpoint = s.prepareRepository(c, fixtures.ByTag("empty").One(), "empty.git") s.UploadPackSuite.NonExistentEndpoint = s.newEndpoint(c, "non-existent.git") server := &ssh.Server{Handler: handlerSSH} go func() { log.Fatal(server.Serve(l)) }() } func (s *UploadPackSuite) prepareRepository(c *C, f *fixtures.Fixture, name string) *transport.Endpoint { fs := f.DotGit() err := fixtures.EnsureIsBare(fs) c.Assert(err, IsNil) path := filepath.Join(s.base, name) err = os.Rename(fs.Root(), path) c.Assert(err, IsNil) return 
s.newEndpoint(c, name) } func (s *UploadPackSuite) newEndpoint(c *C, name string) *transport.Endpoint { ep, err := transport.NewEndpoint(fmt.Sprintf( "ssh://git@localhost:%d/%s/%s", s.port, filepath.ToSlash(s.base), name, )) c.Assert(err, IsNil) return ep } func handlerSSH(s ssh.Session) { cmd, stdin, stderr, stdout, err := buildCommand(s.Command()) if err != nil { fmt.Println(err) return } if err := cmd.Start(); err != nil { fmt.Println(err) return } go func() { defer stdin.Close() io.Copy(stdin, s) }() var wg sync.WaitGroup wg.Add(2) go func() { defer wg.Done() io.Copy(s.Stderr(), stderr) }() go func() { defer wg.Done() io.Copy(s, stdout) }() wg.Wait() if err := cmd.Wait(); err != nil { return } } func buildCommand(c []string) (cmd *exec.Cmd, stdin io.WriteCloser, stderr, stdout io.ReadCloser, err error) { if len(c) != 2 { err = fmt.Errorf("invalid command") return } // fix for Windows environments path := strings.Replace(c[1], "/C:/", "C:/", 1) cmd = exec.Command(c[0], path) stdout, err = cmd.StdoutPipe() if err != nil { return } stdin, err = cmd.StdinPipe() if err != nil { return } stderr, err = cmd.StderrPipe() if err != nil { return } return } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/test/000077500000000000000000000000001345605224300235275ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/test/receive_pack.go000066400000000000000000000266641345605224300265140ustar00rootroot00000000000000// Package test implements common test suite for different transport // implementations. // package test import ( "bytes" "context" "io" "io/ioutil" "os" "path/filepath" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/storage/memory" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type ReceivePackSuite struct { Endpoint *transport.Endpoint EmptyEndpoint *transport.Endpoint NonExistentEndpoint *transport.Endpoint EmptyAuth transport.AuthMethod Client transport.Transport } func (s *ReceivePackSuite) TestAdvertisedReferencesEmpty(c *C) { r, err := s.Client.NewReceivePackSession(s.EmptyEndpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() ar, err := r.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(ar.Head, IsNil) } func (s *ReceivePackSuite) TestAdvertisedReferencesNotExists(c *C) { r, err := s.Client.NewReceivePackSession(s.NonExistentEndpoint, s.EmptyAuth) c.Assert(err, IsNil) ar, err := r.AdvertisedReferences() c.Assert(err, Equals, transport.ErrRepositoryNotFound) c.Assert(ar, IsNil) c.Assert(r.Close(), IsNil) r, err = s.Client.NewReceivePackSession(s.NonExistentEndpoint, s.EmptyAuth) c.Assert(err, IsNil) req := packp.NewReferenceUpdateRequest() req.Commands = []*packp.Command{ {Name: "master", Old: plumbing.ZeroHash, New: plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")}, } writer, err := r.ReceivePack(context.Background(), req) c.Assert(err, Equals, transport.ErrRepositoryNotFound) c.Assert(writer, IsNil) c.Assert(r.Close(), IsNil) } func (s *ReceivePackSuite) TestCallAdvertisedReferenceTwice(c *C) { r, err := s.Client.NewReceivePackSession(s.Endpoint, s.EmptyAuth) defer func() { c.Assert(r.Close(), IsNil) }() c.Assert(err, IsNil) ar1, err := r.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(ar1, NotNil) ar2, err := r.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(ar2, DeepEquals, ar1) } func (s *ReceivePackSuite) TestDefaultBranch(c *C) { r, err := s.Client.NewReceivePackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() info, err := r.AdvertisedReferences() c.Assert(err, IsNil) ref, ok := info.References["refs/heads/master"] c.Assert(ok, Equals, true) c.Assert(ref, 
Equals, fixtures.Basic().One().Head) } func (s *ReceivePackSuite) TestCapabilities(c *C) { r, err := s.Client.NewReceivePackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() info, err := r.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(info.Capabilities.Get("agent"), HasLen, 1) } func (s *ReceivePackSuite) TestFullSendPackOnEmpty(c *C) { endpoint := s.EmptyEndpoint full := true fixture := fixtures.Basic().ByTag("packfile").One() req := packp.NewReferenceUpdateRequest() req.Commands = []*packp.Command{ {Name: "refs/heads/master", Old: plumbing.ZeroHash, New: fixture.Head}, } s.receivePack(c, endpoint, req, fixture, full) s.checkRemoteHead(c, endpoint, fixture.Head) } func (s *ReceivePackSuite) TestSendPackWithContext(c *C) { fixture := fixtures.Basic().ByTag("packfile").One() req := packp.NewReferenceUpdateRequest() req.Packfile = fixture.Packfile() req.Commands = []*packp.Command{ {Name: "refs/heads/master", Old: plumbing.ZeroHash, New: fixture.Head}, } r, err := s.Client.NewReceivePackSession(s.EmptyEndpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() info, err := r.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(info, NotNil) ctx, close := context.WithCancel(context.TODO()) close() report, err := r.ReceivePack(ctx, req) c.Assert(err, NotNil) c.Assert(report, IsNil) } func (s *ReceivePackSuite) TestSendPackOnEmpty(c *C) { endpoint := s.EmptyEndpoint full := false fixture := fixtures.Basic().ByTag("packfile").One() req := packp.NewReferenceUpdateRequest() req.Commands = []*packp.Command{ {Name: "refs/heads/master", Old: plumbing.ZeroHash, New: fixture.Head}, } s.receivePack(c, endpoint, req, fixture, full) s.checkRemoteHead(c, endpoint, fixture.Head) } func (s *ReceivePackSuite) TestSendPackOnEmptyWithReportStatus(c *C) { endpoint := s.EmptyEndpoint full := false fixture := fixtures.Basic().ByTag("packfile").One() req := packp.NewReferenceUpdateRequest() req.Commands = 
[]*packp.Command{ {Name: "refs/heads/master", Old: plumbing.ZeroHash, New: fixture.Head}, } req.Capabilities.Set(capability.ReportStatus) s.receivePack(c, endpoint, req, fixture, full) s.checkRemoteHead(c, endpoint, fixture.Head) } func (s *ReceivePackSuite) TestFullSendPackOnNonEmpty(c *C) { endpoint := s.Endpoint full := true fixture := fixtures.Basic().ByTag("packfile").One() req := packp.NewReferenceUpdateRequest() req.Commands = []*packp.Command{ {Name: "refs/heads/master", Old: fixture.Head, New: fixture.Head}, } s.receivePack(c, endpoint, req, fixture, full) s.checkRemoteHead(c, endpoint, fixture.Head) } func (s *ReceivePackSuite) TestSendPackOnNonEmpty(c *C) { endpoint := s.Endpoint full := false fixture := fixtures.Basic().ByTag("packfile").One() req := packp.NewReferenceUpdateRequest() req.Commands = []*packp.Command{ {Name: "refs/heads/master", Old: fixture.Head, New: fixture.Head}, } s.receivePack(c, endpoint, req, fixture, full) s.checkRemoteHead(c, endpoint, fixture.Head) } func (s *ReceivePackSuite) TestSendPackOnNonEmptyWithReportStatus(c *C) { endpoint := s.Endpoint full := false fixture := fixtures.Basic().ByTag("packfile").One() req := packp.NewReferenceUpdateRequest() req.Commands = []*packp.Command{ {Name: "refs/heads/master", Old: fixture.Head, New: fixture.Head}, } req.Capabilities.Set(capability.ReportStatus) s.receivePack(c, endpoint, req, fixture, full) s.checkRemoteHead(c, endpoint, fixture.Head) } func (s *ReceivePackSuite) TestSendPackOnNonEmptyWithReportStatusWithError(c *C) { endpoint := s.Endpoint full := false fixture := fixtures.Basic().ByTag("packfile").One() req := packp.NewReferenceUpdateRequest() req.Commands = []*packp.Command{ {Name: "refs/heads/master", Old: plumbing.ZeroHash, New: fixture.Head}, } req.Capabilities.Set(capability.ReportStatus) report, err := s.receivePackNoCheck(c, endpoint, req, fixture, full) //XXX: Recent git versions return "failed to update ref", while older // (>=1.9) return "failed to lock". 
c.Assert(err, ErrorMatches, ".*(failed to update ref|failed to lock).*") c.Assert(report.UnpackStatus, Equals, "ok") c.Assert(len(report.CommandStatuses), Equals, 1) c.Assert(report.CommandStatuses[0].ReferenceName, Equals, plumbing.ReferenceName("refs/heads/master")) c.Assert(report.CommandStatuses[0].Status, Matches, "(failed to update ref|failed to lock)") s.checkRemoteHead(c, endpoint, fixture.Head) } func (s *ReceivePackSuite) receivePackNoCheck(c *C, ep *transport.Endpoint, req *packp.ReferenceUpdateRequest, fixture *fixtures.Fixture, callAdvertisedReferences bool) (*packp.ReportStatus, error) { url := "" if fixture != nil { url = fixture.URL } comment := Commentf( "failed with ep=%s fixture=%s callAdvertisedReferences=%s", ep.String(), url, callAdvertisedReferences, ) // Set write permissions to endpoint directory files. By default // fixtures are generated with read only permissions, this casuses // errors deleting or modifying files. rootPath := ep.Path stat, err := os.Stat(ep.Path) if rootPath != "" && err == nil && stat.IsDir() { objectPath := filepath.Join(rootPath, "objects/pack") files, err := ioutil.ReadDir(objectPath) c.Assert(err, IsNil) for _, file := range files { path := filepath.Join(objectPath, file.Name()) err = os.Chmod(path, 0644) c.Assert(err, IsNil) } } r, err := s.Client.NewReceivePackSession(ep, s.EmptyAuth) c.Assert(err, IsNil, comment) defer func() { c.Assert(r.Close(), IsNil, comment) }() if callAdvertisedReferences { info, err := r.AdvertisedReferences() c.Assert(err, IsNil, comment) c.Assert(info, NotNil, comment) } if fixture != nil { c.Assert(fixture.Packfile(), NotNil) req.Packfile = fixture.Packfile() } else { req.Packfile = s.emptyPackfile() } return r.ReceivePack(context.Background(), req) } func (s *ReceivePackSuite) receivePack(c *C, ep *transport.Endpoint, req *packp.ReferenceUpdateRequest, fixture *fixtures.Fixture, callAdvertisedReferences bool) { url := "" if fixture != nil { url = fixture.URL } comment := Commentf( 
"failed with ep=%s fixture=%s callAdvertisedReferences=%s", ep.String(), url, callAdvertisedReferences, ) report, err := s.receivePackNoCheck(c, ep, req, fixture, callAdvertisedReferences) c.Assert(err, IsNil, comment) if req.Capabilities.Supports(capability.ReportStatus) { c.Assert(report, NotNil, comment) c.Assert(report.Error(), IsNil, comment) } else { c.Assert(report, IsNil, comment) } } func (s *ReceivePackSuite) checkRemoteHead(c *C, ep *transport.Endpoint, head plumbing.Hash) { s.checkRemoteReference(c, ep, "refs/heads/master", head) } func (s *ReceivePackSuite) checkRemoteReference(c *C, ep *transport.Endpoint, refName string, head plumbing.Hash) { r, err := s.Client.NewUploadPackSession(ep, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() ar, err := r.AdvertisedReferences() c.Assert(err, IsNil, Commentf("endpoint: %s", ep.String())) ref, ok := ar.References[refName] if head == plumbing.ZeroHash { c.Assert(ok, Equals, false) } else { c.Assert(ok, Equals, true) c.Assert(ref, DeepEquals, head) } } func (s *ReceivePackSuite) TestSendPackAddDeleteReference(c *C) { s.testSendPackAddReference(c) s.testSendPackDeleteReference(c) } func (s *ReceivePackSuite) testSendPackAddReference(c *C) { r, err := s.Client.NewReceivePackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) fixture := fixtures.Basic().ByTag("packfile").One() ar, err := r.AdvertisedReferences() c.Assert(err, IsNil) req := packp.NewReferenceUpdateRequest() req.Commands = []*packp.Command{ {Name: "refs/heads/newbranch", Old: plumbing.ZeroHash, New: fixture.Head}, } if ar.Capabilities.Supports(capability.ReportStatus) { req.Capabilities.Set(capability.ReportStatus) } c.Assert(r.Close(), IsNil) s.receivePack(c, s.Endpoint, req, nil, false) s.checkRemoteReference(c, s.Endpoint, "refs/heads/newbranch", fixture.Head) } func (s *ReceivePackSuite) testSendPackDeleteReference(c *C) { r, err := s.Client.NewReceivePackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) 
fixture := fixtures.Basic().ByTag("packfile").One() ar, err := r.AdvertisedReferences() c.Assert(err, IsNil) req := packp.NewReferenceUpdateRequest() req.Commands = []*packp.Command{ {Name: "refs/heads/newbranch", Old: fixture.Head, New: plumbing.ZeroHash}, } if ar.Capabilities.Supports(capability.ReportStatus) { req.Capabilities.Set(capability.ReportStatus) } if !ar.Capabilities.Supports(capability.DeleteRefs) { c.Fatal("capability delete-refs not supported") } c.Assert(r.Close(), IsNil) s.receivePack(c, s.Endpoint, req, nil, false) s.checkRemoteReference(c, s.Endpoint, "refs/heads/newbranch", plumbing.ZeroHash) } func (s *ReceivePackSuite) emptyPackfile() io.ReadCloser { var buf bytes.Buffer e := packfile.NewEncoder(&buf, memory.NewStorage(), false) _, err := e.Encode(nil, 10) if err != nil { panic(err) } return ioutil.NopCloser(&buf) } golang-gopkg-src-d-go-git.v4-4.11.0/plumbing/transport/test/upload_pack.go000066400000000000000000000202241345605224300263400ustar00rootroot00000000000000// Package test implements common test suite for different transport // implementations. // package test import ( "bytes" "context" "io" "io/ioutil" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/storage/memory" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" ) type UploadPackSuite struct { Endpoint *transport.Endpoint EmptyEndpoint *transport.Endpoint NonExistentEndpoint *transport.Endpoint EmptyAuth transport.AuthMethod Client transport.Transport } func (s *UploadPackSuite) TestAdvertisedReferencesEmpty(c *C) { r, err := s.Client.NewUploadPackSession(s.EmptyEndpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() ar, err := r.AdvertisedReferences() c.Assert(err, Equals, transport.ErrEmptyRemoteRepository) c.Assert(ar, IsNil) } func (s *UploadPackSuite) TestAdvertisedReferencesNotExists(c *C) { r, err := s.Client.NewUploadPackSession(s.NonExistentEndpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() ar, err := r.AdvertisedReferences() c.Assert(err, Equals, transport.ErrRepositoryNotFound) c.Assert(ar, IsNil) r, err = s.Client.NewUploadPackSession(s.NonExistentEndpoint, s.EmptyAuth) c.Assert(err, IsNil) req := packp.NewUploadPackRequest() req.Wants = append(req.Wants, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) reader, err := r.UploadPack(context.Background(), req) c.Assert(err, Equals, transport.ErrRepositoryNotFound) c.Assert(reader, IsNil) } func (s *UploadPackSuite) TestCallAdvertisedReferenceTwice(c *C) { r, err := s.Client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() ar1, err := r.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(ar1, NotNil) ar2, err := r.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(ar2, DeepEquals, ar1) } func (s *UploadPackSuite) TestDefaultBranch(c *C) { r, err := s.Client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() info, err := r.AdvertisedReferences() c.Assert(err, IsNil) symrefs := info.Capabilities.Get(capability.SymRef) c.Assert(symrefs, HasLen, 1) c.Assert(symrefs[0], Equals, 
"HEAD:refs/heads/master") } func (s *UploadPackSuite) TestAdvertisedReferencesFilterUnsupported(c *C) { r, err := s.Client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() info, err := r.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(info.Capabilities.Supports(capability.MultiACK), Equals, false) } func (s *UploadPackSuite) TestCapabilities(c *C) { r, err := s.Client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() info, err := r.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(info.Capabilities.Get(capability.Agent), HasLen, 1) } func (s *UploadPackSuite) TestUploadPack(c *C) { r, err := s.Client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() req := packp.NewUploadPackRequest() req.Wants = append(req.Wants, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) reader, err := r.UploadPack(context.Background(), req) c.Assert(err, IsNil) s.checkObjectNumber(c, reader, 28) } func (s *UploadPackSuite) TestUploadPackWithContext(c *C) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond) defer cancel() r, err := s.Client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() info, err := r.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(info, NotNil) req := packp.NewUploadPackRequest() req.Wants = append(req.Wants, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) reader, err := r.UploadPack(ctx, req) c.Assert(err, NotNil) c.Assert(reader, IsNil) } func (s *UploadPackSuite) TestUploadPackWithContextOnRead(c *C) { ctx, cancel := context.WithCancel(context.Background()) r, err := s.Client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) info, err := r.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(info, NotNil) req := 
packp.NewUploadPackRequest() req.Wants = append(req.Wants, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) reader, err := r.UploadPack(ctx, req) c.Assert(err, IsNil) c.Assert(reader, NotNil) cancel() _, err = io.Copy(ioutil.Discard, reader) c.Assert(err, NotNil) err = reader.Close() c.Assert(err, IsNil) err = r.Close() c.Assert(err, IsNil) } func (s *UploadPackSuite) TestUploadPackFull(c *C) { r, err := s.Client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() info, err := r.AdvertisedReferences() c.Assert(err, IsNil) c.Assert(info, NotNil) req := packp.NewUploadPackRequest() req.Wants = append(req.Wants, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) reader, err := r.UploadPack(context.Background(), req) c.Assert(err, IsNil) s.checkObjectNumber(c, reader, 28) } func (s *UploadPackSuite) TestUploadPackInvalidReq(c *C) { r, err := s.Client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() req := packp.NewUploadPackRequest() req.Wants = append(req.Wants, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) req.Capabilities.Set(capability.Sideband) req.Capabilities.Set(capability.Sideband64k) _, err = r.UploadPack(context.Background(), req) c.Assert(err, NotNil) } func (s *UploadPackSuite) TestUploadPackNoChanges(c *C) { r, err := s.Client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() req := packp.NewUploadPackRequest() req.Wants = append(req.Wants, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) req.Haves = append(req.Haves, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) reader, err := r.UploadPack(context.Background(), req) c.Assert(err, Equals, transport.ErrEmptyUploadPackRequest) c.Assert(reader, IsNil) } func (s *UploadPackSuite) TestUploadPackMulti(c *C) { r, err := 
s.Client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() req := packp.NewUploadPackRequest() req.Wants = append(req.Wants, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) req.Wants = append(req.Wants, plumbing.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881")) reader, err := r.UploadPack(context.Background(), req) c.Assert(err, IsNil) s.checkObjectNumber(c, reader, 31) } func (s *UploadPackSuite) TestUploadPackPartial(c *C) { r, err := s.Client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) defer func() { c.Assert(r.Close(), IsNil) }() req := packp.NewUploadPackRequest() req.Wants = append(req.Wants, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) req.Haves = append(req.Haves, plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294")) reader, err := r.UploadPack(context.Background(), req) c.Assert(err, IsNil) s.checkObjectNumber(c, reader, 4) } func (s *UploadPackSuite) TestFetchError(c *C) { r, err := s.Client.NewUploadPackSession(s.Endpoint, s.EmptyAuth) c.Assert(err, IsNil) req := packp.NewUploadPackRequest() req.Wants = append(req.Wants, plumbing.NewHash("1111111111111111111111111111111111111111")) reader, err := r.UploadPack(context.Background(), req) c.Assert(err, NotNil) c.Assert(reader, IsNil) //XXX: We do not test Close error, since implementations might return // different errors if a previous error was found. 
} func (s *UploadPackSuite) checkObjectNumber(c *C, r io.Reader, n int) { b, err := ioutil.ReadAll(r) c.Assert(err, IsNil) buf := bytes.NewBuffer(b) storage := memory.NewStorage() err = packfile.UpdateObjectStorage(storage, buf) c.Assert(err, IsNil) c.Assert(len(storage.Objects), Equals, n) } golang-gopkg-src-d-go-git.v4-4.11.0/prune.go000066400000000000000000000032241345605224300203600ustar00rootroot00000000000000package git import ( "errors" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" ) type PruneHandler func(unreferencedObjectHash plumbing.Hash) error type PruneOptions struct { // OnlyObjectsOlderThan if set to non-zero value // selects only objects older than the time provided. OnlyObjectsOlderThan time.Time // Handler is called on matching objects Handler PruneHandler } var ErrLooseObjectsNotSupported = errors.New("Loose objects not supported") // DeleteObject deletes an object from a repository. // The type conveniently matches PruneHandler. func (r *Repository) DeleteObject(hash plumbing.Hash) error { los, ok := r.Storer.(storer.LooseObjectStorer) if !ok { return ErrLooseObjectsNotSupported } return los.DeleteLooseObject(hash) } func (r *Repository) Prune(opt PruneOptions) error { los, ok := r.Storer.(storer.LooseObjectStorer) if !ok { return ErrLooseObjectsNotSupported } pw := newObjectWalker(r.Storer) err := pw.walkAllRefs() if err != nil { return err } // Now walk all (loose) objects in storage. return los.ForEachObjectHash(func(hash plumbing.Hash) error { // Get out if we have seen this object. if pw.isSeen(hash) { return nil } // Otherwise it is a candidate for pruning. // Check out for too new objects next. if !opt.OnlyObjectsOlderThan.IsZero() { // Errors here are non-fatal. The object may be e.g. packed. // Or concurrently deleted. Skip such objects. t, err := los.LooseObjectTime(hash) if err != nil { return nil } // Skip too new objects. 
if !t.Before(opt.OnlyObjectsOlderThan) { return nil } } return opt.Handler(hash) }) } golang-gopkg-src-d-go-git.v4-4.11.0/prune_test.go000066400000000000000000000032061345605224300214170ustar00rootroot00000000000000package git import ( "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage" "gopkg.in/src-d/go-git.v4/storage/filesystem" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type PruneSuite struct { BaseSuite } var _ = Suite(&PruneSuite{}) func (s *PruneSuite) testPrune(c *C, deleteTime time.Time) { srcFs := fixtures.ByTag("unpacked").One().DotGit() var sto storage.Storer var err error sto = filesystem.NewStorage(srcFs, cache.NewObjectLRUDefault()) los := sto.(storer.LooseObjectStorer) c.Assert(los, NotNil) count := 0 err = los.ForEachObjectHash(func(_ plumbing.Hash) error { count++ return nil }) c.Assert(err, IsNil) r, err := Open(sto, srcFs) c.Assert(err, IsNil) c.Assert(r, NotNil) // Remove a branch so we can prune some objects. err = sto.RemoveReference(plumbing.ReferenceName("refs/heads/v4")) c.Assert(err, IsNil) err = sto.RemoveReference(plumbing.ReferenceName("refs/remotes/origin/v4")) c.Assert(err, IsNil) err = r.Prune(PruneOptions{ OnlyObjectsOlderThan: deleteTime, Handler: r.DeleteObject, }) c.Assert(err, IsNil) newCount := 0 err = los.ForEachObjectHash(func(_ plumbing.Hash) error { newCount++ return nil }) if deleteTime.IsZero() { c.Assert(newCount < count, Equals, true) } else { // Assume a delete time older than any of the objects was passed in. 
c.Assert(newCount, Equals, count) } } func (s *PruneSuite) TestPrune(c *C) { s.testPrune(c, time.Time{}) } func (s *PruneSuite) TestPruneWithNoDelete(c *C) { s.testPrune(c, time.Unix(0, 1)) } golang-gopkg-src-d-go-git.v4-4.11.0/references.go000066400000000000000000000154371345605224300213610ustar00rootroot00000000000000package git import ( "io" "sort" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/object" "gopkg.in/src-d/go-git.v4/utils/diff" "github.com/sergi/go-diff/diffmatchpatch" ) // References returns a slice of Commits for the file at "path", starting from // the commit provided that contains the file from the provided path. The last // commit into the returned slice is the commit where the file was created. // If the provided commit does not contains the specified path, a nil slice is // returned. The commits are sorted in commit order, newer to older. // // Caveats: // // - Moves and copies are not currently supported. // // - Cherry-picks are not detected unless there are no commits between them and // therefore can appear repeated in the list. (see git path-id for hints on how // to fix this). func references(c *object.Commit, path string) ([]*object.Commit, error) { var result []*object.Commit seen := make(map[plumbing.Hash]struct{}) if err := walkGraph(&result, &seen, c, path); err != nil { return nil, err } // TODO result should be returned without ordering sortCommits(result) // for merges of identical cherry-picks return removeComp(path, result, equivalent) } type commitSorterer struct { l []*object.Commit } func (s commitSorterer) Len() int { return len(s.l) } func (s commitSorterer) Less(i, j int) bool { return s.l[i].Committer.When.Before(s.l[j].Committer.When) || s.l[i].Committer.When.Equal(s.l[j].Committer.When) && s.l[i].Author.When.Before(s.l[j].Author.When) } func (s commitSorterer) Swap(i, j int) { s.l[i], s.l[j] = s.l[j], s.l[i] } // SortCommits sorts a commit list by commit date, from older to newer. 
func sortCommits(l []*object.Commit) { s := &commitSorterer{l} sort.Sort(s) } // Recursive traversal of the commit graph, generating a linear history of the // path. func walkGraph(result *[]*object.Commit, seen *map[plumbing.Hash]struct{}, current *object.Commit, path string) error { // check and update seen if _, ok := (*seen)[current.Hash]; ok { return nil } (*seen)[current.Hash] = struct{}{} // if the path is not in the current commit, stop searching. if _, err := current.File(path); err != nil { return nil } // optimization: don't traverse branches that does not // contain the path. parents, err := parentsContainingPath(path, current) if err != nil { return err } switch len(parents) { // if the path is not found in any of its parents, the path was // created by this commit; we must add it to the revisions list and // stop searching. This includes the case when current is the // initial commit. case 0: *result = append(*result, current) return nil case 1: // only one parent contains the path // if the file contents has change, add the current commit different, err := differentContents(path, current, parents) if err != nil { return err } if len(different) == 1 { *result = append(*result, current) } // in any case, walk the parent return walkGraph(result, seen, parents[0], path) default: // more than one parent contains the path // TODO: detect merges that had a conflict, because they must be // included in the result here. 
for _, p := range parents { err := walkGraph(result, seen, p, path) if err != nil { return err } } } return nil } func parentsContainingPath(path string, c *object.Commit) ([]*object.Commit, error) { // TODO: benchmark this method making git.object.Commit.parent public instead of using // an iterator var result []*object.Commit iter := c.Parents() for { parent, err := iter.Next() if err == io.EOF { return result, nil } if err != nil { return nil, err } if _, err := parent.File(path); err == nil { result = append(result, parent) } } } // Returns an slice of the commits in "cs" that has the file "path", but with different // contents than what can be found in "c". func differentContents(path string, c *object.Commit, cs []*object.Commit) ([]*object.Commit, error) { result := make([]*object.Commit, 0, len(cs)) h, found := blobHash(path, c) if !found { return nil, object.ErrFileNotFound } for _, cx := range cs { if hx, found := blobHash(path, cx); found && h != hx { result = append(result, cx) } } return result, nil } // blobHash returns the hash of a path in a commit func blobHash(path string, commit *object.Commit) (hash plumbing.Hash, found bool) { file, err := commit.File(path) if err != nil { var empty plumbing.Hash return empty, found } return file.Hash, true } type contentsComparatorFn func(path string, a, b *object.Commit) (bool, error) // Returns a new slice of commits, with duplicates removed. Expects a // sorted commit list. Duplication is defined according to "comp". It // will always keep the first commit of a series of duplicated commits. 
func removeComp(path string, cs []*object.Commit, comp contentsComparatorFn) ([]*object.Commit, error) { result := make([]*object.Commit, 0, len(cs)) if len(cs) == 0 { return result, nil } result = append(result, cs[0]) for i := 1; i < len(cs); i++ { equals, err := comp(path, cs[i], cs[i-1]) if err != nil { return nil, err } if !equals { result = append(result, cs[i]) } } return result, nil } // Equivalent commits are commits whose patch is the same. func equivalent(path string, a, b *object.Commit) (bool, error) { numParentsA := a.NumParents() numParentsB := b.NumParents() // the first commit is not equivalent to anyone // and "I think" merges can not be equivalent to anything if numParentsA != 1 || numParentsB != 1 { return false, nil } diffsA, err := patch(a, path) if err != nil { return false, err } diffsB, err := patch(b, path) if err != nil { return false, err } return sameDiffs(diffsA, diffsB), nil } func patch(c *object.Commit, path string) ([]diffmatchpatch.Diff, error) { // get contents of the file in the commit file, err := c.File(path) if err != nil { return nil, err } content, err := file.Contents() if err != nil { return nil, err } // get contents of the file in the first parent of the commit var contentParent string iter := c.Parents() parent, err := iter.Next() if err != nil { return nil, err } file, err = parent.File(path) if err != nil { contentParent = "" } else { contentParent, err = file.Contents() if err != nil { return nil, err } } // compare the contents of parent and child return diff.Do(content, contentParent), nil } func sameDiffs(a, b []diffmatchpatch.Diff) bool { if len(a) != len(b) { return false } for i := range a { if !sameDiff(a[i], b[i]) { return false } } return true } func sameDiff(a, b diffmatchpatch.Diff) bool { if a.Type != b.Type { return false } switch a.Type { case 0: return countLines(a.Text) == countLines(b.Text) case 1, -1: return a.Text == b.Text default: panic("unreachable") } } 
golang-gopkg-src-d-go-git.v4-4.11.0/references_test.go000066400000000000000000000403161345605224300224120ustar00rootroot00000000000000package git import ( "bytes" "fmt" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/object" "gopkg.in/src-d/go-git.v4/storage/memory" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type ReferencesSuite struct { BaseSuite } var _ = Suite(&ReferencesSuite{}) var referencesTests = [...]struct { // input data to revlist repo string commit string path string // expected output data form the revlist revs []string }{ // Tyba git-fixture {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "binary.jpg", []string{ "35e85108805c84807bc66a02d91535e1e24b38b9", }}, {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "CHANGELOG", []string{ "b8e471f58bcbca63b07bda20e428190409c2db47", }}, {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "go/example.go", []string{ "918c48b83bd081e863dbe1b80f8998f058cd8294", }}, {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "json/long.json", []string{ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a", }}, {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "json/short.json", []string{ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a", }}, {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "LICENSE", []string{ "b029517f6300c2da0f4b651b8642506cd6aaf45d", }}, {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "php/crappy.php", []string{ "918c48b83bd081e863dbe1b80f8998f058cd8294", }}, {"https://github.com/git-fixtures/basic.git", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "vendor/foo.go", []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", }}, {"https://github.com/jamesob/desk.git", "d4edaf0e8101fcea437ebd982d899fe2cc0f9f7b", 
"LICENSE", []string{ "ffcda27c2de6768ee83f3f4a027fa4ab57d50f09", }}, {"https://github.com/jamesob/desk.git", "d4edaf0e8101fcea437ebd982d899fe2cc0f9f7b", "README.md", []string{ "ffcda27c2de6768ee83f3f4a027fa4ab57d50f09", "2e87a2dcc63a115f9a61bd969d1e85fb132a431b", "215b0ac06225b0671bc3460d10da88c3406f796f", "0260eb7a2623dd2309ab439f74e8681fccdc4285", "d46b48933e94f30992486374fa9a6becfd28ea17", "9cb4df2a88efee8836f9b8ad27ca2717f624164e", "8c49acdec2ed441706d8799f8b17878aae4c1ffe", "ebaca0c6f54c23193ee8175c3530e370cb2dabe3", "77675f82039551a19de4fbccbe69366fe63680df", "b9741594fb8ab7374f9be07d6a09a3bf96719816", "04db6acd94de714ca48128c606b17ee1149a630e", "ff737bd8a962a714a446d7592fae423a56e61e12", "eadd03f7a1cc54810bd10eef6747ad9562ad246d", "b5072ab5c1cf89191d71f1244eecc5d1f369ef7e", "bfa6ebc9948f1939402b063c0a2a24bf2b1c1cc3", "d9aef39828c670dfdb172502021a2ebcda8cf2fb", "1a6b6e45c91e1831494eb139ee3f8e21649c7fb0", "09fdbe4612066cf63ea46aee43c7cfaaff02ecfb", "236f6526b1150cc1f1723566b4738f443fc70777", "7862953f470b62397d22f6782a884f5bea6d760d", "b0b0152d08c2333680266977a5bc9c4e50e1e968", "13ce6c1c77c831f381974aa1c62008a414bd2b37", "d3f3c8faca048d11709969fbfc0cdf2901b87578", "8777dde1abe18c805d021366643218d3f3356dd9", }}, {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "pylib/spinnaker/reconfigure_spinnaker.py", []string{ "a24001f6938d425d0e7504bdf5d27fc866a85c3d", }}, {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "pylib/spinnaker/validate_configuration.py", []string{ "a24001f6938d425d0e7504bdf5d27fc866a85c3d", "1e14f94bcf82694fdc7e2dcbbfdbbed58db0f4d9", "1e3d328a2cabda5d0aaddc5dec65271343e0dc37", "b5d999e2986e190d81767cd3cfeda0260f9f6fb8", }}, {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "pylib/spinnaker/fetch.py", []string{ "a24001f6938d425d0e7504bdf5d27fc866a85c3d", }}, {"https://github.com/spinnaker/spinnaker.git", 
"b32b2aecae2cfca4840dd480f8082da206a538da", "pylib/spinnaker/yaml_util.py", []string{ "a24001f6938d425d0e7504bdf5d27fc866a85c3d", "1e14f94bcf82694fdc7e2dcbbfdbbed58db0f4d9", "b5d999e2986e190d81767cd3cfeda0260f9f6fb8", "023d4fb17b76e0fe0764971df8b8538b735a1d67", }}, {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "dev/build_release.py", []string{ "a24001f6938d425d0e7504bdf5d27fc866a85c3d", "1e14f94bcf82694fdc7e2dcbbfdbbed58db0f4d9", "f42771ba298b93a7c4f5b16c5b30ab96c15305a8", "dd52703a50e71891f63fcf05df1f69836f4e7056", "0d9c9cef53af38cefcb6801bb492aaed3f2c9a42", "d375f1994ff4d0bdc32d614e698f1b50e1093f14", "abad497f11a366548aa95303c8c2f165fe7ae918", "6986d885626792dee4ef6b7474dfc9230c5bda54", "5422a86a10a8c5a1ef6728f5fc8894d9a4c54cb9", "09a4ea729b25714b6368959eea5113c99938f7b6", }}, {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "pkg_scripts/postUninstall.sh", []string{ "ce9f123d790717599aaeb76bc62510de437761be", }}, {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "install/first_google_boot.sh", []string{ "a24001f6938d425d0e7504bdf5d27fc866a85c3d", "de25f576b888569192e6442b0202d30ca7b2d8ec", "a596972a661d9a7deca8abd18b52ce1a39516e89", "9467ec579708b3c71dd9e5b3906772841c144a30", "c4a9091e4076cb740fa46e790dd5b658e19012ad", "6eb5d9c5225224bfe59c401182a2939d6c27fc00", "495c7118e7cf757aa04eab410b64bfb5b5149ad2", "dd2d03c19658ff96d371aef00e75e2e54702da0e", "2a3b1d3b134e937c7bafdab6cc2950e264bf5dee", "a57b08a9072f6a865f760551be2a4944f72f804a", "0777fadf4ca6f458d7071de414f9bd5417911037", }}, {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "install/install_spinnaker.sh", []string{ "0d9c9cef53af38cefcb6801bb492aaed3f2c9a42", }}, {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "install/install_fake_openjdk8.sh", []string{ "a24001f6938d425d0e7504bdf5d27fc866a85c3d", 
}}, {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "install/install_spinnaker.py", []string{ "a24001f6938d425d0e7504bdf5d27fc866a85c3d", "37f94770d81232b1895fca447878f68d65aac652", "46c9dcbb55ca3f4735e82ad006e8cae2fdd050d9", "124a88cfda413cb7182ca9c739a284a9e50042a1", "eb4faf67a8b775d7985d07a708e3ffeac4273580", "0d9c9cef53af38cefcb6801bb492aaed3f2c9a42", "01171a8a2e843bef3a574ba73b258ac29e5d5405", "739d8c6fe16edcb6ef9185dc74197de561b84315", "d33c2d1e350b03fb989eefc612e8c9d5fa7cadc2", }}, {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "install/__init__.py", []string{ "a24001f6938d425d0e7504bdf5d27fc866a85c3d", }}, {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "experimental/docker-compose/docker-compose.yml", []string{ "fda357835d889595dc39dfebc6181d863cce7d4f", "57c59e7144354a76e1beba69ae2f85db6b1727af", "7682dff881029c722d893a112a64fea6849a0428", "66f1c938c380a4096674b27540086656076a597f", "56dc238f6f397e93f1d1aad702976889c830e8bf", "b95e442c064935709e789fa02126f17ddceef10b", "f98965a8f42037bd038b86c3401da7e6dfbf4f2e", "5344429749e8b68b168d2707b7903692436cc2ea", "6a31f5d219766b0cec4ea4fbbbfe47bdcdb0ab8e", "ddaae195b628150233b0a48f50a1674fd9d1a924", "7119ad9cf7d4e4d8b059e5337374baae4adc7458", }}, {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "unittest/validate_configuration_test.py", []string{ "1e14f94bcf82694fdc7e2dcbbfdbbed58db0f4d9", "1e3d328a2cabda5d0aaddc5dec65271343e0dc37", }}, {"https://github.com/spinnaker/spinnaker.git", "f39d86f59a0781f130e8de6b2115329c1fbe9545", "README.adoc", []string{ "638f61b3331695f46f1a88095e26dea0f09f176b", "bd42370d3fe8d410e78acb96f81cb3d838ad1c21", "d6905eab6fec1841c7cf8e4484499f5c8d7d423e", "c0a70a0f5aa494f0ae01c55ba191f2325556489a", "811795c8a185e88f5d269195cb68b29c8d0fe170", "d6e6fe0194447cc280f942d6a2e0521b68ea7796", 
"174bdbf9edfb0ca88415dd4a673852d5b22e7036", "9944d6cf72b8f82d622d85dad7434472bc8f397d", "e805183c72f0426fb073728c01901c2fd2db1da6", "8ef83dd443a05e9122681950399edaa58a38d466", "d73f9cee49a5ad27a42a6e18af7c49a8f28ad8a8", }}, // FAILS /* // this contains an empty move {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "google/dev/build_google_tarball.py", []string{ "88e60ac93f832efc2616b3c165e99a8f2ffc3e0c", "9e49443da49b8c862cc140b660744f84eebcfa51", }}, */ /* {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "unittest/yaml_util_test.py", []string{ "edf909edb9319c5e615e4ce73da47bbdca388ebe", "023d4fb17b76e0fe0764971df8b8538b735a1d67", }}, */ /* {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "unittest/configurator_test.py", []string{ "1e14f94bcf82694fdc7e2dcbbfdbbed58db0f4d9", "edf909edb9319c5e615e4ce73da47bbdca388ebe", "d14f793a6cd7169ef708a4fc276ad876bd3edd4e", "023d4fb17b76e0fe0764971df8b8538b735a1d67", }}, */ /* // this contains a cherry-pick at 094d0e7d5d691 (with 3f34438d) {"https://github.com/jamesob/desk.git", "d4edaf0e8101fcea437ebd982d899fe2cc0f9f7b", "desk", []string{ "ffcda27c2de6768ee83f3f4a027fa4ab57d50f09", "a0c1e853158ccbaf95574220bbf3b54509034a9f", "decfc524570c407d6bba0f217e534c8b47dbdbee", "1413872d5b3af7cd674bbe0e1f23387cd5d940e6", "40cd5a91d916e7b2f331e4e85fdc52636fd7cff7", "8e07d73aa0e3780f8c7cf8ad1a6b263df26a0a52", "19c56f95720ac3630efe9f29b1a252581d6cbc0c", "9ea46ccc6d253cffb4b7b66e936987d87de136e4", "094d0e7d5d69141c98a606910ba64786c5565da0", "801e62706a9e4fef75fcaca9c78744de0bc36e6a", "eddf335f31c73624ed3f40dc5fcad50136074b2b", "c659093f06eb2bd68c6252caeab605e5cd8aa49e", "d94b3fe8ce0e3a474874d742992d432cd040582f", "93cddf036df2d8509f910063696acd556ca7600f", "b3d4cb0c826b16b301f088581d681654d8de6c07", "52d90f9b513dd3c5330663cba39396e6b8a3ba4e", "15919e99ded03c6ceea9ff98558e77a322a4dadb", 
"803bf37847633e2f685a46a27b11facf22efebec", "c07ad524ee1e616c70bf2ea7a0ee4f4a01195d78", "b91aff30f318fda461d009c308490613b394f3e2", "67cec1e8a3f21c6eb11678e3f31ffd228b55b783", "bbe404c78af7525fabc57b9e7aa7c100b0d39f7a", "5dd078848786c2babc2511e9502fa98518cf3535", "7970ae7cc165c5205945dfb704d67d53031f550a", "33091ac904747747ff30f107d4d0f22fa872eccf", "069f81cab12d185ba1b509be946c47897cd4fb1f", "13ce6c1c77c831f381974aa1c62008a414bd2b37", }}, */ /* {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "InstallSpinnaker.sh", []string{ "ce9f123d790717599aaeb76bc62510de437761be", "23673af3ad70b50bba7fdafadc2323302f5ba520", "b7015a5d36990d69a054482556127b9c7404a24a", "582da9622e3a72a19cd261a017276d72b5b0051a", "0c5bb1e4392e751f884f3c57de5d4aee72c40031", "c9c2a0ec03968ab17e8b16fdec9661eb1dbea173", "a3cdf880826b4d9af42b93f4a2df29a91ab31d35", "18526c447f5174d33c96aac6d6433318b0e2021c", "2a6288be1c8ea160c443ca3cd0fe826ff2387d37", "9e74d009894d73dd07773ea6b3bdd8323db980f7", "d2f6214b625db706384b378a29cc4c22237db97a", "202a9c720b3ba8106e022a0ad027ebe279040c78", "791bcd1592828d9d5d16e83f3a825fb08b0ba22d", "01e65d67eed8afcb67a6bdf1c962541f62b299c9", "6328ee836affafc1b52127147b5ca07300ac78e6", "3de4f77c105f700f50d9549d32b9a05a01b46c4b", "8980daf661408a3faa1f22c225702a5c1d11d5c9", "8eb116de9128c314ac8a6f5310ca500b8c74f5db", "88e841aad37b71b78a8fb88bc75fe69499d527c7", "370d61cdbc1f3c90db6759f1599ccbabd40ad6c1", "505577dc87d300cf562dc4702a05a5615d90d855", "b5c6053a46993b20d1b91e7b7206bffa54669ad7", "ba486de7c025457963701114c683dcd4708e1dee", "b41d7c0e5b20bbe7c8eb6606731a3ff68f4e3941", "a47d0aaeda421f06df248ad65bd58230766bf118", "495c7118e7cf757aa04eab410b64bfb5b5149ad2", "46670eb6477c353d837dbaba3cf36c5f8b86f037", "dd2d03c19658ff96d371aef00e75e2e54702da0e", "4bbcad219ec55a465fb48ce236cb10ca52d43b1f", "50d0556563599366f29cb286525780004fa5a317", "9a06d3f20eabb254d0a1e2ff7735ef007ccd595e", "d4b48a39aba7d3bd3e8abef2274a95b112d1ae73", }}, */ /* 
{"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "config/default-spinnaker-local.yml", []string{ "ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", "99534ecc895fe17a1d562bb3049d4168a04d0865", "caf6d62e8285d4681514dd8027356fb019bc97ff", "eaf7614cad81e8ab5c813dd4821129d0c04ea449", "5a2a845bc08974a36d599a4a4b7e25be833823b0", "41e96c54a478e5d09dd07ed7feb2d8d08d8c7e3c", "974b775a8978b120ff710cac93a21c7387b914c9", "87e459a9a044b3109dfeb943cc82c627b61d84a6", "5e09821cbd7d710405b61cab0a795c2982a71b9c", "8cc2d4bdb0a15aafc7fe02cdcb03ab90c974cafa", "3ce7b902a51bac2f10994f7d1f251b616c975e54", "a596972a661d9a7deca8abd18b52ce1a39516e89", "8980daf661408a3faa1f22c225702a5c1d11d5c9", }}, */ /* {"https://github.com/spinnaker/spinnaker.git", "b32b2aecae2cfca4840dd480f8082da206a538da", "config/spinnaker.yml", []string{ "ae904e8d60228c21c47368f6a10f1cc9ca3aeebf", "caf6d62e8285d4681514dd8027356fb019bc97ff", "eaf7614cad81e8ab5c813dd4821129d0c04ea449", "5a2a845bc08974a36d599a4a4b7e25be833823b0", "41e96c54a478e5d09dd07ed7feb2d8d08d8c7e3c", "974b775a8978b120ff710cac93a21c7387b914c9", "ed887f6547d7cd2b2d741184a06f97a0a704152b", "d4553dac205023fa77652308af1a2d1cf52138fb", "a596972a661d9a7deca8abd18b52ce1a39516e89", "66ac94f0b4442707fb6f695fbed91d62b3bd9d4a", "079e42e7c979541b6fab7343838f7b9fd4a360cd", }}, */ } func (s *ReferencesSuite) TestObjectNotFoundError(c *C) { h1 := plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a") hParent := plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea") url := fixtures.ByURL("https://github.com/git-fixtures/basic.git").One().DotGit().Root() storer := memory.NewStorage() r, err := Clone(storer, nil, &CloneOptions{ URL: url, }) c.Assert(err, IsNil) delete(storer.Objects, hParent) commit, err := r.CommitObject(h1) c.Assert(err, IsNil) _, err = references(commit, "LICENSE") c.Assert(err, Equals, plumbing.ErrObjectNotFound) } func (s *ReferencesSuite) TestRevList(c *C) { for _, t := range referencesTests { r := 
s.NewRepositoryFromPackfile(fixtures.ByURL(t.repo).One()) commit, err := r.CommitObject(plumbing.NewHash(t.commit)) c.Assert(err, IsNil) revs, err := references(commit, t.path) c.Assert(err, IsNil) c.Assert(len(revs), Equals, len(t.revs)) for i := range revs { if revs[i].Hash.String() != t.revs[i] { commit, err := s.Repository.CommitObject(plumbing.NewHash(t.revs[i])) c.Assert(err, IsNil) equiv, err := equivalent(t.path, revs[i], commit) c.Assert(err, IsNil) if equiv { fmt.Printf("cherry-pick detected: %s %s\n", revs[i].Hash.String(), t.revs[i]) } else { c.Fatalf("\nrepo=%s, commit=%s, path=%s, \n%s", t.repo, t.commit, t.path, compareSideBySide(t.revs, revs)) } } } } } // same length is assumed func compareSideBySide(a []string, b []*object.Commit) string { var buf bytes.Buffer buf.WriteString("\t EXPECTED OBTAINED ") var sep string var obt string for i := range a { obt = b[i].Hash.String() if a[i] != obt { sep = "------" } else { sep = " " } buf.WriteString(fmt.Sprintf("\n%d", i+1)) buf.WriteString(sep) buf.WriteString(a[i]) buf.WriteString(sep) buf.WriteString(obt) } return buf.String() } var cherryPicks = [...][]string{ // repo, path, commit a, commit b {"https://github.com/jamesob/desk.git", "desk", "094d0e7d5d69141c98a606910ba64786c5565da0", "3f34438d54f4a1ca86db8c0f03ed8eb38f20e22c"}, } // should detect cherry picks func (s *ReferencesSuite) TestEquivalent(c *C) { for _, t := range cherryPicks { cs := s.commits(c, t[0], t[2], t[3]) equiv, err := equivalent(t[1], cs[0], cs[1]) c.Assert(err, IsNil) c.Assert(equiv, Equals, true, Commentf("repo=%s, file=%s, a=%s b=%s", t[0], t[1], t[2], t[3])) } } // returns the commits from a slice of hashes func (s *ReferencesSuite) commits(c *C, repo string, hs ...string) []*object.Commit { r := s.NewRepositoryFromPackfile(fixtures.ByURL(repo).One()) result := make([]*object.Commit, 0, len(hs)) for _, h := range hs { commit, err := r.CommitObject(plumbing.NewHash(h)) c.Assert(err, IsNil) result = append(result, commit) } 
return result } golang-gopkg-src-d-go-git.v4-4.11.0/remote.go000066400000000000000000000556301345605224300205320ustar00rootroot00000000000000package git import ( "context" "errors" "fmt" "io" "gopkg.in/src-d/go-billy.v4/osfs" "gopkg.in/src-d/go-git.v4/config" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-git.v4/plumbing/object" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband" "gopkg.in/src-d/go-git.v4/plumbing/revlist" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/plumbing/transport" "gopkg.in/src-d/go-git.v4/plumbing/transport/client" "gopkg.in/src-d/go-git.v4/storage" "gopkg.in/src-d/go-git.v4/storage/filesystem" "gopkg.in/src-d/go-git.v4/storage/memory" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) var ( NoErrAlreadyUpToDate = errors.New("already up-to-date") ErrDeleteRefNotSupported = errors.New("server does not support delete-refs") ErrForceNeeded = errors.New("some refs were not updated") ) const ( // This describes the maximum number of commits to walk when // computing the haves to send to a server, for each ref in the // repo containing this remote, when not using the multi-ack // protocol. Setting this to 0 means there is no limit. maxHavesToVisitPerRef = 100 ) // Remote represents a connection to a remote repository. type Remote struct { c *config.RemoteConfig s storage.Storer } func newRemote(s storage.Storer, c *config.RemoteConfig) *Remote { return &Remote{s: s, c: c} } // Config returns the RemoteConfig object used to instantiate this Remote. 
func (r *Remote) Config() *config.RemoteConfig { return r.c } func (r *Remote) String() string { var fetch, push string if len(r.c.URLs) > 0 { fetch = r.c.URLs[0] push = r.c.URLs[0] } return fmt.Sprintf("%s\t%s (fetch)\n%[1]s\t%[3]s (push)", r.c.Name, fetch, push) } // Push performs a push to the remote. Returns NoErrAlreadyUpToDate if the // remote was already up-to-date. func (r *Remote) Push(o *PushOptions) error { return r.PushContext(context.Background(), o) } // PushContext performs a push to the remote. Returns NoErrAlreadyUpToDate if // the remote was already up-to-date. // // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects to the // transport operations. func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { if err := o.Validate(); err != nil { return err } if o.RemoteName != r.c.Name { return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name) } s, err := newSendPackSession(r.c.URLs[0], o.Auth) if err != nil { return err } defer ioutil.CheckClose(s, &err) ar, err := s.AdvertisedReferences() if err != nil { return err } remoteRefs, err := ar.AllReferences() if err != nil { return err } isDelete := false allDelete := true for _, rs := range o.RefSpecs { if rs.IsDelete() { isDelete = true } else { allDelete = false } if isDelete && !allDelete { break } } if isDelete && !ar.Capabilities.Supports(capability.DeleteRefs) { return ErrDeleteRefNotSupported } localRefs, err := r.references() if err != nil { return err } req, err := r.newReferenceUpdateRequest(o, localRefs, remoteRefs, ar) if err != nil { return err } if len(req.Commands) == 0 { return NoErrAlreadyUpToDate } objects := objectsToPush(req.Commands) haves, err := referencesToHashes(remoteRefs) if err != nil { return err } stop, err := r.s.Shallow() if err != nil { return err } // if we have shallow we should include this as part of the objects that // we are 
aware. haves = append(haves, stop...) var hashesToPush []plumbing.Hash // Avoid the expensive revlist operation if we're only doing deletes. if !allDelete { if r.c.IsFirstURLLocal() { // If we're are pushing to a local repo, it might be much // faster to use a local storage layer to get the commits // to ignore, when calculating the object revlist. localStorer := filesystem.NewStorage( osfs.New(r.c.URLs[0]), cache.NewObjectLRUDefault()) hashesToPush, err = revlist.ObjectsWithStorageForIgnores( r.s, localStorer, objects, haves) } else { hashesToPush, err = revlist.Objects(r.s, objects, haves) } if err != nil { return err } } rs, err := pushHashes(ctx, s, r.s, req, hashesToPush, r.useRefDeltas(ar)) if err != nil { return err } if err = rs.Error(); err != nil { return err } return r.updateRemoteReferenceStorage(req, rs) } func (r *Remote) useRefDeltas(ar *packp.AdvRefs) bool { return !ar.Capabilities.Supports(capability.OFSDelta) } func (r *Remote) newReferenceUpdateRequest( o *PushOptions, localRefs []*plumbing.Reference, remoteRefs storer.ReferenceStorer, ar *packp.AdvRefs, ) (*packp.ReferenceUpdateRequest, error) { req := packp.NewReferenceUpdateRequestFromCapabilities(ar.Capabilities) if o.Progress != nil { req.Progress = o.Progress if ar.Capabilities.Supports(capability.Sideband64k) { req.Capabilities.Set(capability.Sideband64k) } else if ar.Capabilities.Supports(capability.Sideband) { req.Capabilities.Set(capability.Sideband) } } if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req); err != nil { return nil, err } return req, nil } func (r *Remote) updateRemoteReferenceStorage( req *packp.ReferenceUpdateRequest, result *packp.ReportStatus, ) error { for _, spec := range r.c.Fetch { for _, c := range req.Commands { if !spec.Match(c.Name) { continue } local := spec.Dst(c.Name) ref := plumbing.NewHashReference(local, c.New) switch c.Action() { case packp.Create, packp.Update: if err := r.s.SetReference(ref); err != nil { return err } case 
packp.Delete: if err := r.s.RemoveReference(local); err != nil { return err } } } } return nil } // FetchContext fetches references along with the objects necessary to complete // their histories. // // Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are // no changes to be fetched, or an error. // // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects to the // transport operations. func (r *Remote) FetchContext(ctx context.Context, o *FetchOptions) error { _, err := r.fetch(ctx, o) return err } // Fetch fetches references along with the objects necessary to complete their // histories. // // Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are // no changes to be fetched, or an error. func (r *Remote) Fetch(o *FetchOptions) error { return r.FetchContext(context.Background(), o) } func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.ReferenceStorer, err error) { if o.RemoteName == "" { o.RemoteName = r.c.Name } if err = o.Validate(); err != nil { return nil, err } if len(o.RefSpecs) == 0 { o.RefSpecs = r.c.Fetch } s, err := newUploadPackSession(r.c.URLs[0], o.Auth) if err != nil { return nil, err } defer ioutil.CheckClose(s, &err) ar, err := s.AdvertisedReferences() if err != nil { return nil, err } req, err := r.newUploadPackRequest(o, ar) if err != nil { return nil, err } remoteRefs, err := ar.AllReferences() if err != nil { return nil, err } localRefs, err := r.references() if err != nil { return nil, err } refs, err := calculateRefs(o.RefSpecs, remoteRefs, o.Tags) if err != nil { return nil, err } req.Wants, err = getWants(r.s, refs) if len(req.Wants) > 0 { req.Haves, err = getHaves(localRefs, remoteRefs, r.s) if err != nil { return nil, err } if err = r.fetchPack(ctx, o, s, req); err != nil { return nil, err } } updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, o.Tags, 
o.Force) if err != nil { return nil, err } if !updated { return remoteRefs, NoErrAlreadyUpToDate } return remoteRefs, nil } func newUploadPackSession(url string, auth transport.AuthMethod) (transport.UploadPackSession, error) { c, ep, err := newClient(url) if err != nil { return nil, err } return c.NewUploadPackSession(ep, auth) } func newSendPackSession(url string, auth transport.AuthMethod) (transport.ReceivePackSession, error) { c, ep, err := newClient(url) if err != nil { return nil, err } return c.NewReceivePackSession(ep, auth) } func newClient(url string) (transport.Transport, *transport.Endpoint, error) { ep, err := transport.NewEndpoint(url) if err != nil { return nil, nil, err } c, err := client.NewClient(ep) if err != nil { return nil, nil, err } return c, ep, err } func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.UploadPackSession, req *packp.UploadPackRequest) (err error) { reader, err := s.UploadPack(ctx, req) if err != nil { return err } defer ioutil.CheckClose(reader, &err) if err = r.updateShallow(o, reader); err != nil { return err } if err = packfile.UpdateObjectStorage(r.s, buildSidebandIfSupported(req.Capabilities, reader, o.Progress), ); err != nil { return err } return err } func (r *Remote) addReferencesToUpdate( refspecs []config.RefSpec, localRefs []*plumbing.Reference, remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest, ) error { // This references dictionary will be used to search references by name. 
refsDict := make(map[string]*plumbing.Reference) for _, ref := range localRefs { refsDict[ref.Name().String()] = ref } for _, rs := range refspecs { if rs.IsDelete() { if err := r.deleteReferences(rs, remoteRefs, req); err != nil { return err } } else { err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req) if err != nil { return err } } } return nil } func (r *Remote) addOrUpdateReferences( rs config.RefSpec, localRefs []*plumbing.Reference, refsDict map[string]*plumbing.Reference, remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest, ) error { // If it is not a wilcard refspec we can directly search for the reference // in the references dictionary. if !rs.IsWildcard() { ref, ok := refsDict[rs.Src()] if !ok { return nil } return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req) } for _, ref := range localRefs { err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req) if err != nil { return err } } return nil } func (r *Remote) deleteReferences(rs config.RefSpec, remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest) error { iter, err := remoteRefs.IterReferences() if err != nil { return err } return iter.ForEach(func(ref *plumbing.Reference) error { if ref.Type() != plumbing.HashReference { return nil } if rs.Dst("") != ref.Name() { return nil } cmd := &packp.Command{ Name: ref.Name(), Old: ref.Hash(), New: plumbing.ZeroHash, } req.Commands = append(req.Commands, cmd) return nil }) } func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec, remoteRefs storer.ReferenceStorer, localRef *plumbing.Reference, req *packp.ReferenceUpdateRequest) error { if localRef.Type() != plumbing.HashReference { return nil } if !rs.Match(localRef.Name()) { return nil } cmd := &packp.Command{ Name: rs.Dst(localRef.Name()), Old: plumbing.ZeroHash, New: localRef.Hash(), } remoteRef, err := remoteRefs.Reference(cmd.Name) if err == nil { if remoteRef.Type() != plumbing.HashReference { //TODO: check actual git behavior 
here return nil } cmd.Old = remoteRef.Hash() } else if err != plumbing.ErrReferenceNotFound { return err } if cmd.Old == cmd.New { return nil } if !rs.IsForceUpdate() { if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil { return err } } req.Commands = append(req.Commands, cmd) return nil } func (r *Remote) references() ([]*plumbing.Reference, error) { var localRefs []*plumbing.Reference iter, err := r.s.IterReferences() if err != nil { return nil, err } for { ref, err := iter.Next() if err == io.EOF { break } if err != nil { return nil, err } localRefs = append(localRefs, ref) } return localRefs, nil } func getRemoteRefsFromStorer(remoteRefStorer storer.ReferenceStorer) ( map[plumbing.Hash]bool, error) { remoteRefs := map[plumbing.Hash]bool{} iter, err := remoteRefStorer.IterReferences() if err != nil { return nil, err } err = iter.ForEach(func(ref *plumbing.Reference) error { if ref.Type() != plumbing.HashReference { return nil } remoteRefs[ref.Hash()] = true return nil }) if err != nil { return nil, err } return remoteRefs, nil } // getHavesFromRef populates the given `haves` map with the given // reference, and up to `maxHavesToVisitPerRef` ancestor commits. func getHavesFromRef( ref *plumbing.Reference, remoteRefs map[plumbing.Hash]bool, s storage.Storer, haves map[plumbing.Hash]bool, ) error { h := ref.Hash() if haves[h] { return nil } // No need to load the commit if we know the remote already // has this hash. if remoteRefs[h] { haves[h] = true return nil } commit, err := object.GetCommit(s, h) if err != nil { // Ignore the error if this isn't a commit. haves[ref.Hash()] = true return nil } // Until go-git supports proper commit negotiation during an // upload pack request, include up to `maxHavesToVisitPerRef` // commits from the history of each ref. 
walker := object.NewCommitPreorderIter(commit, haves, nil) toVisit := maxHavesToVisitPerRef return walker.ForEach(func(c *object.Commit) error { haves[c.Hash] = true toVisit-- // If toVisit starts out at 0 (indicating there is no // max), then it will be negative here and we won't stop // early. if toVisit == 0 || remoteRefs[c.Hash] { return storer.ErrStop } return nil }) } func getHaves( localRefs []*plumbing.Reference, remoteRefStorer storer.ReferenceStorer, s storage.Storer, ) ([]plumbing.Hash, error) { haves := map[plumbing.Hash]bool{} // Build a map of all the remote references, to avoid loading too // many parent commits for references we know don't need to be // transferred. remoteRefs, err := getRemoteRefsFromStorer(remoteRefStorer) if err != nil { return nil, err } for _, ref := range localRefs { if haves[ref.Hash()] { continue } if ref.Type() != plumbing.HashReference { continue } err = getHavesFromRef(ref, remoteRefs, s, haves) if err != nil { return nil, err } } var result []plumbing.Hash for h := range haves { result = append(result, h) } return result, nil } const refspecAllTags = "+refs/tags/*:refs/tags/*" func calculateRefs( spec []config.RefSpec, remoteRefs storer.ReferenceStorer, tagMode TagMode, ) (memory.ReferenceStorage, error) { if tagMode == AllTags { spec = append(spec, refspecAllTags) } refs := make(memory.ReferenceStorage) for _, s := range spec { if err := doCalculateRefs(s, remoteRefs, refs); err != nil { return nil, err } } return refs, nil } func doCalculateRefs( s config.RefSpec, remoteRefs storer.ReferenceStorer, refs memory.ReferenceStorage, ) error { iter, err := remoteRefs.IterReferences() if err != nil { return err } var matched bool err = iter.ForEach(func(ref *plumbing.Reference) error { if !s.Match(ref.Name()) { return nil } if ref.Type() == plumbing.SymbolicReference { target, err := storer.ResolveReference(remoteRefs, ref.Name()) if err != nil { return err } ref = plumbing.NewHashReference(ref.Name(), target.Hash()) } if 
ref.Type() != plumbing.HashReference { return nil } matched = true if err := refs.SetReference(ref); err != nil { return err } if !s.IsWildcard() { return storer.ErrStop } return nil }) if !matched && !s.IsWildcard() { return fmt.Errorf("couldn't find remote ref %q", s.Src()) } return err } func getWants(localStorer storage.Storer, refs memory.ReferenceStorage) ([]plumbing.Hash, error) { wants := map[plumbing.Hash]bool{} for _, ref := range refs { hash := ref.Hash() exists, err := objectExists(localStorer, ref.Hash()) if err != nil { return nil, err } if !exists { wants[hash] = true } } var result []plumbing.Hash for h := range wants { result = append(result, h) } return result, nil } func objectExists(s storer.EncodedObjectStorer, h plumbing.Hash) (bool, error) { _, err := s.EncodedObject(plumbing.AnyObject, h) if err == plumbing.ErrObjectNotFound { return false, nil } return true, err } func checkFastForwardUpdate(s storer.EncodedObjectStorer, remoteRefs storer.ReferenceStorer, cmd *packp.Command) error { if cmd.Old == plumbing.ZeroHash { _, err := remoteRefs.Reference(cmd.Name) if err == plumbing.ErrReferenceNotFound { return nil } if err != nil { return err } return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String()) } ff, err := isFastForward(s, cmd.Old, cmd.New) if err != nil { return err } if !ff { return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String()) } return nil } func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash) (bool, error) { c, err := object.GetCommit(s, new) if err != nil { return false, err } found := false iter := object.NewCommitPreorderIter(c, nil, nil) err = iter.ForEach(func(c *object.Commit) error { if c.Hash != old { return nil } found = true return storer.ErrStop }) return found, err } func (r *Remote) newUploadPackRequest(o *FetchOptions, ar *packp.AdvRefs) (*packp.UploadPackRequest, error) { req := packp.NewUploadPackRequestFromCapabilities(ar.Capabilities) if o.Depth != 0 { req.Depth = 
packp.DepthCommits(o.Depth) if err := req.Capabilities.Set(capability.Shallow); err != nil { return nil, err } } if o.Progress == nil && ar.Capabilities.Supports(capability.NoProgress) { if err := req.Capabilities.Set(capability.NoProgress); err != nil { return nil, err } } isWildcard := true for _, s := range o.RefSpecs { if !s.IsWildcard() { isWildcard = false break } } if isWildcard && o.Tags == TagFollowing && ar.Capabilities.Supports(capability.IncludeTag) { if err := req.Capabilities.Set(capability.IncludeTag); err != nil { return nil, err } } return req, nil } func buildSidebandIfSupported(l *capability.List, reader io.Reader, p sideband.Progress) io.Reader { var t sideband.Type switch { case l.Supports(capability.Sideband): t = sideband.Sideband case l.Supports(capability.Sideband64k): t = sideband.Sideband64k default: return reader } d := sideband.NewDemuxer(t, reader) d.Progress = p return d } func (r *Remote) updateLocalReferenceStorage( specs []config.RefSpec, fetchedRefs, remoteRefs memory.ReferenceStorage, tagMode TagMode, force bool, ) (updated bool, err error) { isWildcard := true forceNeeded := false for _, spec := range specs { if !spec.IsWildcard() { isWildcard = false } for _, ref := range fetchedRefs { if !spec.Match(ref.Name()) { continue } if ref.Type() != plumbing.HashReference { continue } localName := spec.Dst(ref.Name()) old, _ := storer.ResolveReference(r.s, localName) new := plumbing.NewHashReference(localName, ref.Hash()) // If the ref exists locally as a branch and force is not specified, // only update if the new ref is an ancestor of the old if old != nil && old.Name().IsBranch() && !force && !spec.IsForceUpdate() { ff, err := isFastForward(r.s, old.Hash(), new.Hash()) if err != nil { return updated, err } if !ff { forceNeeded = true continue } } refUpdated, err := checkAndUpdateReferenceStorerIfNeeded(r.s, new, old) if err != nil { return updated, err } if refUpdated { updated = true } } } if tagMode == NoTags { return updated, nil 
} tags := fetchedRefs if isWildcard { tags = remoteRefs } tagUpdated, err := r.buildFetchedTags(tags) if err != nil { return updated, err } if tagUpdated { updated = true } if err == nil && forceNeeded { err = ErrForceNeeded } return } func (r *Remote) buildFetchedTags(refs memory.ReferenceStorage) (updated bool, err error) { for _, ref := range refs { if !ref.Name().IsTag() { continue } _, err := r.s.EncodedObject(plumbing.AnyObject, ref.Hash()) if err == plumbing.ErrObjectNotFound { continue } if err != nil { return false, err } refUpdated, err := updateReferenceStorerIfNeeded(r.s, ref) if err != nil { return updated, err } if refUpdated { updated = true } } return } // List the references on the remote repository. func (r *Remote) List(o *ListOptions) (rfs []*plumbing.Reference, err error) { s, err := newUploadPackSession(r.c.URLs[0], o.Auth) if err != nil { return nil, err } defer ioutil.CheckClose(s, &err) ar, err := s.AdvertisedReferences() if err != nil { return nil, err } allRefs, err := ar.AllReferences() if err != nil { return nil, err } refs, err := allRefs.IterReferences() if err != nil { return nil, err } var resultRefs []*plumbing.Reference refs.ForEach(func(ref *plumbing.Reference) error { resultRefs = append(resultRefs, ref) return nil }) return resultRefs, nil } func objectsToPush(commands []*packp.Command) []plumbing.Hash { var objects []plumbing.Hash for _, cmd := range commands { if cmd.New == plumbing.ZeroHash { continue } objects = append(objects, cmd.New) } return objects } func referencesToHashes(refs storer.ReferenceStorer) ([]plumbing.Hash, error) { iter, err := refs.IterReferences() if err != nil { return nil, err } var hs []plumbing.Hash err = iter.ForEach(func(ref *plumbing.Reference) error { if ref.Type() != plumbing.HashReference { return nil } hs = append(hs, ref.Hash()) return nil }) if err != nil { return nil, err } return hs, nil } func pushHashes( ctx context.Context, sess transport.ReceivePackSession, s storage.Storer, req 
*packp.ReferenceUpdateRequest, hs []plumbing.Hash, useRefDeltas bool, ) (*packp.ReportStatus, error) { rd, wr := io.Pipe() req.Packfile = rd config, err := s.Config() if err != nil { return nil, err } // Set buffer size to 1 so the error message can be written when // ReceivePack fails. Otherwise the goroutine will be blocked writing // to the channel. done := make(chan error, 1) go func() { e := packfile.NewEncoder(wr, s, useRefDeltas) if _, err := e.Encode(hs, config.Pack.Window); err != nil { done <- wr.CloseWithError(err) return } done <- wr.Close() }() rs, err := sess.ReceivePack(ctx, req) if err != nil { // close the pipe to unlock encode write _ = rd.Close() return nil, err } if err := <-done; err != nil { return nil, err } return rs, nil } func (r *Remote) updateShallow(o *FetchOptions, resp *packp.UploadPackResponse) error { if o.Depth == 0 || len(resp.Shallows) == 0 { return nil } shallows, err := r.s.Shallow() if err != nil { return err } outer: for _, s := range resp.Shallows { for _, oldS := range shallows { if s == oldS { continue outer } } shallows = append(shallows, s) } return r.s.SetShallow(shallows) } golang-gopkg-src-d-go-git.v4-4.11.0/remote_test.go000066400000000000000000000600641345605224300215660ustar00rootroot00000000000000package git import ( "bytes" "context" "io" "io/ioutil" "os" "runtime" "time" "gopkg.in/src-d/go-git.v4/config" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage" "gopkg.in/src-d/go-git.v4/storage/filesystem" "gopkg.in/src-d/go-git.v4/storage/memory" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4/osfs" "gopkg.in/src-d/go-git-fixtures.v3" ) type RemoteSuite struct { BaseSuite } var _ = Suite(&RemoteSuite{}) func (s *RemoteSuite) TestFetchInvalidEndpoint(c *C) { r := newRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"http://\\"}}) err := r.Fetch(&FetchOptions{RemoteName: "foo"}) c.Assert(err, ErrorMatches, ".*invalid character.*") } func (s *RemoteSuite) TestFetchNonExistentEndpoint(c *C) { r := newRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"ssh://non-existent/foo.git"}}) err := r.Fetch(&FetchOptions{}) c.Assert(err, NotNil) } func (s *RemoteSuite) TestFetchInvalidSchemaEndpoint(c *C) { r := newRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"qux://foo"}}) err := r.Fetch(&FetchOptions{}) c.Assert(err, ErrorMatches, ".*unsupported scheme.*") } func (s *RemoteSuite) TestFetchInvalidFetchOptions(c *C) { r := newRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"qux://foo"}}) invalid := config.RefSpec("^*$ñ") err := r.Fetch(&FetchOptions{RefSpecs: []config.RefSpec{invalid}}) c.Assert(err, Equals, config.ErrRefSpecMalformedSeparator) } func (s *RemoteSuite) TestFetchWildcard(c *C) { r := newRemote(memory.NewStorage(), &config.RemoteConfig{ URLs: []string{s.GetBasicLocalRepositoryURL()}, }) s.testFetch(c, r, &FetchOptions{ RefSpecs: []config.RefSpec{ config.RefSpec("+refs/heads/*:refs/remotes/origin/*"), }, }, []*plumbing.Reference{ plumbing.NewReferenceFromStrings("refs/remotes/origin/master", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5"), plumbing.NewReferenceFromStrings("refs/remotes/origin/branch", "e8d3ffab552895c19b9fcf7aa264d277cde33881"), plumbing.NewReferenceFromStrings("refs/tags/v1.0.0", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5"), }) } func (s *RemoteSuite) TestFetchWildcardTags(c *C) { r := newRemote(memory.NewStorage(), &config.RemoteConfig{ URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())}, }) s.testFetch(c, r, &FetchOptions{ 
RefSpecs: []config.RefSpec{ config.RefSpec("+refs/heads/*:refs/remotes/origin/*"), }, }, []*plumbing.Reference{ plumbing.NewReferenceFromStrings("refs/remotes/origin/master", "f7b877701fbf855b44c0a9e86f3fdce2c298b07f"), plumbing.NewReferenceFromStrings("refs/tags/annotated-tag", "b742a2a9fa0afcfa9a6fad080980fbc26b007c69"), plumbing.NewReferenceFromStrings("refs/tags/tree-tag", "152175bf7e5580299fa1f0ba41ef6474cc043b70"), plumbing.NewReferenceFromStrings("refs/tags/commit-tag", "ad7897c0fb8e7d9a9ba41fa66072cf06095a6cfc"), plumbing.NewReferenceFromStrings("refs/tags/blob-tag", "fe6cb94756faa81e5ed9240f9191b833db5f40ae"), plumbing.NewReferenceFromStrings("refs/tags/lightweight-tag", "f7b877701fbf855b44c0a9e86f3fdce2c298b07f"), }) } func (s *RemoteSuite) TestFetch(c *C) { r := newRemote(memory.NewStorage(), &config.RemoteConfig{ URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())}, }) s.testFetch(c, r, &FetchOptions{ RefSpecs: []config.RefSpec{ config.RefSpec("+refs/heads/master:refs/remotes/origin/master"), }, }, []*plumbing.Reference{ plumbing.NewReferenceFromStrings("refs/remotes/origin/master", "f7b877701fbf855b44c0a9e86f3fdce2c298b07f"), }) } func (s *RemoteSuite) TestFetchNonExistantReference(c *C) { r := newRemote(memory.NewStorage(), &config.RemoteConfig{ URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())}, }) err := r.Fetch(&FetchOptions{ RefSpecs: []config.RefSpec{ config.RefSpec("+refs/heads/foo:refs/remotes/origin/foo"), }, }) c.Assert(err, ErrorMatches, "couldn't find remote ref.*") } func (s *RemoteSuite) TestFetchContext(c *C) { r := newRemote(memory.NewStorage(), &config.RemoteConfig{ URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())}, }) ctx, cancel := context.WithCancel(context.Background()) cancel() err := r.FetchContext(ctx, &FetchOptions{ RefSpecs: []config.RefSpec{ config.RefSpec("+refs/heads/master:refs/remotes/origin/master"), }, }) c.Assert(err, NotNil) } func (s *RemoteSuite) 
TestFetchWithAllTags(c *C) { r := newRemote(memory.NewStorage(), &config.RemoteConfig{ URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())}, }) s.testFetch(c, r, &FetchOptions{ Tags: AllTags, RefSpecs: []config.RefSpec{ config.RefSpec("+refs/heads/master:refs/remotes/origin/master"), }, }, []*plumbing.Reference{ plumbing.NewReferenceFromStrings("refs/remotes/origin/master", "f7b877701fbf855b44c0a9e86f3fdce2c298b07f"), plumbing.NewReferenceFromStrings("refs/tags/annotated-tag", "b742a2a9fa0afcfa9a6fad080980fbc26b007c69"), plumbing.NewReferenceFromStrings("refs/tags/tree-tag", "152175bf7e5580299fa1f0ba41ef6474cc043b70"), plumbing.NewReferenceFromStrings("refs/tags/commit-tag", "ad7897c0fb8e7d9a9ba41fa66072cf06095a6cfc"), plumbing.NewReferenceFromStrings("refs/tags/blob-tag", "fe6cb94756faa81e5ed9240f9191b833db5f40ae"), plumbing.NewReferenceFromStrings("refs/tags/lightweight-tag", "f7b877701fbf855b44c0a9e86f3fdce2c298b07f"), }) } func (s *RemoteSuite) TestFetchWithNoTags(c *C) { r := newRemote(memory.NewStorage(), &config.RemoteConfig{ URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())}, }) s.testFetch(c, r, &FetchOptions{ Tags: NoTags, RefSpecs: []config.RefSpec{ config.RefSpec("+refs/heads/*:refs/remotes/origin/*"), }, }, []*plumbing.Reference{ plumbing.NewReferenceFromStrings("refs/remotes/origin/master", "f7b877701fbf855b44c0a9e86f3fdce2c298b07f"), }) } func (s *RemoteSuite) TestFetchWithDepth(c *C) { r := newRemote(memory.NewStorage(), &config.RemoteConfig{ URLs: []string{s.GetBasicLocalRepositoryURL()}, }) s.testFetch(c, r, &FetchOptions{ Depth: 1, RefSpecs: []config.RefSpec{ config.RefSpec("+refs/heads/*:refs/remotes/origin/*"), }, }, []*plumbing.Reference{ plumbing.NewReferenceFromStrings("refs/remotes/origin/master", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5"), plumbing.NewReferenceFromStrings("refs/remotes/origin/branch", "e8d3ffab552895c19b9fcf7aa264d277cde33881"), plumbing.NewReferenceFromStrings("refs/tags/v1.0.0", 
"6ecf0ef2c2dffb796033e5a02219af86ec6584e5"), }) c.Assert(r.s.(*memory.Storage).Objects, HasLen, 18) } func (s *RemoteSuite) testFetch(c *C, r *Remote, o *FetchOptions, expected []*plumbing.Reference) { err := r.Fetch(o) c.Assert(err, IsNil) var refs int l, err := r.s.IterReferences() c.Assert(err, IsNil) l.ForEach(func(r *plumbing.Reference) error { refs++; return nil }) c.Assert(refs, Equals, len(expected)) for _, exp := range expected { r, err := r.s.Reference(exp.Name()) c.Assert(err, IsNil) c.Assert(exp.String(), Equals, r.String()) } } func (s *RemoteSuite) TestFetchWithProgress(c *C) { url := s.GetBasicLocalRepositoryURL() sto := memory.NewStorage() buf := bytes.NewBuffer(nil) r := newRemote(sto, &config.RemoteConfig{Name: "foo", URLs: []string{url}}) refspec := config.RefSpec("+refs/heads/*:refs/remotes/origin/*") err := r.Fetch(&FetchOptions{ RefSpecs: []config.RefSpec{refspec}, Progress: buf, }) c.Assert(err, IsNil) c.Assert(sto.Objects, HasLen, 31) c.Assert(buf.Len(), Not(Equals), 0) } type mockPackfileWriter struct { storage.Storer PackfileWriterCalled bool } func (m *mockPackfileWriter) PackfileWriter() (io.WriteCloser, error) { m.PackfileWriterCalled = true return m.Storer.(storer.PackfileWriter).PackfileWriter() } func (s *RemoteSuite) TestFetchWithPackfileWriter(c *C) { dir, err := ioutil.TempDir("", "fetch") c.Assert(err, IsNil) defer os.RemoveAll(dir) // clean up fss := filesystem.NewStorage(osfs.New(dir), cache.NewObjectLRUDefault()) c.Assert(err, IsNil) mock := &mockPackfileWriter{Storer: fss} url := s.GetBasicLocalRepositoryURL() r := newRemote(mock, &config.RemoteConfig{Name: "foo", URLs: []string{url}}) refspec := config.RefSpec("+refs/heads/*:refs/remotes/origin/*") err = r.Fetch(&FetchOptions{ RefSpecs: []config.RefSpec{refspec}, }) c.Assert(err, IsNil) var count int iter, err := mock.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) iter.ForEach(func(plumbing.EncodedObject) error { count++ return nil }) c.Assert(count, Equals, 31) 
c.Assert(mock.PackfileWriterCalled, Equals, true) } func (s *RemoteSuite) TestFetchNoErrAlreadyUpToDate(c *C) { url := s.GetBasicLocalRepositoryURL() s.doTestFetchNoErrAlreadyUpToDate(c, url) } func (s *RemoteSuite) TestFetchNoErrAlreadyUpToDateButStillUpdateLocalRemoteRefs(c *C) { r := newRemote(memory.NewStorage(), &config.RemoteConfig{ URLs: []string{s.GetBasicLocalRepositoryURL()}, }) o := &FetchOptions{ RefSpecs: []config.RefSpec{ config.RefSpec("+refs/heads/*:refs/remotes/origin/*"), }, } err := r.Fetch(o) c.Assert(err, IsNil) // Simulate an out of date remote ref even though we have the new commit locally r.s.SetReference(plumbing.NewReferenceFromStrings( "refs/remotes/origin/master", "918c48b83bd081e863dbe1b80f8998f058cd8294", )) err = r.Fetch(o) c.Assert(err, IsNil) exp := plumbing.NewReferenceFromStrings( "refs/remotes/origin/master", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", ) ref, err := r.s.Reference("refs/remotes/origin/master") c.Assert(err, IsNil) c.Assert(exp.String(), Equals, ref.String()) } func (s *RemoteSuite) TestFetchNoErrAlreadyUpToDateWithNonCommitObjects(c *C) { fixture := fixtures.ByTag("tags").One() url := s.GetLocalRepositoryURL(fixture) s.doTestFetchNoErrAlreadyUpToDate(c, url) } func (s *RemoteSuite) doTestFetchNoErrAlreadyUpToDate(c *C, url string) { r := newRemote(memory.NewStorage(), &config.RemoteConfig{URLs: []string{url}}) o := &FetchOptions{ RefSpecs: []config.RefSpec{ config.RefSpec("+refs/heads/*:refs/remotes/origin/*"), }, } err := r.Fetch(o) c.Assert(err, IsNil) err = r.Fetch(o) c.Assert(err, Equals, NoErrAlreadyUpToDate) } func (s *RemoteSuite) testFetchFastForward(c *C, sto storage.Storer) { r := newRemote(sto, &config.RemoteConfig{ URLs: []string{s.GetBasicLocalRepositoryURL()}, }) s.testFetch(c, r, &FetchOptions{ RefSpecs: []config.RefSpec{ config.RefSpec("+refs/heads/master:refs/heads/master"), }, }, []*plumbing.Reference{ plumbing.NewReferenceFromStrings("refs/heads/master", 
"6ecf0ef2c2dffb796033e5a02219af86ec6584e5"), }) // First make sure that we error correctly when a force is required. err := r.Fetch(&FetchOptions{ RefSpecs: []config.RefSpec{ config.RefSpec("refs/heads/branch:refs/heads/master"), }, }) c.Assert(err, Equals, ErrForceNeeded) // And that forcing it fixes the problem. err = r.Fetch(&FetchOptions{ RefSpecs: []config.RefSpec{ config.RefSpec("+refs/heads/branch:refs/heads/master"), }, }) c.Assert(err, IsNil) // Now test that a fast-forward, non-force fetch works. r.s.SetReference(plumbing.NewReferenceFromStrings( "refs/heads/master", "918c48b83bd081e863dbe1b80f8998f058cd8294", )) s.testFetch(c, r, &FetchOptions{ RefSpecs: []config.RefSpec{ config.RefSpec("refs/heads/master:refs/heads/master"), }, }, []*plumbing.Reference{ plumbing.NewReferenceFromStrings("refs/heads/master", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5"), }) } func (s *RemoteSuite) TestFetchFastForwardMem(c *C) { s.testFetchFastForward(c, memory.NewStorage()) } func (s *RemoteSuite) TestFetchFastForwardFS(c *C) { dir, err := ioutil.TempDir("", "fetch") c.Assert(err, IsNil) defer os.RemoveAll(dir) // clean up fss := filesystem.NewStorage(osfs.New(dir), cache.NewObjectLRUDefault()) // This exercises `storage.filesystem.Storage.CheckAndSetReference()`. 
s.testFetchFastForward(c, fss) } func (s *RemoteSuite) TestString(c *C) { r := newRemote(nil, &config.RemoteConfig{ Name: "foo", URLs: []string{"https://github.com/git-fixtures/basic.git"}, }) c.Assert(r.String(), Equals, ""+ "foo\thttps://github.com/git-fixtures/basic.git (fetch)\n"+ "foo\thttps://github.com/git-fixtures/basic.git (push)", ) } func (s *RemoteSuite) TestPushToEmptyRepository(c *C) { url := c.MkDir() server, err := PlainInit(url, true) c.Assert(err, IsNil) srcFs := fixtures.Basic().One().DotGit() sto := filesystem.NewStorage(srcFs, cache.NewObjectLRUDefault()) r := newRemote(sto, &config.RemoteConfig{ Name: DefaultRemoteName, URLs: []string{url}, }) rs := config.RefSpec("refs/heads/*:refs/heads/*") err = r.Push(&PushOptions{ RefSpecs: []config.RefSpec{rs}, }) c.Assert(err, IsNil) iter, err := r.s.IterReferences() c.Assert(err, IsNil) expected := make(map[string]string) iter.ForEach(func(ref *plumbing.Reference) error { if !ref.Name().IsBranch() { return nil } expected[ref.Name().String()] = ref.Hash().String() return nil }) c.Assert(err, IsNil) AssertReferences(c, server, expected) } func (s *RemoteSuite) TestPushContext(c *C) { url := c.MkDir() _, err := PlainInit(url, true) c.Assert(err, IsNil) fs := fixtures.ByURL("https://github.com/git-fixtures/tags.git").One().DotGit() sto := filesystem.NewStorage(fs, cache.NewObjectLRUDefault()) r := newRemote(sto, &config.RemoteConfig{ Name: DefaultRemoteName, URLs: []string{url}, }) ctx, cancel := context.WithCancel(context.Background()) cancel() numGoroutines := runtime.NumGoroutine() err = r.PushContext(ctx, &PushOptions{ RefSpecs: []config.RefSpec{"refs/tags/*:refs/tags/*"}, }) c.Assert(err, NotNil) // let the goroutine from pushHashes finish and check that the number of // goroutines is the same as before time.Sleep(100 * time.Millisecond) c.Assert(runtime.NumGoroutine(), Equals, numGoroutines) } func (s *RemoteSuite) TestPushTags(c *C) { url := c.MkDir() server, err := PlainInit(url, true) 
c.Assert(err, IsNil) fs := fixtures.ByURL("https://github.com/git-fixtures/tags.git").One().DotGit() sto := filesystem.NewStorage(fs, cache.NewObjectLRUDefault()) r := newRemote(sto, &config.RemoteConfig{ Name: DefaultRemoteName, URLs: []string{url}, }) err = r.Push(&PushOptions{ RefSpecs: []config.RefSpec{"refs/tags/*:refs/tags/*"}, }) c.Assert(err, IsNil) AssertReferences(c, server, map[string]string{ "refs/tags/lightweight-tag": "f7b877701fbf855b44c0a9e86f3fdce2c298b07f", "refs/tags/annotated-tag": "b742a2a9fa0afcfa9a6fad080980fbc26b007c69", "refs/tags/commit-tag": "ad7897c0fb8e7d9a9ba41fa66072cf06095a6cfc", "refs/tags/blob-tag": "fe6cb94756faa81e5ed9240f9191b833db5f40ae", "refs/tags/tree-tag": "152175bf7e5580299fa1f0ba41ef6474cc043b70", }) } func (s *RemoteSuite) TestPushNoErrAlreadyUpToDate(c *C) { fs := fixtures.Basic().One().DotGit() sto := filesystem.NewStorage(fs, cache.NewObjectLRUDefault()) r := newRemote(sto, &config.RemoteConfig{ Name: DefaultRemoteName, URLs: []string{fs.Root()}, }) err := r.Push(&PushOptions{ RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*"}, }) c.Assert(err, Equals, NoErrAlreadyUpToDate) } func (s *RemoteSuite) TestPushDeleteReference(c *C) { fs := fixtures.Basic().One().DotGit() sto := filesystem.NewStorage(fs, cache.NewObjectLRUDefault()) r, err := PlainClone(c.MkDir(), true, &CloneOptions{ URL: fs.Root(), }) c.Assert(err, IsNil) remote, err := r.Remote(DefaultRemoteName) c.Assert(err, IsNil) err = remote.Push(&PushOptions{ RefSpecs: []config.RefSpec{":refs/heads/branch"}, }) c.Assert(err, IsNil) _, err = sto.Reference(plumbing.ReferenceName("refs/heads/branch")) c.Assert(err, Equals, plumbing.ErrReferenceNotFound) _, err = r.Storer.Reference(plumbing.ReferenceName("refs/heads/branch")) c.Assert(err, Equals, plumbing.ErrReferenceNotFound) } func (s *RemoteSuite) TestPushRejectNonFastForward(c *C) { fs := fixtures.Basic().One().DotGit() server := filesystem.NewStorage(fs, cache.NewObjectLRUDefault()) r, err := 
PlainClone(c.MkDir(), true, &CloneOptions{ URL: fs.Root(), }) c.Assert(err, IsNil) remote, err := r.Remote(DefaultRemoteName) c.Assert(err, IsNil) branch := plumbing.ReferenceName("refs/heads/branch") oldRef, err := server.Reference(branch) c.Assert(err, IsNil) c.Assert(oldRef, NotNil) err = remote.Push(&PushOptions{RefSpecs: []config.RefSpec{ "refs/heads/master:refs/heads/branch", }}) c.Assert(err, ErrorMatches, "non-fast-forward update: refs/heads/branch") newRef, err := server.Reference(branch) c.Assert(err, IsNil) c.Assert(newRef, DeepEquals, oldRef) } func (s *RemoteSuite) TestPushForce(c *C) { f := fixtures.Basic().One() sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault()) dstFs := f.DotGit() dstSto := filesystem.NewStorage(dstFs, cache.NewObjectLRUDefault()) url := dstFs.Root() r := newRemote(sto, &config.RemoteConfig{ Name: DefaultRemoteName, URLs: []string{url}, }) oldRef, err := dstSto.Reference(plumbing.ReferenceName("refs/heads/branch")) c.Assert(err, IsNil) c.Assert(oldRef, NotNil) err = r.Push(&PushOptions{RefSpecs: []config.RefSpec{ config.RefSpec("+refs/heads/master:refs/heads/branch"), }}) c.Assert(err, IsNil) newRef, err := dstSto.Reference(plumbing.ReferenceName("refs/heads/branch")) c.Assert(err, IsNil) c.Assert(newRef, Not(DeepEquals), oldRef) } func (s *RemoteSuite) TestPushNewReference(c *C) { fs := fixtures.Basic().One().DotGit() url := c.MkDir() server, err := PlainClone(url, true, &CloneOptions{ URL: fs.Root(), }) c.Assert(err, IsNil) r, err := PlainClone(c.MkDir(), true, &CloneOptions{ URL: url, }) c.Assert(err, IsNil) remote, err := r.Remote(DefaultRemoteName) c.Assert(err, IsNil) ref, err := r.Reference(plumbing.ReferenceName("refs/heads/master"), true) c.Assert(err, IsNil) err = remote.Push(&PushOptions{RefSpecs: []config.RefSpec{ "refs/heads/master:refs/heads/branch2", }}) c.Assert(err, IsNil) AssertReferences(c, server, map[string]string{ "refs/heads/branch2": ref.Hash().String(), }) AssertReferences(c, r, 
map[string]string{ "refs/remotes/origin/branch2": ref.Hash().String(), }) } func (s *RemoteSuite) TestPushNewReferenceAndDeleteInBatch(c *C) { fs := fixtures.Basic().One().DotGit() url := c.MkDir() server, err := PlainClone(url, true, &CloneOptions{ URL: fs.Root(), }) c.Assert(err, IsNil) r, err := PlainClone(c.MkDir(), true, &CloneOptions{ URL: url, }) c.Assert(err, IsNil) remote, err := r.Remote(DefaultRemoteName) c.Assert(err, IsNil) ref, err := r.Reference(plumbing.ReferenceName("refs/heads/master"), true) c.Assert(err, IsNil) err = remote.Push(&PushOptions{RefSpecs: []config.RefSpec{ "refs/heads/master:refs/heads/branch2", ":refs/heads/branch", }}) c.Assert(err, IsNil) AssertReferences(c, server, map[string]string{ "refs/heads/branch2": ref.Hash().String(), }) AssertReferences(c, r, map[string]string{ "refs/remotes/origin/branch2": ref.Hash().String(), }) _, err = server.Storer.Reference(plumbing.ReferenceName("refs/heads/branch")) c.Assert(err, Equals, plumbing.ErrReferenceNotFound) } func (s *RemoteSuite) TestPushInvalidEndpoint(c *C) { r := newRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"http://\\"}}) err := r.Push(&PushOptions{RemoteName: "foo"}) c.Assert(err, ErrorMatches, ".*invalid character.*") } func (s *RemoteSuite) TestPushNonExistentEndpoint(c *C) { r := newRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"ssh://non-existent/foo.git"}}) err := r.Push(&PushOptions{}) c.Assert(err, NotNil) } func (s *RemoteSuite) TestPushInvalidSchemaEndpoint(c *C) { r := newRemote(nil, &config.RemoteConfig{Name: "origin", URLs: []string{"qux://foo"}}) err := r.Push(&PushOptions{}) c.Assert(err, ErrorMatches, ".*unsupported scheme.*") } func (s *RemoteSuite) TestPushInvalidFetchOptions(c *C) { r := newRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"qux://foo"}}) invalid := config.RefSpec("^*$ñ") err := r.Push(&PushOptions{RefSpecs: []config.RefSpec{invalid}}) c.Assert(err, Equals, config.ErrRefSpecMalformedSeparator) } func 
(s *RemoteSuite) TestPushInvalidRefSpec(c *C) { r := newRemote(nil, &config.RemoteConfig{ Name: DefaultRemoteName, URLs: []string{"some-url"}, }) rs := config.RefSpec("^*$**") err := r.Push(&PushOptions{ RefSpecs: []config.RefSpec{rs}, }) c.Assert(err, Equals, config.ErrRefSpecMalformedSeparator) } func (s *RemoteSuite) TestPushWrongRemoteName(c *C) { r := newRemote(nil, &config.RemoteConfig{ Name: DefaultRemoteName, URLs: []string{"some-url"}, }) err := r.Push(&PushOptions{ RemoteName: "other-remote", }) c.Assert(err, ErrorMatches, ".*remote names don't match.*") } func (s *RemoteSuite) TestGetHaves(c *C) { f := fixtures.Basic().One() sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault()) var localRefs = []*plumbing.Reference{ plumbing.NewReferenceFromStrings( "foo", "f7b877701fbf855b44c0a9e86f3fdce2c298b07f", ), plumbing.NewReferenceFromStrings( "bar", "fe6cb94756faa81e5ed9240f9191b833db5f40ae", ), plumbing.NewReferenceFromStrings( "qux", "f7b877701fbf855b44c0a9e86f3fdce2c298b07f", ), } l, err := getHaves(localRefs, memory.NewStorage(), sto) c.Assert(err, IsNil) c.Assert(l, HasLen, 2) } func (s *RemoteSuite) TestList(c *C) { repo := fixtures.Basic().One() remote := newRemote(memory.NewStorage(), &config.RemoteConfig{ Name: DefaultRemoteName, URLs: []string{repo.URL}, }) refs, err := remote.List(&ListOptions{}) c.Assert(err, IsNil) expected := []*plumbing.Reference{ plumbing.NewSymbolicReference("HEAD", "refs/heads/master"), plumbing.NewReferenceFromStrings("refs/heads/master", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5"), plumbing.NewReferenceFromStrings("refs/heads/branch", "e8d3ffab552895c19b9fcf7aa264d277cde33881"), plumbing.NewReferenceFromStrings("refs/pull/1/head", "b8e471f58bcbca63b07bda20e428190409c2db47"), plumbing.NewReferenceFromStrings("refs/pull/2/head", "9632f02833b2f9613afb5e75682132b0b22e4a31"), plumbing.NewReferenceFromStrings("refs/pull/2/merge", "c37f58a130ca555e42ff96a071cb9ccb3f437504"), } c.Assert(len(refs), Equals, 
len(expected)) for _, e := range expected { found := false for _, r := range refs { if r.Name() == e.Name() { found = true c.Assert(r, DeepEquals, e) } } c.Assert(found, Equals, true) } } func (s *RemoteSuite) TestUpdateShallows(c *C) { hashes := []plumbing.Hash{ plumbing.NewHash("0000000000000000000000000000000000000001"), plumbing.NewHash("0000000000000000000000000000000000000002"), plumbing.NewHash("0000000000000000000000000000000000000003"), plumbing.NewHash("0000000000000000000000000000000000000004"), plumbing.NewHash("0000000000000000000000000000000000000005"), plumbing.NewHash("0000000000000000000000000000000000000006"), } tests := []struct { hashes []plumbing.Hash result []plumbing.Hash }{ // add to empty shallows {hashes[0:2], hashes[0:2]}, // add new hashes {hashes[2:4], hashes[0:4]}, // add some hashes already in shallow list {hashes[2:6], hashes[0:6]}, // add all hashes {hashes[0:6], hashes[0:6]}, // add empty list {nil, hashes[0:6]}, } remote := newRemote(memory.NewStorage(), &config.RemoteConfig{ Name: DefaultRemoteName, }) shallows, err := remote.s.Shallow() c.Assert(err, IsNil) c.Assert(len(shallows), Equals, 0) resp := new(packp.UploadPackResponse) o := &FetchOptions{ Depth: 1, } for _, t := range tests { resp.Shallows = t.hashes err = remote.updateShallow(o, resp) c.Assert(err, IsNil) shallow, err := remote.s.Shallow() c.Assert(err, IsNil) c.Assert(len(shallow), Equals, len(t.result)) c.Assert(shallow, DeepEquals, t.result) } } func (s *RemoteSuite) TestUseRefDeltas(c *C) { url := c.MkDir() _, err := PlainInit(url, true) c.Assert(err, IsNil) fs := fixtures.ByURL("https://github.com/git-fixtures/tags.git").One().DotGit() sto := filesystem.NewStorage(fs, cache.NewObjectLRUDefault()) r := newRemote(sto, &config.RemoteConfig{ Name: DefaultRemoteName, URLs: []string{url}, }) ar := packp.NewAdvRefs() ar.Capabilities.Add(capability.OFSDelta) c.Assert(r.useRefDeltas(ar), Equals, false) ar.Capabilities.Delete(capability.OFSDelta) 
c.Assert(r.useRefDeltas(ar), Equals, true) } golang-gopkg-src-d-go-git.v4-4.11.0/repository.go000066400000000000000000001143341345605224300214530ustar00rootroot00000000000000package git import ( "bytes" "context" "errors" "fmt" "io" stdioutil "io/ioutil" "os" "path" "path/filepath" "strings" "time" "golang.org/x/crypto/openpgp" "gopkg.in/src-d/go-git.v4/config" "gopkg.in/src-d/go-git.v4/internal/revision" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-git.v4/plumbing/object" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage" "gopkg.in/src-d/go-git.v4/storage/filesystem" "gopkg.in/src-d/go-git.v4/utils/ioutil" "gopkg.in/src-d/go-billy.v4" "gopkg.in/src-d/go-billy.v4/osfs" ) // GitDirName this is a special folder where all the git stuff is. const GitDirName = ".git" var ( // ErrBranchExists an error stating the specified branch already exists ErrBranchExists = errors.New("branch already exists") // ErrBranchNotFound an error stating the specified branch does not exist ErrBranchNotFound = errors.New("branch not found") // ErrTagExists an error stating the specified tag already exists ErrTagExists = errors.New("tag already exists") // ErrTagNotFound an error stating the specified tag does not exist ErrTagNotFound = errors.New("tag not found") // ErrFetching is returned when the packfile could not be downloaded ErrFetching = errors.New("unable to fetch packfile") ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") ErrRepositoryNotExists = errors.New("repository does not exist") ErrRepositoryAlreadyExists = errors.New("repository already exists") ErrRemoteNotFound = errors.New("remote not found") ErrRemoteExists = errors.New("remote already exists") ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'") ErrWorktreeNotProvided = errors.New("worktree should be provided") ErrIsBareRepository 
= errors.New("worktree not available in a bare repository") ErrUnableToResolveCommit = errors.New("unable to resolve commit") ErrPackedObjectsNotSupported = errors.New("Packed objects not supported") ) // Repository represents a git repository type Repository struct { Storer storage.Storer r map[string]*Remote wt billy.Filesystem } // Init creates an empty git repository, based on the given Storer and worktree. // The worktree Filesystem is optional, if nil a bare repository is created. If // the given storer is not empty ErrRepositoryAlreadyExists is returned func Init(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { if err := initStorer(s); err != nil { return nil, err } r := newRepository(s, worktree) _, err := r.Reference(plumbing.HEAD, false) switch err { case plumbing.ErrReferenceNotFound: case nil: return nil, ErrRepositoryAlreadyExists default: return nil, err } h := plumbing.NewSymbolicReference(plumbing.HEAD, plumbing.Master) if err := s.SetReference(h); err != nil { return nil, err } if worktree == nil { r.setIsBare(true) return r, nil } return r, setWorktreeAndStoragePaths(r, worktree) } func initStorer(s storer.Storer) error { i, ok := s.(storer.Initializer) if !ok { return nil } return i.Init() } func setWorktreeAndStoragePaths(r *Repository, worktree billy.Filesystem) error { type fsBased interface { Filesystem() billy.Filesystem } // .git file is only created if the storage is file based and the file // system is osfs.OS fs, isFSBased := r.Storer.(fsBased) if !isFSBased { return nil } if err := createDotGitFile(worktree, fs.Filesystem()); err != nil { return err } return setConfigWorktree(r, worktree, fs.Filesystem()) } func createDotGitFile(worktree, storage billy.Filesystem) error { path, err := filepath.Rel(worktree.Root(), storage.Root()) if err != nil { path = storage.Root() } if path == GitDirName { // not needed, since the folder is the default place return nil } f, err := worktree.Create(GitDirName) if err != nil { return 
err } defer f.Close() _, err = fmt.Fprintf(f, "gitdir: %s\n", path) return err } func setConfigWorktree(r *Repository, worktree, storage billy.Filesystem) error { path, err := filepath.Rel(storage.Root(), worktree.Root()) if err != nil { path = worktree.Root() } if path == ".." { // not needed, since the folder is the default place return nil } cfg, err := r.Storer.Config() if err != nil { return err } cfg.Core.Worktree = path return r.Storer.SetConfig(cfg) } // Open opens a git repository using the given Storer and worktree filesystem, // if the given storer is complete empty ErrRepositoryNotExists is returned. // The worktree can be nil when the repository being opened is bare, if the // repository is a normal one (not bare) and worktree is nil the err // ErrWorktreeNotProvided is returned func Open(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { _, err := s.Reference(plumbing.HEAD) if err == plumbing.ErrReferenceNotFound { return nil, ErrRepositoryNotExists } if err != nil { return nil, err } return newRepository(s, worktree), nil } // Clone a repository into the given Storer and worktree Filesystem with the // given options, if worktree is nil a bare repository is created. If the given // storer is not empty ErrRepositoryAlreadyExists is returned. // // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects to the // transport operations. func Clone(s storage.Storer, worktree billy.Filesystem, o *CloneOptions) (*Repository, error) { return CloneContext(context.Background(), s, worktree, o) } // CloneContext a repository into the given Storer and worktree Filesystem with // the given options, if worktree is nil a bare repository is created. If the // given storer is not empty ErrRepositoryAlreadyExists is returned. // // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. 
The context only affects to the // transport operations. func CloneContext( ctx context.Context, s storage.Storer, worktree billy.Filesystem, o *CloneOptions, ) (*Repository, error) { r, err := Init(s, worktree) if err != nil { return nil, err } return r, r.clone(ctx, o) } // PlainInit create an empty git repository at the given path. isBare defines // if the repository will have worktree (non-bare) or not (bare), if the path // is not empty ErrRepositoryAlreadyExists is returned. func PlainInit(path string, isBare bool) (*Repository, error) { var wt, dot billy.Filesystem if isBare { dot = osfs.New(path) } else { wt = osfs.New(path) dot, _ = wt.Chroot(GitDirName) } s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault()) return Init(s, wt) } // PlainOpen opens a git repository from the given path. It detects if the // repository is bare or a normal one. If the path doesn't contain a valid // repository ErrRepositoryNotExists is returned func PlainOpen(path string) (*Repository, error) { return PlainOpenWithOptions(path, &PlainOpenOptions{}) } // PlainOpenWithOptions opens a git repository from the given path with specific // options. See PlainOpen for more info. 
func PlainOpenWithOptions(path string, o *PlainOpenOptions) (*Repository, error) { dot, wt, err := dotGitToOSFilesystems(path, o.DetectDotGit) if err != nil { return nil, err } if _, err := dot.Stat(""); err != nil { if os.IsNotExist(err) { return nil, ErrRepositoryNotExists } return nil, err } s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault()) return Open(s, wt) } func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem, err error) { if path, err = filepath.Abs(path); err != nil { return nil, nil, err } var fs billy.Filesystem var fi os.FileInfo for { fs = osfs.New(path) fi, err = fs.Stat(GitDirName) if err == nil { // no error; stop break } if !os.IsNotExist(err) { // unknown error; stop return nil, nil, err } if detect { // try its parent as long as we haven't reached // the root dir if dir := filepath.Dir(path); dir != path { path = dir continue } } // not detecting via parent dirs and the dir does not exist; // stop return fs, nil, nil } if fi.IsDir() { dot, err = fs.Chroot(GitDirName) return dot, fs, err } dot, err = dotGitFileToOSFilesystem(path, fs) if err != nil { return nil, nil, err } return dot, fs, nil } func dotGitFileToOSFilesystem(path string, fs billy.Filesystem) (bfs billy.Filesystem, err error) { f, err := fs.Open(GitDirName) if err != nil { return nil, err } defer ioutil.CheckClose(f, &err) b, err := stdioutil.ReadAll(f) if err != nil { return nil, err } line := string(b) const prefix = "gitdir: " if !strings.HasPrefix(line, prefix) { return nil, fmt.Errorf(".git file has no %s prefix", prefix) } gitdir := strings.Split(line[len(prefix):], "\n")[0] gitdir = strings.TrimSpace(gitdir) if filepath.IsAbs(gitdir) { return osfs.New(gitdir), nil } return osfs.New(fs.Join(path, gitdir)), nil } // PlainClone a repository into the path with the given options, isBare defines // if the new repository will be bare or normal. If the path is not empty // ErrRepositoryAlreadyExists is returned. 
// // TODO(mcuadros): move isBare to CloneOptions in v5 func PlainClone(path string, isBare bool, o *CloneOptions) (*Repository, error) { return PlainCloneContext(context.Background(), path, isBare, o) } // PlainCloneContext a repository into the path with the given options, isBare // defines if the new repository will be bare or normal. If the path is not empty // ErrRepositoryAlreadyExists is returned. // // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects to the // transport operations. // // TODO(mcuadros): move isBare to CloneOptions in v5 // TODO(smola): refuse upfront to clone on a non-empty directory in v5, see #1027 func PlainCloneContext(ctx context.Context, path string, isBare bool, o *CloneOptions) (*Repository, error) { cleanup, cleanupParent, err := checkIfCleanupIsNeeded(path) if err != nil { return nil, err } r, err := PlainInit(path, isBare) if err != nil { return nil, err } err = r.clone(ctx, o) if err != nil && err != ErrRepositoryAlreadyExists { if cleanup { cleanUpDir(path, cleanupParent) } } return r, err } func newRepository(s storage.Storer, worktree billy.Filesystem) *Repository { return &Repository{ Storer: s, wt: worktree, r: make(map[string]*Remote), } } func checkIfCleanupIsNeeded(path string) (cleanup bool, cleanParent bool, err error) { fi, err := os.Stat(path) if err != nil { if os.IsNotExist(err) { return true, true, nil } return false, false, err } if !fi.IsDir() { return false, false, fmt.Errorf("path is not a directory: %s", path) } f, err := os.Open(path) if err != nil { return false, false, err } defer ioutil.CheckClose(f, &err) _, err = f.Readdirnames(1) if err == io.EOF { return true, false, nil } if err != nil { return false, false, err } return false, false, nil } func cleanUpDir(path string, all bool) error { if all { return os.RemoveAll(path) } f, err := os.Open(path) if err != nil { return err } defer ioutil.CheckClose(f, 
&err) names, err := f.Readdirnames(-1) if err != nil { return err } for _, name := range names { if err := os.RemoveAll(filepath.Join(path, name)); err != nil { return err } } return err } // Config return the repository config func (r *Repository) Config() (*config.Config, error) { return r.Storer.Config() } // Remote return a remote if exists func (r *Repository) Remote(name string) (*Remote, error) { cfg, err := r.Storer.Config() if err != nil { return nil, err } c, ok := cfg.Remotes[name] if !ok { return nil, ErrRemoteNotFound } return newRemote(r.Storer, c), nil } // Remotes returns a list with all the remotes func (r *Repository) Remotes() ([]*Remote, error) { cfg, err := r.Storer.Config() if err != nil { return nil, err } remotes := make([]*Remote, len(cfg.Remotes)) var i int for _, c := range cfg.Remotes { remotes[i] = newRemote(r.Storer, c) i++ } return remotes, nil } // CreateRemote creates a new remote func (r *Repository) CreateRemote(c *config.RemoteConfig) (*Remote, error) { if err := c.Validate(); err != nil { return nil, err } remote := newRemote(r.Storer, c) cfg, err := r.Storer.Config() if err != nil { return nil, err } if _, ok := cfg.Remotes[c.Name]; ok { return nil, ErrRemoteExists } cfg.Remotes[c.Name] = c return remote, r.Storer.SetConfig(cfg) } // CreateRemoteAnonymous creates a new anonymous remote. c.Name must be "anonymous". // It's used like 'git fetch git@github.com:src-d/go-git.git master:master'. 
func (r *Repository) CreateRemoteAnonymous(c *config.RemoteConfig) (*Remote, error) { if err := c.Validate(); err != nil { return nil, err } if c.Name != "anonymous" { return nil, ErrAnonymousRemoteName } remote := newRemote(r.Storer, c) return remote, nil } // DeleteRemote delete a remote from the repository and delete the config func (r *Repository) DeleteRemote(name string) error { cfg, err := r.Storer.Config() if err != nil { return err } if _, ok := cfg.Remotes[name]; !ok { return ErrRemoteNotFound } delete(cfg.Remotes, name) return r.Storer.SetConfig(cfg) } // Branch return a Branch if exists func (r *Repository) Branch(name string) (*config.Branch, error) { cfg, err := r.Storer.Config() if err != nil { return nil, err } b, ok := cfg.Branches[name] if !ok { return nil, ErrBranchNotFound } return b, nil } // CreateBranch creates a new Branch func (r *Repository) CreateBranch(c *config.Branch) error { if err := c.Validate(); err != nil { return err } cfg, err := r.Storer.Config() if err != nil { return err } if _, ok := cfg.Branches[c.Name]; ok { return ErrBranchExists } cfg.Branches[c.Name] = c return r.Storer.SetConfig(cfg) } // DeleteBranch delete a Branch from the repository and delete the config func (r *Repository) DeleteBranch(name string) error { cfg, err := r.Storer.Config() if err != nil { return err } if _, ok := cfg.Branches[name]; !ok { return ErrBranchNotFound } delete(cfg.Branches, name) return r.Storer.SetConfig(cfg) } // CreateTag creates a tag. If opts is included, the tag is an annotated tag, // otherwise a lightweight tag is created. 
func (r *Repository) CreateTag(name string, hash plumbing.Hash, opts *CreateTagOptions) (*plumbing.Reference, error) { rname := plumbing.ReferenceName(path.Join("refs", "tags", name)) _, err := r.Storer.Reference(rname) switch err { case nil: // Tag exists, this is an error return nil, ErrTagExists case plumbing.ErrReferenceNotFound: // Tag missing, available for creation, pass this default: // Some other error return nil, err } var target plumbing.Hash if opts != nil { target, err = r.createTagObject(name, hash, opts) if err != nil { return nil, err } } else { target = hash } ref := plumbing.NewHashReference(rname, target) if err = r.Storer.SetReference(ref); err != nil { return nil, err } return ref, nil } func (r *Repository) createTagObject(name string, hash plumbing.Hash, opts *CreateTagOptions) (plumbing.Hash, error) { if err := opts.Validate(r, hash); err != nil { return plumbing.ZeroHash, err } rawobj, err := object.GetObject(r.Storer, hash) if err != nil { return plumbing.ZeroHash, err } tag := &object.Tag{ Name: name, Tagger: *opts.Tagger, Message: opts.Message, TargetType: rawobj.Type(), Target: hash, } if opts.SignKey != nil { sig, err := r.buildTagSignature(tag, opts.SignKey) if err != nil { return plumbing.ZeroHash, err } tag.PGPSignature = sig } obj := r.Storer.NewEncodedObject() if err := tag.Encode(obj); err != nil { return plumbing.ZeroHash, err } return r.Storer.SetEncodedObject(obj) } func (r *Repository) buildTagSignature(tag *object.Tag, signKey *openpgp.Entity) (string, error) { encoded := &plumbing.MemoryObject{} if err := tag.Encode(encoded); err != nil { return "", err } rdr, err := encoded.Reader() if err != nil { return "", err } var b bytes.Buffer if err := openpgp.ArmoredDetachSign(&b, signKey, rdr, nil); err != nil { return "", err } return b.String(), nil } // Tag returns a tag from the repository. 
//
// If you want to check to see if the tag is an annotated tag, you can call
// TagObject on the hash of the reference in ForEach:
//
//   ref, err := r.Tag("v0.1.0")
//   if err != nil {
//     // Handle error
//   }
//
//   obj, err := r.TagObject(ref.Hash())
//   switch err {
//   case nil:
//     // Tag object present
//   case plumbing.ErrObjectNotFound:
//     // Not a tag object
//   default:
//     // Some other error
//   }
//
func (r *Repository) Tag(name string) (*plumbing.Reference, error) {
	ref, err := r.Reference(plumbing.ReferenceName(path.Join("refs", "tags", name)), false)
	if err != nil {
		if err == plumbing.ErrReferenceNotFound {
			// Return a friendly error for this one, versus just ReferenceNotFound.
			return nil, ErrTagNotFound
		}

		return nil, err
	}

	return ref, nil
}

// DeleteTag deletes a tag from the repository.
func (r *Repository) DeleteTag(name string) error {
	// Tag() is used first so a missing tag surfaces as ErrTagNotFound.
	_, err := r.Tag(name)
	if err != nil {
		return err
	}

	return r.Storer.RemoveReference(plumbing.ReferenceName(path.Join("refs", "tags", name)))
}

// resolveToCommitHash peels h until a commit is reached: annotated tags are
// dereferenced recursively; any other object type fails with
// ErrUnableToResolveCommit.
func (r *Repository) resolveToCommitHash(h plumbing.Hash) (plumbing.Hash, error) {
	obj, err := r.Storer.EncodedObject(plumbing.AnyObject, h)
	if err != nil {
		return plumbing.ZeroHash, err
	}
	switch obj.Type() {
	case plumbing.TagObject:
		t, err := object.DecodeTag(r.Storer, obj)
		if err != nil {
			return plumbing.ZeroHash, err
		}
		return r.resolveToCommitHash(t.Target)
	case plumbing.CommitObject:
		return h, nil
	default:
		return plumbing.ZeroHash, ErrUnableToResolveCommit
	}
}

// Clone clones a remote repository
func (r *Repository) clone(ctx context.Context, o *CloneOptions) error {
	if err := o.Validate(); err != nil {
		return err
	}

	c := &config.RemoteConfig{
		Name:  o.RemoteName,
		URLs:  []string{o.URL},
		Fetch: r.cloneRefSpec(o),
	}

	if _, err := r.CreateRemote(c); err != nil {
		return err
	}

	ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{
		RefSpecs:   c.Fetch,
		Depth:      o.Depth,
		Auth:       o.Auth,
		Progress:   o.Progress,
		Tags:       o.Tags,
		RemoteName: o.RemoteName,
	}, o.ReferenceName)
	if err != nil {
		return err
	}

	if r.wt != nil && !o.NoCheckout {
		w, err := r.Worktree()
		if err != nil {
			return err
		}

		head, err := r.Head()
		if err != nil {
			return err
		}

		// Materialize the fetched HEAD in the worktree.
		if err := w.Reset(&ResetOptions{
			Mode:   MergeReset,
			Commit: head.Hash(),
		}); err != nil {
			return err
		}

		if o.RecurseSubmodules != NoRecurseSubmodules {
			if err := w.updateSubmodules(&SubmoduleUpdateOptions{
				RecurseSubmodules: o.RecurseSubmodules,
				Auth:              o.Auth,
			}); err != nil {
				return err
			}
		}
	}

	if err := r.updateRemoteConfigIfNeeded(o, c, ref); err != nil {
		return err
	}

	if ref.Name().IsBranch() {
		branchRef := ref.Name()
		branchName := strings.Split(string(branchRef), "refs/heads/")[1]

		b := &config.Branch{
			Name:  branchName,
			Merge: branchRef,
		}
		if o.RemoteName == "" {
			b.Remote = "origin"
		} else {
			b.Remote = o.RemoteName
		}

		if err := r.CreateBranch(b); err != nil {
			return err
		}
	}

	return nil
}

const (
	refspecTag              = "+refs/tags/%s:refs/tags/%[1]s"
	refspecSingleBranch     = "+refs/heads/%s:refs/remotes/%s/%[1]s"
	refspecSingleBranchHEAD = "+HEAD:refs/remotes/%s/HEAD"
)

// cloneRefSpec returns the refspecs to fetch for the given clone options:
// a single tag, HEAD, a single branch, or everything.
func (r *Repository) cloneRefSpec(o *CloneOptions) []config.RefSpec {
	switch {
	case o.ReferenceName.IsTag():
		return []config.RefSpec{
			config.RefSpec(fmt.Sprintf(refspecTag, o.ReferenceName.Short())),
		}
	case o.SingleBranch && o.ReferenceName == plumbing.HEAD:
		return []config.RefSpec{
			config.RefSpec(fmt.Sprintf(refspecSingleBranchHEAD, o.RemoteName)),
			config.RefSpec(fmt.Sprintf(refspecSingleBranch, plumbing.Master.Short(), o.RemoteName)),
		}
	case o.SingleBranch:
		return []config.RefSpec{
			config.RefSpec(fmt.Sprintf(refspecSingleBranch, o.ReferenceName.Short(), o.RemoteName)),
		}
	default:
		return []config.RefSpec{
			config.RefSpec(fmt.Sprintf(config.DefaultFetchRefSpec, o.RemoteName)),
		}
	}
}

// setIsBare persists the core.bare flag in the repository configuration.
func (r *Repository) setIsBare(isBare bool) error {
	cfg, err := r.Storer.Config()
	if err != nil {
		return err
	}

	cfg.Core.IsBare = isBare
	return r.Storer.SetConfig(cfg)
}

// updateRemoteConfigIfNeeded narrows the persisted fetch refspec after a
// single-branch clone so later fetches only track that branch.
func (r *Repository) updateRemoteConfigIfNeeded(o *CloneOptions, c *config.RemoteConfig, head *plumbing.Reference) error {
	if !o.SingleBranch {
		return nil
	}

	c.Fetch = r.cloneRefSpec(o)

	cfg, err := r.Storer.Config()
	if err != nil {
		return err
	}

	cfg.Remotes[c.Name] = c
	return r.Storer.SetConfig(cfg)
}

// fetchAndUpdateReferences fetches from the remote and updates local
// references to match ref. It returns the resolved reference, or
// NoErrAlreadyUpToDate when neither objects nor references changed.
func (r *Repository) fetchAndUpdateReferences(
	ctx context.Context, o *FetchOptions, ref plumbing.ReferenceName,
) (*plumbing.Reference, error) {

	if err := o.Validate(); err != nil {
		return nil, err
	}

	remote, err := r.Remote(o.RemoteName)
	if err != nil {
		return nil, err
	}

	objsUpdated := true
	remoteRefs, err := remote.fetch(ctx, o)
	if err == NoErrAlreadyUpToDate {
		objsUpdated = false
	} else if err == packfile.ErrEmptyPackfile {
		return nil, ErrFetching
	} else if err != nil {
		return nil, err
	}

	resolvedRef, err := storer.ResolveReference(remoteRefs, ref)
	if err != nil {
		return nil, err
	}

	refsUpdated, err := r.updateReferences(remote.c.Fetch, resolvedRef)
	if err != nil {
		return nil, err
	}

	if !objsUpdated && !refsUpdated {
		return nil, NoErrAlreadyUpToDate
	}

	return resolvedRef, nil
}

// updateReferences points HEAD (and, for branches, the matching local refs)
// at resolvedRef, reporting whether any reference actually changed.
func (r *Repository) updateReferences(spec []config.RefSpec,
	resolvedRef *plumbing.Reference) (updated bool, err error) {

	if !resolvedRef.Name().IsBranch() {
		// Detached HEAD mode
		h, err := r.resolveToCommitHash(resolvedRef.Hash())
		if err != nil {
			return false, err
		}

		head := plumbing.NewHashReference(plumbing.HEAD, h)
		return updateReferenceStorerIfNeeded(r.Storer, head)
	}

	refs := []*plumbing.Reference{
		// Create local reference for the resolved ref
		resolvedRef,

		// Create local symbolic HEAD
		plumbing.NewSymbolicReference(plumbing.HEAD, resolvedRef.Name()),
	}

	refs = append(refs, r.calculateRemoteHeadReference(spec, resolvedRef)...)

	for _, ref := range refs {
		u, err := updateReferenceStorerIfNeeded(r.Storer, ref)
		if err != nil {
			return updated, err
		}

		if u {
			updated = true
		}
	}

	return
}

func (r *Repository) calculateRemoteHeadReference(spec []config.RefSpec,
	resolvedHead *plumbing.Reference) []*plumbing.Reference {

	var refs []*plumbing.Reference

	// Create resolved HEAD reference with remote prefix if it does not
	// exist. This is needed when using single branch and HEAD.
	for _, rs := range spec {
		name := resolvedHead.Name()
		if !rs.Match(name) {
			continue
		}

		name = rs.Dst(name)
		_, err := r.Storer.Reference(name)
		if err == plumbing.ErrReferenceNotFound {
			refs = append(refs, plumbing.NewHashReference(name, resolvedHead.Hash()))
		}
	}

	return refs
}

// checkAndUpdateReferenceStorerIfNeeded stores r only when it differs from
// (or is missing in) the current storage, using old for the compare-and-set.
func checkAndUpdateReferenceStorerIfNeeded(
	s storer.ReferenceStorer, r, old *plumbing.Reference) (
	updated bool, err error) {
	p, err := s.Reference(r.Name())
	if err != nil && err != plumbing.ErrReferenceNotFound {
		return false, err
	}

	// we use the string method to compare references, is the easiest way
	if err == plumbing.ErrReferenceNotFound || r.String() != p.String() {
		if err := s.CheckAndSetReference(r, old); err != nil {
			return false, err
		}

		return true, nil
	}

	return false, nil
}

func updateReferenceStorerIfNeeded(
	s storer.ReferenceStorer, r *plumbing.Reference) (updated bool, err error) {
	return checkAndUpdateReferenceStorerIfNeeded(s, r, nil)
}

// Fetch fetches references along with the objects necessary to complete
// their histories, from the remote named as FetchOptions.RemoteName.
//
// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are
// no changes to be fetched, or an error.
func (r *Repository) Fetch(o *FetchOptions) error {
	return r.FetchContext(context.Background(), o)
}

// FetchContext fetches references along with the objects necessary to complete
// their histories, from the remote named as FetchOptions.RemoteName.
//
// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are
// no changes to be fetched, or an error.
//
// The provided Context must be non-nil. If the context expires before the
// operation is complete, an error is returned. The context only affects to the
// transport operations.
func (r *Repository) FetchContext(ctx context.Context, o *FetchOptions) error { if err := o.Validate(); err != nil { return err } remote, err := r.Remote(o.RemoteName) if err != nil { return err } return remote.FetchContext(ctx, o) } // Push performs a push to the remote. Returns NoErrAlreadyUpToDate if // the remote was already up-to-date, from the remote named as // FetchOptions.RemoteName. func (r *Repository) Push(o *PushOptions) error { return r.PushContext(context.Background(), o) } // PushContext performs a push to the remote. Returns NoErrAlreadyUpToDate if // the remote was already up-to-date, from the remote named as // FetchOptions.RemoteName. // // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects to the // transport operations. func (r *Repository) PushContext(ctx context.Context, o *PushOptions) error { if err := o.Validate(); err != nil { return err } remote, err := r.Remote(o.RemoteName) if err != nil { return err } return remote.PushContext(ctx, o) } // Log returns the commit history from the given LogOptions. 
func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) { fn := commitIterFunc(o.Order) if fn == nil { return nil, fmt.Errorf("invalid Order=%v", o.Order) } var ( it object.CommitIter err error ) if o.All { it, err = r.logAll(fn) } else { it, err = r.log(o.From, fn) } if err != nil { return nil, err } if o.FileName != nil { // for `git log --all` also check parent (if the next commit comes from the real parent) it = r.logWithFile(*o.FileName, it, o.All) } return it, nil } func (r *Repository) log(from plumbing.Hash, commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) { h := from if from == plumbing.ZeroHash { head, err := r.Head() if err != nil { return nil, err } h = head.Hash() } commit, err := r.CommitObject(h) if err != nil { return nil, err } return commitIterFunc(commit), nil } func (r *Repository) logAll(commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) { return object.NewCommitAllIter(r.Storer, commitIterFunc) } func (*Repository) logWithFile(fileName string, commitIter object.CommitIter, checkParent bool) object.CommitIter { return object.NewCommitFileIterFromIter(fileName, commitIter, checkParent) } func commitIterFunc(order LogOrder) func(c *object.Commit) object.CommitIter { switch order { case LogOrderDefault: return func(c *object.Commit) object.CommitIter { return object.NewCommitPreorderIter(c, nil, nil) } case LogOrderDFS: return func(c *object.Commit) object.CommitIter { return object.NewCommitPreorderIter(c, nil, nil) } case LogOrderDFSPost: return func(c *object.Commit) object.CommitIter { return object.NewCommitPostorderIter(c, nil) } case LogOrderBSF: return func(c *object.Commit) object.CommitIter { return object.NewCommitIterBSF(c, nil, nil) } case LogOrderCommitterTime: return func(c *object.Commit) object.CommitIter { return object.NewCommitIterCTime(c, nil, nil) } } return nil } // Tags returns all the tag References in a repository. 
// // If you want to check to see if the tag is an annotated tag, you can call // TagObject on the hash Reference passed in through ForEach: // // iter, err := r.Tags() // if err != nil { // // Handle error // } // // if err := iter.ForEach(func (ref *plumbing.Reference) error { // obj, err := r.TagObject(ref.Hash()) // switch err { // case nil: // // Tag object present // case plumbing.ErrObjectNotFound: // // Not a tag object // default: // // Some other error // return err // } // }); err != nil { // // Handle outer iterator error // } // func (r *Repository) Tags() (storer.ReferenceIter, error) { refIter, err := r.Storer.IterReferences() if err != nil { return nil, err } return storer.NewReferenceFilteredIter( func(r *plumbing.Reference) bool { return r.Name().IsTag() }, refIter), nil } // Branches returns all the References that are Branches. func (r *Repository) Branches() (storer.ReferenceIter, error) { refIter, err := r.Storer.IterReferences() if err != nil { return nil, err } return storer.NewReferenceFilteredIter( func(r *plumbing.Reference) bool { return r.Name().IsBranch() }, refIter), nil } // Notes returns all the References that are notes. For more information: // https://git-scm.com/docs/git-notes func (r *Repository) Notes() (storer.ReferenceIter, error) { refIter, err := r.Storer.IterReferences() if err != nil { return nil, err } return storer.NewReferenceFilteredIter( func(r *plumbing.Reference) bool { return r.Name().IsNote() }, refIter), nil } // TreeObject return a Tree with the given hash. 
If not found // plumbing.ErrObjectNotFound is returned func (r *Repository) TreeObject(h plumbing.Hash) (*object.Tree, error) { return object.GetTree(r.Storer, h) } // TreeObjects returns an unsorted TreeIter with all the trees in the repository func (r *Repository) TreeObjects() (*object.TreeIter, error) { iter, err := r.Storer.IterEncodedObjects(plumbing.TreeObject) if err != nil { return nil, err } return object.NewTreeIter(r.Storer, iter), nil } // CommitObject return a Commit with the given hash. If not found // plumbing.ErrObjectNotFound is returned. func (r *Repository) CommitObject(h plumbing.Hash) (*object.Commit, error) { return object.GetCommit(r.Storer, h) } // CommitObjects returns an unsorted CommitIter with all the commits in the repository. func (r *Repository) CommitObjects() (object.CommitIter, error) { iter, err := r.Storer.IterEncodedObjects(plumbing.CommitObject) if err != nil { return nil, err } return object.NewCommitIter(r.Storer, iter), nil } // BlobObject returns a Blob with the given hash. If not found // plumbing.ErrObjectNotFound is returned. func (r *Repository) BlobObject(h plumbing.Hash) (*object.Blob, error) { return object.GetBlob(r.Storer, h) } // BlobObjects returns an unsorted BlobIter with all the blobs in the repository. func (r *Repository) BlobObjects() (*object.BlobIter, error) { iter, err := r.Storer.IterEncodedObjects(plumbing.BlobObject) if err != nil { return nil, err } return object.NewBlobIter(r.Storer, iter), nil } // TagObject returns a Tag with the given hash. If not found // plumbing.ErrObjectNotFound is returned. This method only returns // annotated Tags, no lightweight Tags. func (r *Repository) TagObject(h plumbing.Hash) (*object.Tag, error) { return object.GetTag(r.Storer, h) } // TagObjects returns a unsorted TagIter that can step through all of the annotated // tags in the repository. 
func (r *Repository) TagObjects() (*object.TagIter, error) { iter, err := r.Storer.IterEncodedObjects(plumbing.TagObject) if err != nil { return nil, err } return object.NewTagIter(r.Storer, iter), nil } // Object returns an Object with the given hash. If not found // plumbing.ErrObjectNotFound is returned. func (r *Repository) Object(t plumbing.ObjectType, h plumbing.Hash) (object.Object, error) { obj, err := r.Storer.EncodedObject(t, h) if err != nil { return nil, err } return object.DecodeObject(r.Storer, obj) } // Objects returns an unsorted ObjectIter with all the objects in the repository. func (r *Repository) Objects() (*object.ObjectIter, error) { iter, err := r.Storer.IterEncodedObjects(plumbing.AnyObject) if err != nil { return nil, err } return object.NewObjectIter(r.Storer, iter), nil } // Head returns the reference where HEAD is pointing to. func (r *Repository) Head() (*plumbing.Reference, error) { return storer.ResolveReference(r.Storer, plumbing.HEAD) } // Reference returns the reference for a given reference name. If resolved is // true, any symbolic reference will be resolved. func (r *Repository) Reference(name plumbing.ReferenceName, resolved bool) ( *plumbing.Reference, error) { if resolved { return storer.ResolveReference(r.Storer, name) } return r.Storer.Reference(name) } // References returns an unsorted ReferenceIter for all references. func (r *Repository) References() (storer.ReferenceIter, error) { return r.Storer.IterReferences() } // Worktree returns a worktree based on the given fs, if nil the default // worktree will be used. func (r *Repository) Worktree() (*Worktree, error) { if r.wt == nil { return nil, ErrIsBareRepository } return &Worktree{r: r, Filesystem: r.wt}, nil } func countTrue(vals ...bool) int { sum := 0 for _, v := range vals { if v { sum++ } } return sum } // ResolveRevision resolves revision to corresponding hash. It will always // resolve to a commit hash, not a tree or annotated tag. 
// // Implemented resolvers : HEAD, branch, tag, heads/branch, refs/heads/branch, // refs/tags/tag, refs/remotes/origin/branch, refs/remotes/origin/HEAD, tilde and caret (HEAD~1, master~^, tag~2, ref/heads/master~1, ...), selection by text (HEAD^{/fix nasty bug}) func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, error) { p := revision.NewParserFromString(string(rev)) items, err := p.Parse() if err != nil { return nil, err } var commit *object.Commit for _, item := range items { switch item.(type) { case revision.Ref: revisionRef := item.(revision.Ref) var ref *plumbing.Reference var hashCommit, refCommit, tagCommit *object.Commit var rErr, hErr, tErr error for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...) { ref, err = storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef))) if err == nil { break } } if ref != nil { tag, tObjErr := r.TagObject(ref.Hash()) if tObjErr != nil { tErr = tObjErr } else { tagCommit, tErr = tag.Commit() } refCommit, rErr = r.CommitObject(ref.Hash()) } else { rErr = plumbing.ErrReferenceNotFound tErr = plumbing.ErrReferenceNotFound } maybeHash := plumbing.NewHash(string(revisionRef)).String() == string(revisionRef) if maybeHash { hashCommit, hErr = r.CommitObject(plumbing.NewHash(string(revisionRef))) } else { hErr = plumbing.ErrReferenceNotFound } isTag := tErr == nil isCommit := rErr == nil isHash := hErr == nil switch { case countTrue(isTag, isCommit, isHash) > 1: return &plumbing.ZeroHash, fmt.Errorf(`refname "%s" is ambiguous`, revisionRef) case isTag: commit = tagCommit case isCommit: commit = refCommit case isHash: commit = hashCommit default: return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound } case revision.CaretPath: depth := item.(revision.CaretPath).Depth if depth == 0 { break } iter := commit.Parents() c, err := iter.Next() if err != nil { return &plumbing.ZeroHash, err } if depth == 1 { commit = c break } c, err = iter.Next() if err != nil 
{ return &plumbing.ZeroHash, err } commit = c case revision.TildePath: for i := 0; i < item.(revision.TildePath).Depth; i++ { c, err := commit.Parents().Next() if err != nil { return &plumbing.ZeroHash, err } commit = c } case revision.CaretReg: history := object.NewCommitPreorderIter(commit, nil, nil) re := item.(revision.CaretReg).Regexp negate := item.(revision.CaretReg).Negate var c *object.Commit err := history.ForEach(func(hc *object.Commit) error { if !negate && re.MatchString(hc.Message) { c = hc return storer.ErrStop } if negate && !re.MatchString(hc.Message) { c = hc return storer.ErrStop } return nil }) if err != nil { return &plumbing.ZeroHash, err } if c == nil { return &plumbing.ZeroHash, fmt.Errorf(`No commit message match regexp : "%s"`, re.String()) } commit = c } } return &commit.Hash, nil } type RepackConfig struct { // UseRefDeltas configures whether packfile encoder will use reference deltas. // By default OFSDeltaObject is used. UseRefDeltas bool // OnlyDeletePacksOlderThan if set to non-zero value // selects only objects older than the time provided. OnlyDeletePacksOlderThan time.Time } func (r *Repository) RepackObjects(cfg *RepackConfig) (err error) { pos, ok := r.Storer.(storer.PackedObjectStorer) if !ok { return ErrPackedObjectsNotSupported } // Get the existing object packs. hs, err := pos.ObjectPacks() if err != nil { return err } // Create a new pack. nh, err := r.createNewObjectPack(cfg) if err != nil { return err } // Delete old packs. for _, h := range hs { // Skip if new hash is the same as an old one. if h == nh { continue } err = pos.DeleteOldObjectPackAndIndex(h, cfg.OnlyDeletePacksOlderThan) if err != nil { return err } } return nil } // createNewObjectPack is a helper for RepackObjects taking care // of creating a new pack. It is used so the the PackfileWriter // deferred close has the right scope. 
func (r *Repository) createNewObjectPack(cfg *RepackConfig) (h plumbing.Hash, err error) {
	// Collect every object reachable from any reference.
	ow := newObjectWalker(r.Storer)
	err = ow.walkAllRefs()
	if err != nil {
		return h, err
	}
	objs := make([]plumbing.Hash, 0, len(ow.seen))
	for h := range ow.seen {
		objs = append(objs, h)
	}
	pfw, ok := r.Storer.(storer.PackfileWriter)
	if !ok {
		return h, fmt.Errorf("Repository storer is not a storer.PackfileWriter")
	}
	wc, err := pfw.PackfileWriter()
	if err != nil {
		return h, err
	}
	// CheckClose propagates the Close error through the named return err.
	defer ioutil.CheckClose(wc, &err)
	scfg, err := r.Storer.Config()
	if err != nil {
		return h, err
	}
	enc := packfile.NewEncoder(wc, r.Storer, cfg.UseRefDeltas)
	h, err = enc.Encode(objs, scfg.Pack.Window)
	if err != nil {
		return h, err
	}

	// Delete the packed, loose objects.
	if los, ok := r.Storer.(storer.LooseObjectStorer); ok {
		err = los.ForEachObjectHash(func(hash plumbing.Hash) error {
			if ow.isSeen(hash) {
				err = los.DeleteLooseObject(hash)
				if err != nil {
					return err
				}
			}
			return nil
		})
		if err != nil {
			return h, err
		}
	}

	return h, err
}
golang-gopkg-src-d-go-git.v4-4.11.0/repository_test.go000066400000000000000000002123021345605224300225040ustar00rootroot00000000000000package git

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/armor"
	openpgperr "golang.org/x/crypto/openpgp/errors"

	"gopkg.in/src-d/go-git.v4/config"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/cache"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/plumbing/storer"
	"gopkg.in/src-d/go-git.v4/plumbing/transport"
	"gopkg.in/src-d/go-git.v4/storage"
	"gopkg.in/src-d/go-git.v4/storage/filesystem"
	"gopkg.in/src-d/go-git.v4/storage/memory"

	.
"gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4/memfs" "gopkg.in/src-d/go-billy.v4/osfs" "gopkg.in/src-d/go-billy.v4/util" "gopkg.in/src-d/go-git-fixtures.v3" ) type RepositorySuite struct { BaseSuite } var _ = Suite(&RepositorySuite{}) func (s *RepositorySuite) TestInit(c *C) { r, err := Init(memory.NewStorage(), memfs.New()) c.Assert(err, IsNil) c.Assert(r, NotNil) cfg, err := r.Config() c.Assert(err, IsNil) c.Assert(cfg.Core.IsBare, Equals, false) } func (s *RepositorySuite) TestInitNonStandardDotGit(c *C) { dir, err := ioutil.TempDir("", "init-non-standard") c.Assert(err, IsNil) c.Assert(os.RemoveAll(dir), IsNil) fs := osfs.New(dir) dot, _ := fs.Chroot("storage") storage := filesystem.NewStorage(dot, cache.NewObjectLRUDefault()) wt, _ := fs.Chroot("worktree") r, err := Init(storage, wt) c.Assert(err, IsNil) c.Assert(r, NotNil) f, err := fs.Open(fs.Join("worktree", ".git")) c.Assert(err, IsNil) all, err := ioutil.ReadAll(f) c.Assert(err, IsNil) c.Assert(string(all), Equals, fmt.Sprintf("gitdir: %s\n", filepath.Join("..", "storage"))) cfg, err := r.Config() c.Assert(err, IsNil) c.Assert(cfg.Core.Worktree, Equals, filepath.Join("..", "worktree")) } func (s *RepositorySuite) TestInitStandardDotGit(c *C) { dir, err := ioutil.TempDir("", "init-standard") c.Assert(err, IsNil) c.Assert(os.RemoveAll(dir), IsNil) fs := osfs.New(dir) dot, _ := fs.Chroot(".git") storage := filesystem.NewStorage(dot, cache.NewObjectLRUDefault()) r, err := Init(storage, fs) c.Assert(err, IsNil) c.Assert(r, NotNil) l, err := fs.ReadDir(".git") c.Assert(err, IsNil) c.Assert(len(l) > 0, Equals, true) cfg, err := r.Config() c.Assert(err, IsNil) c.Assert(cfg.Core.Worktree, Equals, "") } func (s *RepositorySuite) TestInitBare(c *C) { r, err := Init(memory.NewStorage(), nil) c.Assert(err, IsNil) c.Assert(r, NotNil) cfg, err := r.Config() c.Assert(err, IsNil) c.Assert(cfg.Core.IsBare, Equals, true) } func (s *RepositorySuite) TestInitAlreadyExists(c *C) { st := memory.NewStorage() r, err := Init(st, 
nil) c.Assert(err, IsNil) c.Assert(r, NotNil) r, err = Init(st, nil) c.Assert(err, Equals, ErrRepositoryAlreadyExists) c.Assert(r, IsNil) } func (s *RepositorySuite) TestOpen(c *C) { st := memory.NewStorage() r, err := Init(st, memfs.New()) c.Assert(err, IsNil) c.Assert(r, NotNil) r, err = Open(st, memfs.New()) c.Assert(err, IsNil) c.Assert(r, NotNil) } func (s *RepositorySuite) TestOpenBare(c *C) { st := memory.NewStorage() r, err := Init(st, nil) c.Assert(err, IsNil) c.Assert(r, NotNil) r, err = Open(st, nil) c.Assert(err, IsNil) c.Assert(r, NotNil) } func (s *RepositorySuite) TestOpenBareMissingWorktree(c *C) { st := memory.NewStorage() r, err := Init(st, memfs.New()) c.Assert(err, IsNil) c.Assert(r, NotNil) r, err = Open(st, nil) c.Assert(err, IsNil) c.Assert(r, NotNil) } func (s *RepositorySuite) TestOpenNotExists(c *C) { r, err := Open(memory.NewStorage(), nil) c.Assert(err, Equals, ErrRepositoryNotExists) c.Assert(r, IsNil) } func (s *RepositorySuite) TestClone(c *C) { r, err := Clone(memory.NewStorage(), nil, &CloneOptions{ URL: s.GetBasicLocalRepositoryURL(), }) c.Assert(err, IsNil) remotes, err := r.Remotes() c.Assert(err, IsNil) c.Assert(remotes, HasLen, 1) } func (s *RepositorySuite) TestCloneContext(c *C) { ctx, cancel := context.WithCancel(context.Background()) cancel() r, err := CloneContext(ctx, memory.NewStorage(), nil, &CloneOptions{ URL: s.GetBasicLocalRepositoryURL(), }) c.Assert(r, NotNil) c.Assert(err, ErrorMatches, ".* context canceled") } func (s *RepositorySuite) TestCloneWithTags(c *C) { url := s.GetLocalRepositoryURL( fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(), ) r, err := Clone(memory.NewStorage(), nil, &CloneOptions{URL: url, Tags: NoTags}) c.Assert(err, IsNil) remotes, err := r.Remotes() c.Assert(err, IsNil) c.Assert(remotes, HasLen, 1) i, err := r.References() c.Assert(err, IsNil) var count int i.ForEach(func(r *plumbing.Reference) error { count++; return nil }) c.Assert(count, Equals, 3) } func (s 
*RepositorySuite) TestCreateRemoteAndRemote(c *C) { r, _ := Init(memory.NewStorage(), nil) remote, err := r.CreateRemote(&config.RemoteConfig{ Name: "foo", URLs: []string{"http://foo/foo.git"}, }) c.Assert(err, IsNil) c.Assert(remote.Config().Name, Equals, "foo") alt, err := r.Remote("foo") c.Assert(err, IsNil) c.Assert(alt, Not(Equals), remote) c.Assert(alt.Config().Name, Equals, "foo") } func (s *RepositorySuite) TestCreateRemoteInvalid(c *C) { r, _ := Init(memory.NewStorage(), nil) remote, err := r.CreateRemote(&config.RemoteConfig{}) c.Assert(err, Equals, config.ErrRemoteConfigEmptyName) c.Assert(remote, IsNil) } func (s *RepositorySuite) TestCreateRemoteAnonymous(c *C) { r, _ := Init(memory.NewStorage(), nil) remote, err := r.CreateRemoteAnonymous(&config.RemoteConfig{ Name: "anonymous", URLs: []string{"http://foo/foo.git"}, }) c.Assert(err, IsNil) c.Assert(remote.Config().Name, Equals, "anonymous") } func (s *RepositorySuite) TestCreateRemoteAnonymousInvalidName(c *C) { r, _ := Init(memory.NewStorage(), nil) remote, err := r.CreateRemoteAnonymous(&config.RemoteConfig{ Name: "not_anonymous", URLs: []string{"http://foo/foo.git"}, }) c.Assert(err, Equals, ErrAnonymousRemoteName) c.Assert(remote, IsNil) } func (s *RepositorySuite) TestCreateRemoteAnonymousInvalid(c *C) { r, _ := Init(memory.NewStorage(), nil) remote, err := r.CreateRemoteAnonymous(&config.RemoteConfig{}) c.Assert(err, Equals, config.ErrRemoteConfigEmptyName) c.Assert(remote, IsNil) } func (s *RepositorySuite) TestDeleteRemote(c *C) { r, _ := Init(memory.NewStorage(), nil) _, err := r.CreateRemote(&config.RemoteConfig{ Name: "foo", URLs: []string{"http://foo/foo.git"}, }) c.Assert(err, IsNil) err = r.DeleteRemote("foo") c.Assert(err, IsNil) alt, err := r.Remote("foo") c.Assert(err, Equals, ErrRemoteNotFound) c.Assert(alt, IsNil) } func (s *RepositorySuite) TestCreateBranchAndBranch(c *C) { r, _ := Init(memory.NewStorage(), nil) testBranch := &config.Branch{ Name: "foo", Remote: "origin", Merge: 
"refs/heads/foo", } err := r.CreateBranch(testBranch) c.Assert(err, IsNil) cfg, err := r.Config() c.Assert(err, IsNil) c.Assert(len(cfg.Branches), Equals, 1) branch := cfg.Branches["foo"] c.Assert(branch.Name, Equals, testBranch.Name) c.Assert(branch.Remote, Equals, testBranch.Remote) c.Assert(branch.Merge, Equals, testBranch.Merge) branch, err = r.Branch("foo") c.Assert(err, IsNil) c.Assert(branch.Name, Equals, testBranch.Name) c.Assert(branch.Remote, Equals, testBranch.Remote) c.Assert(branch.Merge, Equals, testBranch.Merge) } func (s *RepositorySuite) TestCreateBranchUnmarshal(c *C) { r, _ := Init(memory.NewStorage(), nil) expected := []byte(`[core] bare = true [remote "foo"] url = http://foo/foo.git fetch = +refs/heads/*:refs/remotes/foo/* [branch "foo"] remote = origin merge = refs/heads/foo [branch "master"] remote = origin merge = refs/heads/master `) _, err := r.CreateRemote(&config.RemoteConfig{ Name: "foo", URLs: []string{"http://foo/foo.git"}, }) c.Assert(err, IsNil) testBranch1 := &config.Branch{ Name: "master", Remote: "origin", Merge: "refs/heads/master", } testBranch2 := &config.Branch{ Name: "foo", Remote: "origin", Merge: "refs/heads/foo", } err = r.CreateBranch(testBranch1) err = r.CreateBranch(testBranch2) c.Assert(err, IsNil) cfg, err := r.Config() c.Assert(err, IsNil) marshaled, err := cfg.Marshal() c.Assert(string(expected), Equals, string(marshaled)) } func (s *RepositorySuite) TestBranchInvalid(c *C) { r, _ := Init(memory.NewStorage(), nil) branch, err := r.Branch("foo") c.Assert(err, NotNil) c.Assert(branch, IsNil) } func (s *RepositorySuite) TestCreateBranchInvalid(c *C) { r, _ := Init(memory.NewStorage(), nil) err := r.CreateBranch(&config.Branch{}) c.Assert(err, NotNil) testBranch := &config.Branch{ Name: "foo", Remote: "origin", Merge: "refs/heads/foo", } err = r.CreateBranch(testBranch) c.Assert(err, IsNil) err = r.CreateBranch(testBranch) c.Assert(err, NotNil) } func (s *RepositorySuite) TestDeleteBranch(c *C) { r, _ := 
Init(memory.NewStorage(), nil) testBranch := &config.Branch{ Name: "foo", Remote: "origin", Merge: "refs/heads/foo", } err := r.CreateBranch(testBranch) c.Assert(err, IsNil) err = r.DeleteBranch("foo") c.Assert(err, IsNil) b, err := r.Branch("foo") c.Assert(err, Equals, ErrBranchNotFound) c.Assert(b, IsNil) err = r.DeleteBranch("foo") c.Assert(err, Equals, ErrBranchNotFound) } func (s *RepositorySuite) TestPlainInit(c *C) { dir, err := ioutil.TempDir("", "plain-init") c.Assert(err, IsNil) defer os.RemoveAll(dir) r, err := PlainInit(dir, true) c.Assert(err, IsNil) c.Assert(r, NotNil) cfg, err := r.Config() c.Assert(err, IsNil) c.Assert(cfg.Core.IsBare, Equals, true) } func (s *RepositorySuite) TestPlainInitAlreadyExists(c *C) { dir, err := ioutil.TempDir("", "plain-init") c.Assert(err, IsNil) defer os.RemoveAll(dir) r, err := PlainInit(dir, true) c.Assert(err, IsNil) c.Assert(r, NotNil) r, err = PlainInit(dir, true) c.Assert(err, Equals, ErrRepositoryAlreadyExists) c.Assert(r, IsNil) } func (s *RepositorySuite) TestPlainOpen(c *C) { dir, err := ioutil.TempDir("", "plain-open") c.Assert(err, IsNil) defer os.RemoveAll(dir) r, err := PlainInit(dir, false) c.Assert(err, IsNil) c.Assert(r, NotNil) r, err = PlainOpen(dir) c.Assert(err, IsNil) c.Assert(r, NotNil) } func (s *RepositorySuite) TestPlainOpenBare(c *C) { dir, err := ioutil.TempDir("", "plain-open") c.Assert(err, IsNil) defer os.RemoveAll(dir) r, err := PlainInit(dir, true) c.Assert(err, IsNil) c.Assert(r, NotNil) r, err = PlainOpen(dir) c.Assert(err, IsNil) c.Assert(r, NotNil) } func (s *RepositorySuite) TestPlainOpenNotBare(c *C) { dir, err := ioutil.TempDir("", "plain-open") c.Assert(err, IsNil) defer os.RemoveAll(dir) r, err := PlainInit(dir, false) c.Assert(err, IsNil) c.Assert(r, NotNil) r, err = PlainOpen(filepath.Join(dir, ".git")) c.Assert(err, IsNil) c.Assert(r, NotNil) } func (s *RepositorySuite) testPlainOpenGitFile(c *C, f func(string, string) string) { dir, err := ioutil.TempDir("", "plain-open") 
c.Assert(err, IsNil) defer os.RemoveAll(dir) r, err := PlainInit(dir, true) c.Assert(err, IsNil) c.Assert(r, NotNil) altDir, err := ioutil.TempDir("", "plain-open") c.Assert(err, IsNil) defer os.RemoveAll(altDir) err = ioutil.WriteFile(filepath.Join(altDir, ".git"), []byte(f(dir, altDir)), 0644) c.Assert(err, IsNil) r, err = PlainOpen(altDir) c.Assert(err, IsNil) c.Assert(r, NotNil) } func (s *RepositorySuite) TestPlainOpenBareAbsoluteGitDirFile(c *C) { s.testPlainOpenGitFile(c, func(dir, altDir string) string { return fmt.Sprintf("gitdir: %s\n", dir) }) } func (s *RepositorySuite) TestPlainOpenBareAbsoluteGitDirFileNoEOL(c *C) { s.testPlainOpenGitFile(c, func(dir, altDir string) string { return fmt.Sprintf("gitdir: %s", dir) }) } func (s *RepositorySuite) TestPlainOpenBareRelativeGitDirFile(c *C) { s.testPlainOpenGitFile(c, func(dir, altDir string) string { dir, err := filepath.Rel(altDir, dir) c.Assert(err, IsNil) return fmt.Sprintf("gitdir: %s\n", dir) }) } func (s *RepositorySuite) TestPlainOpenBareRelativeGitDirFileNoEOL(c *C) { s.testPlainOpenGitFile(c, func(dir, altDir string) string { dir, err := filepath.Rel(altDir, dir) c.Assert(err, IsNil) return fmt.Sprintf("gitdir: %s\n", dir) }) } func (s *RepositorySuite) TestPlainOpenBareRelativeGitDirFileTrailingGarbage(c *C) { dir, err := ioutil.TempDir("", "plain-open") c.Assert(err, IsNil) defer os.RemoveAll(dir) r, err := PlainInit(dir, true) c.Assert(err, IsNil) c.Assert(r, NotNil) altDir, err := ioutil.TempDir("", "plain-open") c.Assert(err, IsNil) err = ioutil.WriteFile(filepath.Join(altDir, ".git"), []byte(fmt.Sprintf("gitdir: %s\nTRAILING", altDir)), 0644) c.Assert(err, IsNil) r, err = PlainOpen(altDir) c.Assert(err, Equals, ErrRepositoryNotExists) c.Assert(r, IsNil) } func (s *RepositorySuite) TestPlainOpenBareRelativeGitDirFileBadPrefix(c *C) { dir, err := ioutil.TempDir("", "plain-open") c.Assert(err, IsNil) defer os.RemoveAll(dir) r, err := PlainInit(dir, true) c.Assert(err, IsNil) c.Assert(r, NotNil) 
	// Tail of TestPlainOpenBareRelativeGitDirFileBadPrefix (header on the
	// previous source line): write a ".git" file with a bogus "xgitdir:"
	// prefix and expect PlainOpen to report a gitdir error.
	altDir, err := ioutil.TempDir("", "plain-open")
	c.Assert(err, IsNil)
	err = ioutil.WriteFile(filepath.Join(altDir, ".git"), []byte(fmt.Sprintf("xgitdir: %s\n", dir)), 0644)
	c.Assert(err, IsNil)

	r, err = PlainOpen(altDir)
	c.Assert(err, ErrorMatches, ".*gitdir.*")
	c.Assert(r, IsNil)
}

// TestPlainOpenNotExists: opening a path with no repository fails.
func (s *RepositorySuite) TestPlainOpenNotExists(c *C) {
	r, err := PlainOpen("/not-exists/")
	c.Assert(err, Equals, ErrRepositoryNotExists)
	c.Assert(r, IsNil)
}

// TestPlainOpenDetectDotGit: with DetectDotGit set, the repository is found
// by walking up from a nested subdirectory.
func (s *RepositorySuite) TestPlainOpenDetectDotGit(c *C) {
	dir, err := ioutil.TempDir("", "plain-open")
	c.Assert(err, IsNil)
	defer os.RemoveAll(dir)

	subdir := filepath.Join(dir, "a", "b")
	err = os.MkdirAll(subdir, 0755)
	c.Assert(err, IsNil)

	r, err := PlainInit(dir, false)
	c.Assert(err, IsNil)
	c.Assert(r, NotNil)

	opt := &PlainOpenOptions{DetectDotGit: true}
	r, err = PlainOpenWithOptions(subdir, opt)
	c.Assert(err, IsNil)
	c.Assert(r, NotNil)
}

// TestPlainOpenNotExistsDetectDotGit: DetectDotGit still fails when no
// repository exists above the directory.
func (s *RepositorySuite) TestPlainOpenNotExistsDetectDotGit(c *C) {
	dir, err := ioutil.TempDir("", "plain-open")
	c.Assert(err, IsNil)
	defer os.RemoveAll(dir)

	opt := &PlainOpenOptions{DetectDotGit: true}
	r, err := PlainOpenWithOptions(dir, opt)
	c.Assert(err, Equals, ErrRepositoryNotExists)
	c.Assert(r, IsNil)
}

// TestPlainClone: a plain clone sets up one remote and a "master" branch
// entry in the config.
func (s *RepositorySuite) TestPlainClone(c *C) {
	r, err := PlainClone(c.MkDir(), false, &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	remotes, err := r.Remotes()
	c.Assert(err, IsNil)
	c.Assert(remotes, HasLen, 1)

	cfg, err := r.Config()
	c.Assert(err, IsNil)
	c.Assert(cfg.Branches, HasLen, 1)
	c.Assert(cfg.Branches["master"].Name, Equals, "master")
}

// TestPlainCloneWithRemoteName: RemoteName overrides the default "origin".
func (s *RepositorySuite) TestPlainCloneWithRemoteName(c *C) {
	r, err := PlainClone(c.MkDir(), false, &CloneOptions{
		URL:        s.GetBasicLocalRepositoryURL(),
		RemoteName: "test",
	})
	c.Assert(err, IsNil)

	remote, err := r.Remote("test")
	c.Assert(err, IsNil)
	c.Assert(remote, NotNil)
}

// TestPlainCloneOverExistingGitDirectory: cloning into an already
// initialised repository fails with ErrRepositoryAlreadyExists.
func (s *RepositorySuite) TestPlainCloneOverExistingGitDirectory(c *C) {
	tmpDir := c.MkDir()
	r, err := PlainInit(tmpDir, false)
	c.Assert(r, NotNil)
	c.Assert(err, IsNil)

	r, err = PlainClone(tmpDir, false, &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(r, IsNil)
	c.Assert(err, Equals, ErrRepositoryAlreadyExists)
}

// TestPlainCloneContextCancel: a pre-cancelled context aborts the clone.
func (s *RepositorySuite) TestPlainCloneContextCancel(c *C) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	r, err := PlainCloneContext(ctx, c.MkDir(), false, &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(r, NotNil)
	c.Assert(err, ErrorMatches, ".* context canceled")
}

// TestPlainCloneContextNonExistentWithExistentDir: a failed clone into a
// pre-existing directory leaves the (empty) directory in place.
func (s *RepositorySuite) TestPlainCloneContextNonExistentWithExistentDir(c *C) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	tmpDir := c.MkDir()
	repoDir := tmpDir

	r, err := PlainCloneContext(ctx, repoDir, false, &CloneOptions{
		URL: "incorrectOnPurpose",
	})
	c.Assert(r, NotNil)
	c.Assert(err, Equals, transport.ErrRepositoryNotFound)

	_, err = os.Stat(repoDir)
	c.Assert(os.IsNotExist(err), Equals, false)

	names, err := ioutil.ReadDir(repoDir)
	c.Assert(err, IsNil)
	c.Assert(names, HasLen, 0)
}

// TestPlainCloneContextNonExistentWithNonExistentDir: a directory created by
// the failed clone itself is removed again afterwards.
func (s *RepositorySuite) TestPlainCloneContextNonExistentWithNonExistentDir(c *C) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	tmpDir := c.MkDir()
	repoDir := filepath.Join(tmpDir, "repoDir")

	r, err := PlainCloneContext(ctx, repoDir, false, &CloneOptions{
		URL: "incorrectOnPurpose",
	})
	c.Assert(r, NotNil)
	c.Assert(err, Equals, transport.ErrRepositoryNotFound)

	_, err = os.Stat(repoDir)
	c.Assert(os.IsNotExist(err), Equals, true)
}

// TestPlainCloneContextNonExistentWithNotDir: the target path being a
// regular file makes the clone fail and leaves the file untouched.
func (s *RepositorySuite) TestPlainCloneContextNonExistentWithNotDir(c *C) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	tmpDir := c.MkDir()
	repoDir := filepath.Join(tmpDir, "repoDir")

	f, err := os.Create(repoDir)
	c.Assert(err, IsNil)
	c.Assert(f.Close(), IsNil)

	r, err := PlainCloneContext(ctx, repoDir, false, &CloneOptions{
		URL: "incorrectOnPurpose",
	})
	c.Assert(r, IsNil)
	c.Assert(err, ErrorMatches, ".*not a directory.*")

	fi, err := os.Stat(repoDir)
	c.Assert(err, IsNil)
	c.Assert(fi.IsDir(), Equals, false)
}
// TestPlainCloneContextNonExistentWithNotEmptyDir: a failed clone must not
// delete pre-existing files in a non-empty target directory.
func (s *RepositorySuite) TestPlainCloneContextNonExistentWithNotEmptyDir(c *C) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	tmpDir := c.MkDir()
	repoDirPath := filepath.Join(tmpDir, "repoDir")
	err := os.Mkdir(repoDirPath, 0777)
	c.Assert(err, IsNil)

	dummyFile := filepath.Join(repoDirPath, "dummyFile")
	err = ioutil.WriteFile(dummyFile, []byte(fmt.Sprint("dummyContent")), 0644)
	c.Assert(err, IsNil)

	r, err := PlainCloneContext(ctx, repoDirPath, false, &CloneOptions{
		URL: "incorrectOnPurpose",
	})
	c.Assert(r, NotNil)
	c.Assert(err, Equals, transport.ErrRepositoryNotFound)

	// The pre-existing file must survive the failed clone.
	_, err = os.Stat(dummyFile)
	c.Assert(err, IsNil)
}

// TestPlainCloneContextNonExistingOverExistingGitDirectory: an existing
// repository wins over the (failing) clone attempt.
func (s *RepositorySuite) TestPlainCloneContextNonExistingOverExistingGitDirectory(c *C) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	tmpDir := c.MkDir()
	r, err := PlainInit(tmpDir, false)
	c.Assert(r, NotNil)
	c.Assert(err, IsNil)

	r, err = PlainCloneContext(ctx, tmpDir, false, &CloneOptions{
		URL: "incorrectOnPurpose",
	})
	c.Assert(r, IsNil)
	c.Assert(err, Equals, ErrRepositoryAlreadyExists)
}

// TestPlainCloneWithRecurseSubmodules: cloning the submodule fixture with
// recursion registers two submodules in the config. Skipped in -short mode.
func (s *RepositorySuite) TestPlainCloneWithRecurseSubmodules(c *C) {
	if testing.Short() {
		c.Skip("skipping test in short mode.")
	}

	dir, err := ioutil.TempDir("", "plain-clone-submodule")
	c.Assert(err, IsNil)
	defer os.RemoveAll(dir)

	path := fixtures.ByTag("submodule").One().Worktree().Root()
	r, err := PlainClone(dir, false, &CloneOptions{
		URL:               path,
		RecurseSubmodules: DefaultSubmoduleRecursionDepth,
	})
	c.Assert(err, IsNil)

	cfg, err := r.Config()
	c.Assert(err, IsNil)
	c.Assert(cfg.Remotes, HasLen, 1)
	c.Assert(cfg.Branches, HasLen, 1)
	c.Assert(cfg.Submodules, HasLen, 2)
}

// TestPlainCloneNoCheckout: NoCheckout leaves only the .git directory in the
// worktree while HEAD still points at the expected commit.
func (s *RepositorySuite) TestPlainCloneNoCheckout(c *C) {
	dir, err := ioutil.TempDir("", "plain-clone-no-checkout")
	c.Assert(err, IsNil)
	defer os.RemoveAll(dir)

	path := fixtures.ByTag("submodule").One().Worktree().Root()
	r, err := PlainClone(dir, false, &CloneOptions{
		URL:               path,
		NoCheckout:        true,
		RecurseSubmodules: DefaultSubmoduleRecursionDepth,
	})
	c.Assert(err, IsNil)

	h, err := r.Head()
	c.Assert(err, IsNil)
	c.Assert(h.Hash().String(), Equals, "b685400c1f9316f350965a5993d350bc746b0bf4")

	fi, err := osfs.New(dir).ReadDir("")
	c.Assert(err, IsNil)
	c.Assert(fi, HasLen, 1) // .git
}

// TestFetch: fetching from the basic fixture creates the remote-tracking
// branch but no local HEAD.
func (s *RepositorySuite) TestFetch(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	_, err := r.CreateRemote(&config.RemoteConfig{
		Name: DefaultRemoteName,
		URLs: []string{s.GetBasicLocalRepositoryURL()},
	})
	c.Assert(err, IsNil)
	c.Assert(r.Fetch(&FetchOptions{}), IsNil)

	remotes, err := r.Remotes()
	c.Assert(err, IsNil)
	c.Assert(remotes, HasLen, 1)

	_, err = r.Head()
	c.Assert(err, Equals, plumbing.ErrReferenceNotFound)

	branch, err := r.Reference("refs/remotes/origin/master", false)
	c.Assert(err, IsNil)
	c.Assert(branch, NotNil)
	c.Assert(branch.Type(), Equals, plumbing.HashReference)
	c.Assert(branch.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
}

// TestFetchContext: a pre-cancelled context makes FetchContext fail.
func (s *RepositorySuite) TestFetchContext(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	_, err := r.CreateRemote(&config.RemoteConfig{
		Name: DefaultRemoteName,
		URLs: []string{s.GetBasicLocalRepositoryURL()},
	})
	c.Assert(err, IsNil)

	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	c.Assert(r.FetchContext(ctx, &FetchOptions{}), NotNil)
}

// TestCloneWithProgress: the Progress writer receives output during clone.
func (s *RepositorySuite) TestCloneWithProgress(c *C) {
	fs := memfs.New()

	buf := bytes.NewBuffer(nil)
	_, err := Clone(memory.NewStorage(), fs, &CloneOptions{
		URL:      s.GetBasicLocalRepositoryURL(),
		Progress: buf,
	})
	c.Assert(err, IsNil)
	c.Assert(buf.Len(), Not(Equals), 0)
}

// TestCloneDeep: a full (non-shallow) clone into an in-memory worktree;
// verifies HEAD, branch and remote-tracking references (continues on the
// next source line).
func (s *RepositorySuite) TestCloneDeep(c *C) {
	fs := memfs.New()
	r, _ := Init(memory.NewStorage(), fs)

	head, err := r.Head()
	c.Assert(err, Equals, plumbing.ErrReferenceNotFound)
	c.Assert(head, IsNil)

	err = r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	remotes, err := r.Remotes()
	c.Assert(err, IsNil)
	c.Assert(remotes, HasLen, 1)

	head, err = r.Reference(plumbing.HEAD, false)
	c.Assert(err, IsNil)
	// Tail of TestCloneDeep: HEAD is symbolic and both the local branch and
	// the remote-tracking branch point at the fixture's tip commit.
	c.Assert(head, NotNil)
	c.Assert(head.Type(), Equals, plumbing.SymbolicReference)
	c.Assert(head.Target().String(), Equals, "refs/heads/master")

	branch, err := r.Reference(head.Target(), false)
	c.Assert(err, IsNil)
	c.Assert(branch, NotNil)
	c.Assert(branch.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")

	branch, err = r.Reference("refs/remotes/origin/master", false)
	c.Assert(err, IsNil)
	c.Assert(branch, NotNil)
	c.Assert(branch.Type(), Equals, plumbing.HashReference)
	c.Assert(branch.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")

	fi, err := fs.ReadDir("")
	c.Assert(err, IsNil)
	c.Assert(fi, HasLen, 8)
}

// TestCloneConfig: a bare clone writes remote and branch sections into the
// repository config.
func (s *RepositorySuite) TestCloneConfig(c *C) {
	r, _ := Init(memory.NewStorage(), nil)

	head, err := r.Head()
	c.Assert(err, Equals, plumbing.ErrReferenceNotFound)
	c.Assert(head, IsNil)

	err = r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	cfg, err := r.Config()
	c.Assert(err, IsNil)

	c.Assert(cfg.Core.IsBare, Equals, true)
	c.Assert(cfg.Remotes, HasLen, 1)
	c.Assert(cfg.Remotes["origin"].Name, Equals, "origin")
	c.Assert(cfg.Remotes["origin"].URLs, HasLen, 1)
	c.Assert(cfg.Branches, HasLen, 1)
	c.Assert(cfg.Branches["master"].Name, Equals, "master")
}

// TestCloneSingleBranchAndNonHEAD: SingleBranch with an explicit non-HEAD
// reference tracks only that branch.
func (s *RepositorySuite) TestCloneSingleBranchAndNonHEAD(c *C) {
	r, _ := Init(memory.NewStorage(), nil)

	head, err := r.Head()
	c.Assert(err, Equals, plumbing.ErrReferenceNotFound)
	c.Assert(head, IsNil)

	err = r.clone(context.Background(), &CloneOptions{
		URL:           s.GetBasicLocalRepositoryURL(),
		ReferenceName: plumbing.ReferenceName("refs/heads/branch"),
		SingleBranch:  true,
	})
	c.Assert(err, IsNil)

	remotes, err := r.Remotes()
	c.Assert(err, IsNil)
	c.Assert(remotes, HasLen, 1)

	cfg, err := r.Config()
	c.Assert(err, IsNil)
	c.Assert(cfg.Branches, HasLen, 1)
	c.Assert(cfg.Branches["branch"].Name, Equals, "branch")
	c.Assert(cfg.Branches["branch"].Remote, Equals, "origin")
	c.Assert(cfg.Branches["branch"].Merge, Equals, plumbing.ReferenceName("refs/heads/branch"))

	head, err = r.Reference(plumbing.HEAD, false)
	c.Assert(err, IsNil)
	c.Assert(head, NotNil)
	c.Assert(head.Type(), Equals, plumbing.SymbolicReference)
	c.Assert(head.Target().String(), Equals, "refs/heads/branch")

	branch, err := r.Reference(head.Target(), false)
	c.Assert(err, IsNil)
	c.Assert(branch, NotNil)
	c.Assert(branch.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881")

	branch, err = r.Reference("refs/remotes/origin/branch", false)
	c.Assert(err, IsNil)
	c.Assert(branch, NotNil)
	c.Assert(branch.Type(), Equals, plumbing.HashReference)
	c.Assert(branch.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881")
}

// TestCloneSingleBranch: SingleBranch without an explicit reference tracks
// only the remote HEAD branch (continues on the next source line).
func (s *RepositorySuite) TestCloneSingleBranch(c *C) {
	r, _ := Init(memory.NewStorage(), nil)

	head, err := r.Head()
	c.Assert(err, Equals, plumbing.ErrReferenceNotFound)
	c.Assert(head, IsNil)

	err = r.clone(context.Background(), &CloneOptions{
		URL:          s.GetBasicLocalRepositoryURL(),
		SingleBranch: true,
	})
	c.Assert(err, IsNil)

	remotes, err := r.Remotes()
	c.Assert(err, IsNil)
	c.Assert(remotes, HasLen, 1)

	cfg, err := r.Config()
	c.Assert(err, IsNil)
	c.Assert(cfg.Branches, HasLen, 1)
	c.Assert(cfg.Branches["master"].Name, Equals, "master")
	c.Assert(cfg.Branches["master"].Remote, Equals, "origin")
	c.Assert(cfg.Branches["master"].Merge, Equals, plumbing.ReferenceName("refs/heads/master"))

	head, err = r.Reference(plumbing.HEAD, false)
	c.Assert(err, IsNil)
	c.Assert(head, NotNil)
	c.Assert(head.Type(), Equals, plumbing.SymbolicReference)
	c.Assert(head.Target().String(), Equals, "refs/heads/master")

	branch, err := r.Reference(head.Target(), false)
	c.Assert(err, IsNil)
	c.Assert(branch, NotNil)
	c.Assert(branch.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")

	branch, err = r.Reference("refs/remotes/origin/master", false)
	c.Assert(err, IsNil)
	c.Assert(branch, NotNil)
	c.Assert(branch.Type(), Equals, plumbing.HashReference)
	c.Assert(branch.Hash().String(), Equals,
		// Tail of TestCloneSingleBranch: remote-tracking master also points
		// at the fixture's tip commit.
		"6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
}

// TestCloneSingleTag: cloning a single tag restricts the fetch refspec to
// exactly that tag.
func (s *RepositorySuite) TestCloneSingleTag(c *C) {
	r, _ := Init(memory.NewStorage(), nil)

	url := s.GetLocalRepositoryURL(
		fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
	)

	err := r.clone(context.Background(), &CloneOptions{
		URL:           url,
		SingleBranch:  true,
		ReferenceName: plumbing.ReferenceName("refs/tags/commit-tag"),
	})
	c.Assert(err, IsNil)

	branch, err := r.Reference("refs/tags/commit-tag", false)
	c.Assert(err, IsNil)
	c.Assert(branch, NotNil)

	conf, err := r.Config()
	c.Assert(err, IsNil)
	originRemote := conf.Remotes["origin"]
	c.Assert(originRemote, NotNil)
	c.Assert(originRemote.Fetch, HasLen, 1)
	c.Assert(originRemote.Fetch[0].String(), Equals, "+refs/tags/commit-tag:refs/tags/commit-tag")
}

// TestCloneDetachedHEAD: cloning a tag reference yields a detached (hash)
// HEAD and no branch config; all 28 objects are fetched.
func (s *RepositorySuite) TestCloneDetachedHEAD(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL:           s.GetBasicLocalRepositoryURL(),
		ReferenceName: plumbing.ReferenceName("refs/tags/v1.0.0"),
	})
	c.Assert(err, IsNil)

	cfg, err := r.Config()
	c.Assert(err, IsNil)
	c.Assert(cfg.Branches, HasLen, 0)

	head, err := r.Reference(plumbing.HEAD, false)
	c.Assert(err, IsNil)
	c.Assert(head, NotNil)
	c.Assert(head.Type(), Equals, plumbing.HashReference)
	c.Assert(head.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")

	count := 0
	objects, err := r.Objects()
	c.Assert(err, IsNil)
	objects.ForEach(func(object.Object) error { count++; return nil })
	c.Assert(count, Equals, 28)
}

// TestCloneDetachedHEADAndSingle: SingleBranch does not change the object
// count for a tag clone of this fixture.
func (s *RepositorySuite) TestCloneDetachedHEADAndSingle(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL:           s.GetBasicLocalRepositoryURL(),
		ReferenceName: plumbing.ReferenceName("refs/tags/v1.0.0"),
		SingleBranch:  true,
	})
	c.Assert(err, IsNil)

	cfg, err := r.Config()
	c.Assert(err, IsNil)
	c.Assert(cfg.Branches, HasLen, 0)

	head, err := r.Reference(plumbing.HEAD, false)
	c.Assert(err, IsNil)
	c.Assert(head, NotNil)
	c.Assert(head.Type(), Equals, plumbing.HashReference)
	c.Assert(head.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")

	count := 0
	objects, err := r.Objects()
	c.Assert(err, IsNil)
	objects.ForEach(func(object.Object) error { count++; return nil })
	c.Assert(count, Equals, 28)
}

// TestCloneDetachedHEADAndShallow: Depth 1 reduces the object count to 15.
func (s *RepositorySuite) TestCloneDetachedHEADAndShallow(c *C) {
	r, _ := Init(memory.NewStorage(), memfs.New())
	err := r.clone(context.Background(), &CloneOptions{
		URL:           s.GetBasicLocalRepositoryURL(),
		ReferenceName: plumbing.ReferenceName("refs/tags/v1.0.0"),
		Depth:         1,
	})
	c.Assert(err, IsNil)

	cfg, err := r.Config()
	c.Assert(err, IsNil)
	c.Assert(cfg.Branches, HasLen, 0)

	head, err := r.Reference(plumbing.HEAD, false)
	c.Assert(err, IsNil)
	c.Assert(head, NotNil)
	c.Assert(head.Type(), Equals, plumbing.HashReference)
	c.Assert(head.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")

	count := 0
	objects, err := r.Objects()
	c.Assert(err, IsNil)
	objects.ForEach(func(object.Object) error { count++; return nil })
	c.Assert(count, Equals, 15)
}

// TestCloneDetachedHEADAnnotatedTag: an annotated tag clone detaches HEAD at
// the tag's target commit.
func (s *RepositorySuite) TestCloneDetachedHEADAnnotatedTag(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL:           s.GetLocalRepositoryURL(fixtures.ByTag("tags").One()),
		ReferenceName: plumbing.ReferenceName("refs/tags/annotated-tag"),
	})
	c.Assert(err, IsNil)

	cfg, err := r.Config()
	c.Assert(err, IsNil)
	c.Assert(cfg.Branches, HasLen, 0)

	head, err := r.Reference(plumbing.HEAD, false)
	c.Assert(err, IsNil)
	c.Assert(head, NotNil)
	c.Assert(head.Type(), Equals, plumbing.HashReference)
	c.Assert(head.Hash().String(), Equals, "f7b877701fbf855b44c0a9e86f3fdce2c298b07f")

	count := 0
	objects, err := r.Objects()
	c.Assert(err, IsNil)
	objects.ForEach(func(object.Object) error { count++; return nil })
	c.Assert(count, Equals, 7)
}

// TestPush: pushes the suite repository to a freshly initialised bare server
// (the body continues on the next source line).
func (s *RepositorySuite) TestPush(c *C) {
	url := c.MkDir()
	server, err := PlainInit(url, true)
	c.Assert(err, IsNil)

	_, err = s.Repository.CreateRemote(&config.RemoteConfig{
		Name: "test",
		URLs:
		// Tail of TestPush: push both branches and verify them on the
		// server and as remote-tracking refs locally.
		[]string{url},
	})
	c.Assert(err, IsNil)

	err = s.Repository.Push(&PushOptions{
		RemoteName: "test",
	})
	c.Assert(err, IsNil)

	AssertReferences(c, server, map[string]string{
		"refs/heads/master": "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
		"refs/heads/branch": "e8d3ffab552895c19b9fcf7aa264d277cde33881",
	})

	AssertReferences(c, s.Repository, map[string]string{
		"refs/remotes/test/master": "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
		"refs/remotes/test/branch": "e8d3ffab552895c19b9fcf7aa264d277cde33881",
	})
}

// TestPushContext: a pre-cancelled context makes PushContext fail.
func (s *RepositorySuite) TestPushContext(c *C) {
	url := c.MkDir()
	_, err := PlainInit(url, true)
	c.Assert(err, IsNil)

	_, err = s.Repository.CreateRemote(&config.RemoteConfig{
		Name: "foo",
		URLs: []string{url},
	})
	c.Assert(err, IsNil)

	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	err = s.Repository.PushContext(ctx, &PushOptions{
		RemoteName: "foo",
	})
	c.Assert(err, NotNil)
}

// installPreReceiveHook installs a pre-receive hook in the .git
// directory at path which prints message m before exiting
// successfully.
func installPreReceiveHook(c *C, path, m string) {
	hooks := filepath.Join(path, "hooks")
	err := os.MkdirAll(hooks, 0777)
	c.Assert(err, IsNil)

	err = ioutil.WriteFile(filepath.Join(hooks, "pre-receive"), preReceiveHook(m), 0777)
	c.Assert(err, IsNil)
}

// TestPushWithProgress: the server-side pre-receive hook output is captured
// through the Progress writer.
func (s *RepositorySuite) TestPushWithProgress(c *C) {
	url := c.MkDir()
	server, err := PlainInit(url, true)
	c.Assert(err, IsNil)

	m := "Receiving..."
	installPreReceiveHook(c, url, m)

	_, err = s.Repository.CreateRemote(&config.RemoteConfig{
		Name: "bar",
		URLs: []string{url},
	})
	c.Assert(err, IsNil)

	var p bytes.Buffer
	err = s.Repository.Push(&PushOptions{
		RemoteName: "bar",
		Progress:   &p,
	})
	c.Assert(err, IsNil)

	AssertReferences(c, server, map[string]string{
		"refs/heads/master": "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
		"refs/heads/branch": "e8d3ffab552895c19b9fcf7aa264d277cde33881",
	})

	c.Assert((&p).Bytes(), DeepEquals, []byte(m))
}

// TestPushDepth: a commit made on top of a shallow (depth 1) clone can still
// be pushed back to the server.
func (s *RepositorySuite) TestPushDepth(c *C) {
	url := c.MkDir()
	server, err := PlainClone(url, true, &CloneOptions{
		URL: fixtures.Basic().One().DotGit().Root(),
	})
	c.Assert(err, IsNil)

	r, err := Clone(memory.NewStorage(), memfs.New(), &CloneOptions{
		URL:   url,
		Depth: 1,
	})
	c.Assert(err, IsNil)

	err = util.WriteFile(r.wt, "foo", nil, 0755)
	c.Assert(err, IsNil)

	w, err := r.Worktree()
	c.Assert(err, IsNil)

	_, err = w.Add("foo")
	c.Assert(err, IsNil)

	hash, err := w.Commit("foo", &CommitOptions{
		Author:    defaultSignature(),
		Committer: defaultSignature(),
	})
	c.Assert(err, IsNil)

	err = r.Push(&PushOptions{})
	c.Assert(err, IsNil)

	AssertReferences(c, server, map[string]string{
		"refs/heads/master": hash.String(),
	})

	AssertReferences(c, r, map[string]string{
		"refs/remotes/origin/master": hash.String(),
	})
}

// TestPushNonExistentRemote: pushing to an unconfigured remote name fails.
func (s *RepositorySuite) TestPushNonExistentRemote(c *C) {
	srcFs := fixtures.Basic().One().DotGit()
	sto := filesystem.NewStorage(srcFs, cache.NewObjectLRUDefault())

	r, err := Open(sto, srcFs)
	c.Assert(err, IsNil)

	err = r.Push(&PushOptions{RemoteName: "myremote"})
	c.Assert(err, ErrorMatches, ".*remote not found.*")
}

// TestLog: log starting from a specific commit walks its ancestry in order
// (the expected hashes continue on the next source line).
func (s *RepositorySuite) TestLog(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	cIter, err := r.Log(&LogOptions{
		From: plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"),
	})
	c.Assert(err, IsNil)

	commitOrder := []plumbing.Hash{
		// Tail of TestLog: the two ancestors reachable from b8e471f5.
		plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"),
		plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"),
	}

	for _, o := range commitOrder {
		commit, err := cIter.Next()
		c.Assert(err, IsNil)
		c.Assert(commit.Hash, Equals, o)
	}
	_, err = cIter.Next()
	c.Assert(err, Equals, io.EOF)
}

// TestLogAll: Log with All walks commits reachable from every reference, in
// this exact order for the basic fixture.
func (s *RepositorySuite) TestLogAll(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	rIter, err := r.Storer.IterReferences()
	c.Assert(err, IsNil)

	refCount := 0
	err = rIter.ForEach(func(ref *plumbing.Reference) error {
		refCount++
		return nil
	})
	c.Assert(err, IsNil)
	c.Assert(refCount, Equals, 5)

	cIter, err := r.Log(&LogOptions{
		All: true,
	})
	c.Assert(err, IsNil)

	commitOrder := []plumbing.Hash{
		plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
		plumbing.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881"),
		plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"),
		plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"),
		plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea"),
		plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"),
		plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"),
		plumbing.NewHash("a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69"),
		plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"),
	}

	for _, o := range commitOrder {
		commit, err := cIter.Next()
		c.Assert(err, IsNil)
		c.Assert(commit.Hash, Equals, o)
	}
	_, err = cIter.Next()
	c.Assert(err, Equals, io.EOF)
	cIter.Close()
}

// TestLogAllMissingReferences: Log(All) tolerates a dangling reference (a
// hash that resolves to no object) and still yields all 9 commits.
func (s *RepositorySuite) TestLogAllMissingReferences(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)
	err = r.Storer.RemoveReference(plumbing.HEAD)
	c.Assert(err, IsNil)

	rIter, err := r.Storer.IterReferences()
	c.Assert(err, IsNil)

	refCount := 0
	err = rIter.ForEach(func(ref *plumbing.Reference) error {
		refCount++
		return nil
	})
	c.Assert(err, IsNil)
	c.Assert(refCount, Equals, 4)

	// "DUMMY" is not a valid hex hash, so this reference points nowhere.
	err = r.Storer.SetReference(plumbing.NewHashReference(plumbing.ReferenceName("DUMMY"), plumbing.NewHash("DUMMY")))
	c.Assert(err, IsNil)

	rIter, err = r.Storer.IterReferences()
	c.Assert(err, IsNil)

	refCount = 0
	err = rIter.ForEach(func(ref *plumbing.Reference) error {
		refCount++
		return nil
	})
	c.Assert(err, IsNil)
	c.Assert(refCount, Equals, 5)

	cIter, err := r.Log(&LogOptions{
		All: true,
	})
	c.Assert(cIter, NotNil)
	c.Assert(err, IsNil)

	cCount := 0
	cIter.ForEach(func(c *object.Commit) error {
		cCount++
		return nil
	})
	c.Assert(cCount, Equals, 9)

	_, err = cIter.Next()
	c.Assert(err, Equals, io.EOF)
	cIter.Close()
}

// TestLogAllOrderByTime: committer-time ordering yields a different, fixed
// sequence than the default All order.
func (s *RepositorySuite) TestLogAllOrderByTime(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	cIter, err := r.Log(&LogOptions{
		Order: LogOrderCommitterTime,
		All:   true,
	})
	c.Assert(err, IsNil)

	commitOrder := []plumbing.Hash{
		plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
		plumbing.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881"),
		plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"),
		plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"),
		plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea"),
		plumbing.NewHash("a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69"),
		plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"),
		plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"),
		plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"),
	}

	for _, o := range commitOrder {
		commit, err := cIter.Next()
		c.Assert(err, IsNil)
		c.Assert(commit.Hash, Equals, o)
	}
	_, err = cIter.Next()
	c.Assert(err, Equals, io.EOF)
	cIter.Close()
}

// TestLogHead: Log with default options starts at HEAD (the expected hashes
// continue on the next source line).
func (s *RepositorySuite) TestLogHead(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	cIter, err := r.Log(&LogOptions{})
	c.Assert(err, IsNil)

	commitOrder :=
		// Tail of TestLogHead: the 8 commits reachable from HEAD.
		[]plumbing.Hash{
			plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
			plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"),
			plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"),
			plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea"),
			plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"),
			plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"),
			plumbing.NewHash("a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69"),
			plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"),
		}

	for _, o := range commitOrder {
		commit, err := cIter.Next()
		c.Assert(err, IsNil)
		c.Assert(commit.Hash, Equals, o)
	}
	_, err = cIter.Next()
	c.Assert(err, Equals, io.EOF)
}

// TestLogError: Log from a non-existent commit hash fails.
func (s *RepositorySuite) TestLogError(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	_, err = r.Log(&LogOptions{
		From: plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
	})
	c.Assert(err, NotNil)
}

// TestLogFileNext: file-filtered log yields only the commit touching
// vendor/foo.go, consumed via Next.
func (s *RepositorySuite) TestLogFileNext(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	fileName := "vendor/foo.go"
	cIter, err := r.Log(&LogOptions{FileName: &fileName})
	c.Assert(err, IsNil)

	commitOrder := []plumbing.Hash{
		plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
	}

	for _, o := range commitOrder {
		commit, err := cIter.Next()
		c.Assert(err, IsNil)
		c.Assert(commit.Hash, Equals, o)
	}
	_, err = cIter.Next()
	c.Assert(err, Equals, io.EOF)
}

// TestLogFileForEach: same as above but consumed through ForEach (the
// expected hash continues on the next source line).
func (s *RepositorySuite) TestLogFileForEach(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	fileName := "php/crappy.php"
	cIter, err := r.Log(&LogOptions{FileName: &fileName})
	c.Assert(err, IsNil)
	defer cIter.Close()

	commitOrder := []plumbing.Hash{
		plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"),
	}

	expectedIndex := 0
	cIter.ForEach(func(commit *object.Commit) error {
		expectedCommitHash := commitOrder[expectedIndex]
		c.Assert(commit.Hash.String(), Equals, expectedCommitHash.String())
		expectedIndex++
		return nil
	})
	c.Assert(expectedIndex, Equals, 1)
}

// TestLogNonHeadFile: a file not present at HEAD produces an empty log.
func (s *RepositorySuite) TestLogNonHeadFile(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	fileName := "README"
	cIter, err := r.Log(&LogOptions{FileName: &fileName})
	c.Assert(err, IsNil)
	defer cIter.Close()

	_, err = cIter.Next()
	c.Assert(err, Equals, io.EOF)
}

// TestLogAllFileForEach: with All, the README change on the side branch is
// found even though it is not reachable from HEAD.
func (s *RepositorySuite) TestLogAllFileForEach(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	fileName := "README"
	cIter, err := r.Log(&LogOptions{FileName: &fileName, All: true})
	c.Assert(err, IsNil)
	defer cIter.Close()

	commitOrder := []plumbing.Hash{
		plumbing.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881"),
	}

	expectedIndex := 0
	cIter.ForEach(func(commit *object.Commit) error {
		expectedCommitHash := commitOrder[expectedIndex]
		c.Assert(commit.Hash.String(), Equals, expectedCommitHash.String())
		expectedIndex++
		return nil
	})
	c.Assert(expectedIndex, Equals, 1)
}

// TestLogInvalidFile: a path that never existed yields an empty, error-free
// log, matching `git log -- <path>` behaviour.
func (s *RepositorySuite) TestLogInvalidFile(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	// Throwing in a file that does not exist
	fileName := "vendor/foo12.go"
	cIter, err := r.Log(&LogOptions{FileName: &fileName})
	// Not raising an error since `git log -- vendor/foo12.go` responds silently
	c.Assert(err, IsNil)
	defer cIter.Close()

	_, err = cIter.Next()
	c.Assert(err, Equals, io.EOF)
}

// TestLogFileInitialCommit: the file filter reaches back to the initial
// commit (the body continues on the next source line).
func (s *RepositorySuite) TestLogFileInitialCommit(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err :=
		// Tail of TestLogFileInitialCommit: LICENSE was added in the very
		// first commit of the fixture.
		r.clone(context.Background(), &CloneOptions{
			URL: s.GetBasicLocalRepositoryURL(),
		})
	c.Assert(err, IsNil)

	fileName := "LICENSE"
	cIter, err := r.Log(&LogOptions{
		Order:    LogOrderCommitterTime,
		FileName: &fileName,
	})
	c.Assert(err, IsNil)
	defer cIter.Close()

	commitOrder := []plumbing.Hash{
		plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"),
	}

	expectedIndex := 0
	cIter.ForEach(func(commit *object.Commit) error {
		expectedCommitHash := commitOrder[expectedIndex]
		c.Assert(commit.Hash.String(), Equals, expectedCommitHash.String())
		expectedIndex++
		return nil
	})
	c.Assert(expectedIndex, Equals, 1)
}

// TestLogFileWithOtherParamsFail: starting From a commit where the file was
// not yet changed yields an empty log.
func (s *RepositorySuite) TestLogFileWithOtherParamsFail(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	fileName := "vendor/foo.go"
	cIter, err := r.Log(&LogOptions{
		Order:    LogOrderCommitterTime,
		FileName: &fileName,
		From:     plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"),
	})
	c.Assert(err, IsNil)
	defer cIter.Close()

	_, iterErr := cIter.Next()
	c.Assert(iterErr, Equals, io.EOF)
}

// TestLogFileWithOtherParamsPass: From plus a file filter finds the single
// matching ancestor commit.
func (s *RepositorySuite) TestLogFileWithOtherParamsPass(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	fileName := "LICENSE"
	cIter, err := r.Log(&LogOptions{
		Order:    LogOrderCommitterTime,
		FileName: &fileName,
		From:     plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"),
	})
	c.Assert(err, IsNil)

	commitVal, iterErr := cIter.Next()
	c.Assert(iterErr, Equals, nil)
	c.Assert(commitVal.Hash.String(), Equals, "b029517f6300c2da0f4b651b8642506cd6aaf45d")

	_, iterErr = cIter.Next()
	c.Assert(iterErr, Equals, io.EOF)
}

// TestCommit: a single commit object can be looked up by hash and exposes
// its tree and author.
func (s *RepositorySuite) TestCommit(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	hash := plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47")
	commit, err := r.CommitObject(hash)
	c.Assert(err, IsNil)

	c.Assert(commit.Hash.IsZero(), Equals, false)
	c.Assert(commit.Hash, Equals, commit.ID())
	c.Assert(commit.Hash, Equals, hash)
	c.Assert(commit.Type(), Equals, plumbing.CommitObject)

	tree, err := commit.Tree()
	c.Assert(err, IsNil)
	c.Assert(tree.Hash.IsZero(), Equals, false)

	c.Assert(commit.Author.Email, Equals, "daniel@lordran.local")
}

// TestCommits: iterating all commit objects in the basic fixture yields 9.
func (s *RepositorySuite) TestCommits(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{URL: s.GetBasicLocalRepositoryURL()})
	c.Assert(err, IsNil)

	count := 0
	commits, err := r.CommitObjects()
	c.Assert(err, IsNil)
	for {
		commit, err := commits.Next()
		if err != nil {
			break
		}

		count++
		c.Assert(commit.Hash.IsZero(), Equals, false)
		c.Assert(commit.Hash, Equals, commit.ID())
		c.Assert(commit.Type(), Equals, plumbing.CommitObject)
	}

	c.Assert(count, Equals, 9)
}

// TestBlob: BlobObject rejects a commit hash and resolves a real blob hash.
func (s *RepositorySuite) TestBlob(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{
		URL: s.GetBasicLocalRepositoryURL(),
	})
	c.Assert(err, IsNil)

	// A commit hash is not a blob: lookup must fail.
	blob, err := r.BlobObject(plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"))
	c.Assert(err, NotNil)
	c.Assert(blob, IsNil)

	blobHash := plumbing.NewHash("9a48f23120e880dfbe41f7c9b7b708e9ee62a492")
	blob, err = r.BlobObject(blobHash)
	c.Assert(err, IsNil)

	c.Assert(blob.Hash.IsZero(), Equals, false)
	c.Assert(blob.Hash, Equals, blob.ID())
	c.Assert(blob.Hash, Equals, blobHash)
	c.Assert(blob.Type(), Equals, plumbing.BlobObject)
}

// TestBlobs: iterating all blob objects (the loop tail continues on the
// next source line).
func (s *RepositorySuite) TestBlobs(c *C) {
	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{URL: s.GetBasicLocalRepositoryURL()})
	c.Assert(err, IsNil)

	count := 0
	blobs, err := r.BlobObjects()
	c.Assert(err, IsNil)
	for {
		blob, err := blobs.Next()
		if err != nil {
			break
		}

		count++
		c.Assert(blob.Hash.IsZero(), Equals, false)
		c.Assert(blob.Hash, Equals, blob.ID())
		// Tail of TestBlobs: the basic fixture contains 10 blobs.
		c.Assert(blob.Type(), Equals, plumbing.BlobObject)
	}

	c.Assert(count, Equals, 10)
}

// TestTagObject: an annotated tag object can be looked up by its hash.
func (s *RepositorySuite) TestTagObject(c *C) {
	url := s.GetLocalRepositoryURL(
		fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
	)

	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{URL: url})
	c.Assert(err, IsNil)

	hash := plumbing.NewHash("ad7897c0fb8e7d9a9ba41fa66072cf06095a6cfc")
	tag, err := r.TagObject(hash)
	c.Assert(err, IsNil)

	c.Assert(tag.Hash.IsZero(), Equals, false)
	c.Assert(tag.Hash, Equals, hash)
	c.Assert(tag.Type(), Equals, plumbing.TagObject)
}

// TestTags: the tags fixture exposes 5 tag references.
func (s *RepositorySuite) TestTags(c *C) {
	url := s.GetLocalRepositoryURL(
		fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
	)

	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{URL: url})
	c.Assert(err, IsNil)

	count := 0
	tags, err := r.Tags()
	c.Assert(err, IsNil)

	tags.ForEach(func(tag *plumbing.Reference) error {
		count++
		c.Assert(tag.Hash().IsZero(), Equals, false)
		c.Assert(tag.Name().IsTag(), Equals, true)
		return nil
	})

	c.Assert(count, Equals, 5)
}

// TestCreateTagLightweight: a lightweight tag (nil options) points at the
// given hash.
func (s *RepositorySuite) TestCreateTagLightweight(c *C) {
	url := s.GetLocalRepositoryURL(
		fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
	)

	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{URL: url})
	c.Assert(err, IsNil)

	expected, err := r.Head()
	c.Assert(err, IsNil)

	ref, err := r.CreateTag("foobar", expected.Hash(), nil)
	c.Assert(err, IsNil)
	c.Assert(ref, NotNil)

	actual, err := r.Tag("foobar")
	c.Assert(err, IsNil)

	c.Assert(expected.Hash(), Equals, actual.Hash())
}

// TestCreateTagLightweightExists: creating a tag that already exists fails
// with ErrTagExists.
func (s *RepositorySuite) TestCreateTagLightweightExists(c *C) {
	url := s.GetLocalRepositoryURL(
		fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
	)

	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{URL: url})
	c.Assert(err, IsNil)

	expected, err := r.Head()
	c.Assert(err, IsNil)

	ref, err := r.CreateTag("lightweight-tag", expected.Hash(), nil)
	c.Assert(ref, IsNil)
	c.Assert(err, Equals, ErrTagExists)
}

// TestCreateTagAnnotated: CreateTagOptions with Tagger and Message produces
// an annotated tag object targeting the given commit.
func (s *RepositorySuite) TestCreateTagAnnotated(c *C) {
	url := s.GetLocalRepositoryURL(
		fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
	)

	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{URL: url})
	c.Assert(err, IsNil)

	h, err := r.Head()
	c.Assert(err, IsNil)

	expectedHash := h.Hash()

	ref, err := r.CreateTag("foobar", expectedHash, &CreateTagOptions{
		Tagger:  defaultSignature(),
		Message: "foo bar baz qux",
	})
	c.Assert(err, IsNil)

	tag, err := r.Tag("foobar")
	c.Assert(err, IsNil)

	obj, err := r.TagObject(tag.Hash())
	c.Assert(err, IsNil)

	c.Assert(ref, DeepEquals, tag)
	c.Assert(obj.Hash, Equals, ref.Hash())
	c.Assert(obj.Type(), Equals, plumbing.TagObject)
	c.Assert(obj.Target, Equals, expectedHash)
}

// TestCreateTagAnnotatedBadOpts: Tagger and Message are both mandatory for
// annotated tags.
func (s *RepositorySuite) TestCreateTagAnnotatedBadOpts(c *C) {
	url := s.GetLocalRepositoryURL(
		fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
	)

	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{URL: url})
	c.Assert(err, IsNil)

	h, err := r.Head()
	c.Assert(err, IsNil)

	expectedHash := h.Hash()

	ref, err := r.CreateTag("foobar", expectedHash, &CreateTagOptions{
		Message: "foo bar baz qux",
	})
	c.Assert(ref, IsNil)
	c.Assert(err, Equals, ErrMissingTagger)

	ref, err = r.CreateTag("foobar", expectedHash, &CreateTagOptions{
		Tagger: defaultSignature(),
	})
	c.Assert(ref, IsNil)
	c.Assert(err, Equals, ErrMissingMessage)
}

// TestCreateTagAnnotatedBadHash: tagging a non-existent object fails with
// plumbing.ErrObjectNotFound.
func (s *RepositorySuite) TestCreateTagAnnotatedBadHash(c *C) {
	url := s.GetLocalRepositoryURL(
		fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
	)

	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{URL: url})
	c.Assert(err, IsNil)

	ref, err := r.CreateTag("foobar", plumbing.ZeroHash, &CreateTagOptions{
		Tagger:  defaultSignature(),
		Message: "foo bar baz qux",
	})
	c.Assert(ref, IsNil)
	c.Assert(err, Equals, plumbing.ErrObjectNotFound)
}

// TestCreateTagSigned: a tag created with a SignKey verifies against the
// armored public key.
func (s *RepositorySuite) TestCreateTagSigned(c *C) {
	url := s.GetLocalRepositoryURL(
		fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
	)

	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{URL: url})
	c.Assert(err, IsNil)

	h, err := r.Head()
	c.Assert(err, IsNil)

	key := commitSignKey(c, true)
	_, err = r.CreateTag("foobar", h.Hash(), &CreateTagOptions{
		Tagger:  defaultSignature(),
		Message: "foo bar baz qux",
		SignKey: key,
	})
	c.Assert(err, IsNil)

	tag, err := r.Tag("foobar")
	c.Assert(err, IsNil)

	obj, err := r.TagObject(tag.Hash())
	c.Assert(err, IsNil)

	// Verify the tag.
	pks := new(bytes.Buffer)
	pkw, err := armor.Encode(pks, openpgp.PublicKeyType, nil)
	c.Assert(err, IsNil)

	err = key.Serialize(pkw)
	c.Assert(err, IsNil)
	err = pkw.Close()
	c.Assert(err, IsNil)

	actual, err := obj.Verify(pks.String())
	c.Assert(err, IsNil)
	c.Assert(actual.PrimaryKey, DeepEquals, key.PrimaryKey)
}

// TestCreateTagSignedBadKey: an encrypted (locked) signing key is rejected.
func (s *RepositorySuite) TestCreateTagSignedBadKey(c *C) {
	url := s.GetLocalRepositoryURL(
		fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
	)

	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{URL: url})
	c.Assert(err, IsNil)

	h, err := r.Head()
	c.Assert(err, IsNil)

	key := commitSignKey(c, false)
	_, err = r.CreateTag("foobar", h.Hash(), &CreateTagOptions{
		Tagger:  defaultSignature(),
		Message: "foo bar baz qux",
		SignKey: key,
	})
	c.Assert(err, Equals, openpgperr.InvalidArgumentError("signing key is encrypted"))
}

// TestCreateTagCanonicalize (incomplete: the message literal and the rest of
// the function continue past the end of this chunk).
func (s *RepositorySuite) TestCreateTagCanonicalize(c *C) {
	url := s.GetLocalRepositoryURL(
		fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
	)

	r, _ := Init(memory.NewStorage(), nil)
	err := r.clone(context.Background(), &CloneOptions{URL: url})
	c.Assert(err, IsNil)

	h, err := r.Head()
	c.Assert(err, IsNil)

	key := commitSignKey(c, true)
	_, err = r.CreateTag("foobar", h.Hash(), &CreateTagOptions{
		Tagger:  defaultSignature(),
		Message: "\n\nfoo
bar baz qux\n\nsome message here", SignKey: key, }) c.Assert(err, IsNil) tag, err := r.Tag("foobar") c.Assert(err, IsNil) obj, err := r.TagObject(tag.Hash()) c.Assert(err, IsNil) // Assert the new canonicalized message. c.Assert(obj.Message, Equals, "foo bar baz qux\n\nsome message here\n") // Verify the tag. pks := new(bytes.Buffer) pkw, err := armor.Encode(pks, openpgp.PublicKeyType, nil) c.Assert(err, IsNil) err = key.Serialize(pkw) c.Assert(err, IsNil) err = pkw.Close() c.Assert(err, IsNil) actual, err := obj.Verify(pks.String()) c.Assert(err, IsNil) c.Assert(actual.PrimaryKey, DeepEquals, key.PrimaryKey) } func (s *RepositorySuite) TestTagLightweight(c *C) { url := s.GetLocalRepositoryURL( fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(), ) r, _ := Init(memory.NewStorage(), nil) err := r.clone(context.Background(), &CloneOptions{URL: url}) c.Assert(err, IsNil) expected := plumbing.NewHash("f7b877701fbf855b44c0a9e86f3fdce2c298b07f") tag, err := r.Tag("lightweight-tag") c.Assert(err, IsNil) actual := tag.Hash() c.Assert(expected, Equals, actual) } func (s *RepositorySuite) TestTagLightweightMissingTag(c *C) { url := s.GetLocalRepositoryURL( fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(), ) r, _ := Init(memory.NewStorage(), nil) err := r.clone(context.Background(), &CloneOptions{URL: url}) c.Assert(err, IsNil) tag, err := r.Tag("lightweight-tag-tag") c.Assert(tag, IsNil) c.Assert(err, Equals, ErrTagNotFound) } func (s *RepositorySuite) TestDeleteTag(c *C) { url := s.GetLocalRepositoryURL( fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(), ) r, _ := Init(memory.NewStorage(), nil) err := r.clone(context.Background(), &CloneOptions{URL: url}) c.Assert(err, IsNil) err = r.DeleteTag("lightweight-tag") c.Assert(err, IsNil) _, err = r.Tag("lightweight-tag") c.Assert(err, Equals, ErrTagNotFound) } func (s *RepositorySuite) TestDeleteTagMissingTag(c *C) { url := s.GetLocalRepositoryURL( 
fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(), ) r, _ := Init(memory.NewStorage(), nil) err := r.clone(context.Background(), &CloneOptions{URL: url}) c.Assert(err, IsNil) err = r.DeleteTag("lightweight-tag-tag") c.Assert(err, Equals, ErrTagNotFound) } func (s *RepositorySuite) TestDeleteTagAnnotated(c *C) { url := s.GetLocalRepositoryURL( fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(), ) dir, err := ioutil.TempDir("", "go-git-test-deletetag-annotated") c.Assert(err, IsNil) defer os.RemoveAll(dir) // clean up fss := filesystem.NewStorage(osfs.New(dir), cache.NewObjectLRUDefault()) r, _ := Init(fss, nil) err = r.clone(context.Background(), &CloneOptions{URL: url}) c.Assert(err, IsNil) ref, err := r.Tag("annotated-tag") c.Assert(ref, NotNil) c.Assert(err, IsNil) obj, err := r.TagObject(ref.Hash()) c.Assert(obj, NotNil) c.Assert(err, IsNil) err = r.DeleteTag("annotated-tag") c.Assert(err, IsNil) _, err = r.Tag("annotated-tag") c.Assert(err, Equals, ErrTagNotFound) // Run a prune (and repack, to ensure that we are GCing everything regardless // of the fixture in use) and try to get the tag object again. // // The repo needs to be re-opened after the repack. err = r.Prune(PruneOptions{Handler: r.DeleteObject}) c.Assert(err, IsNil) err = r.RepackObjects(&RepackConfig{}) c.Assert(err, IsNil) r, err = PlainOpen(dir) c.Assert(r, NotNil) c.Assert(err, IsNil) // Now check to see if the GC was effective in removing the tag object. 
obj, err = r.TagObject(ref.Hash()) c.Assert(obj, IsNil) c.Assert(err, Equals, plumbing.ErrObjectNotFound) } func (s *RepositorySuite) TestDeleteTagAnnotatedUnpacked(c *C) { url := s.GetLocalRepositoryURL( fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(), ) dir, err := ioutil.TempDir("", "go-git-test-deletetag-annotated-unpacked") c.Assert(err, IsNil) defer os.RemoveAll(dir) // clean up fss := filesystem.NewStorage(osfs.New(dir), cache.NewObjectLRUDefault()) r, _ := Init(fss, nil) err = r.clone(context.Background(), &CloneOptions{URL: url}) c.Assert(err, IsNil) // Create a tag for the deletion test. This ensures that the ultimate loose // object will be unpacked (as we aren't doing anything that should pack it), // so that we can effectively test that a prune deletes it, without having to // resort to a repack. h, err := r.Head() c.Assert(err, IsNil) expectedHash := h.Hash() ref, err := r.CreateTag("foobar", expectedHash, &CreateTagOptions{ Tagger: defaultSignature(), Message: "foo bar baz qux", }) c.Assert(err, IsNil) tag, err := r.Tag("foobar") c.Assert(err, IsNil) obj, err := r.TagObject(tag.Hash()) c.Assert(obj, NotNil) c.Assert(err, IsNil) err = r.DeleteTag("foobar") c.Assert(err, IsNil) _, err = r.Tag("foobar") c.Assert(err, Equals, ErrTagNotFound) // As mentioned, only run a prune. We are not testing for packed objects // here. err = r.Prune(PruneOptions{Handler: r.DeleteObject}) c.Assert(err, IsNil) // Now check to see if the GC was effective in removing the tag object. 
obj, err = r.TagObject(ref.Hash()) c.Assert(obj, IsNil) c.Assert(err, Equals, plumbing.ErrObjectNotFound) } func (s *RepositorySuite) TestBranches(c *C) { f := fixtures.ByURL("https://github.com/git-fixtures/root-references.git").One() sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault()) r, err := Open(sto, f.DotGit()) c.Assert(err, IsNil) count := 0 branches, err := r.Branches() c.Assert(err, IsNil) branches.ForEach(func(branch *plumbing.Reference) error { count++ c.Assert(branch.Hash().IsZero(), Equals, false) c.Assert(branch.Name().IsBranch(), Equals, true) return nil }) c.Assert(count, Equals, 8) } func (s *RepositorySuite) TestNotes(c *C) { // TODO add fixture with Notes url := s.GetLocalRepositoryURL( fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(), ) r, _ := Init(memory.NewStorage(), nil) err := r.clone(context.Background(), &CloneOptions{URL: url}) c.Assert(err, IsNil) count := 0 notes, err := r.Notes() c.Assert(err, IsNil) notes.ForEach(func(note *plumbing.Reference) error { count++ c.Assert(note.Hash().IsZero(), Equals, false) c.Assert(note.Name().IsNote(), Equals, true) return nil }) c.Assert(count, Equals, 0) } func (s *RepositorySuite) TestTree(c *C) { r, _ := Init(memory.NewStorage(), nil) err := r.clone(context.Background(), &CloneOptions{ URL: s.GetBasicLocalRepositoryURL(), }) c.Assert(err, IsNil) invalidHash := plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") tree, err := r.TreeObject(invalidHash) c.Assert(tree, IsNil) c.Assert(err, NotNil) hash := plumbing.NewHash("dbd3641b371024f44d0e469a9c8f5457b0660de1") tree, err = r.TreeObject(hash) c.Assert(err, IsNil) c.Assert(tree.Hash.IsZero(), Equals, false) c.Assert(tree.Hash, Equals, tree.ID()) c.Assert(tree.Hash, Equals, hash) c.Assert(tree.Type(), Equals, plumbing.TreeObject) c.Assert(len(tree.Entries), Not(Equals), 0) } func (s *RepositorySuite) TestTrees(c *C) { r, _ := Init(memory.NewStorage(), nil) err := r.clone(context.Background(), 
&CloneOptions{URL: s.GetBasicLocalRepositoryURL()}) c.Assert(err, IsNil) count := 0 trees, err := r.TreeObjects() c.Assert(err, IsNil) for { tree, err := trees.Next() if err != nil { break } count++ c.Assert(tree.Hash.IsZero(), Equals, false) c.Assert(tree.Hash, Equals, tree.ID()) c.Assert(tree.Type(), Equals, plumbing.TreeObject) c.Assert(len(tree.Entries), Not(Equals), 0) } c.Assert(count, Equals, 12) } func (s *RepositorySuite) TestTagObjects(c *C) { url := s.GetLocalRepositoryURL( fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(), ) r, _ := Init(memory.NewStorage(), nil) err := r.clone(context.Background(), &CloneOptions{URL: url}) c.Assert(err, IsNil) count := 0 tags, err := r.TagObjects() c.Assert(err, IsNil) tags.ForEach(func(tag *object.Tag) error { count++ c.Assert(tag.Hash.IsZero(), Equals, false) c.Assert(tag.Type(), Equals, plumbing.TagObject) return nil }) refs, _ := r.References() refs.ForEach(func(ref *plumbing.Reference) error { return nil }) c.Assert(count, Equals, 4) } func (s *RepositorySuite) TestCommitIterClosePanic(c *C) { r, _ := Init(memory.NewStorage(), nil) err := r.clone(context.Background(), &CloneOptions{URL: s.GetBasicLocalRepositoryURL()}) c.Assert(err, IsNil) commits, err := r.CommitObjects() c.Assert(err, IsNil) commits.Close() } func (s *RepositorySuite) TestRef(c *C) { r, _ := Init(memory.NewStorage(), nil) err := r.clone(context.Background(), &CloneOptions{URL: s.GetBasicLocalRepositoryURL()}) c.Assert(err, IsNil) ref, err := r.Reference(plumbing.HEAD, false) c.Assert(err, IsNil) c.Assert(ref.Name(), Equals, plumbing.HEAD) ref, err = r.Reference(plumbing.HEAD, true) c.Assert(err, IsNil) c.Assert(ref.Name(), Equals, plumbing.ReferenceName("refs/heads/master")) } func (s *RepositorySuite) TestRefs(c *C) { r, _ := Init(memory.NewStorage(), nil) err := r.clone(context.Background(), &CloneOptions{URL: s.GetBasicLocalRepositoryURL()}) c.Assert(err, IsNil) c.Assert(err, IsNil) iter, err := r.References() c.Assert(err, 
IsNil) c.Assert(iter, NotNil) } func (s *RepositorySuite) TestObject(c *C) { r, _ := Init(memory.NewStorage(), nil) err := r.clone(context.Background(), &CloneOptions{URL: s.GetBasicLocalRepositoryURL()}) c.Assert(err, IsNil) hash := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") o, err := r.Object(plumbing.CommitObject, hash) c.Assert(err, IsNil) c.Assert(o.ID().IsZero(), Equals, false) c.Assert(o.Type(), Equals, plumbing.CommitObject) } func (s *RepositorySuite) TestObjects(c *C) { r, _ := Init(memory.NewStorage(), nil) err := r.clone(context.Background(), &CloneOptions{URL: s.GetBasicLocalRepositoryURL()}) c.Assert(err, IsNil) count := 0 objects, err := r.Objects() c.Assert(err, IsNil) for { o, err := objects.Next() if err != nil { break } count++ c.Assert(o.ID().IsZero(), Equals, false) c.Assert(o.Type(), Not(Equals), plumbing.AnyObject) } c.Assert(count, Equals, 31) } func (s *RepositorySuite) TestObjectNotFound(c *C) { r, _ := Init(memory.NewStorage(), nil) err := r.clone(context.Background(), &CloneOptions{URL: s.GetBasicLocalRepositoryURL()}) c.Assert(err, IsNil) hash := plumbing.NewHash("0a3fb06ff80156fb153bcdcc58b5e16c2d27625c") tag, err := r.Object(plumbing.TagObject, hash) c.Assert(err, DeepEquals, plumbing.ErrObjectNotFound) c.Assert(tag, IsNil) } func (s *RepositorySuite) TestWorktree(c *C) { def := memfs.New() r, _ := Init(memory.NewStorage(), def) w, err := r.Worktree() c.Assert(err, IsNil) c.Assert(w.Filesystem, Equals, def) } func (s *RepositorySuite) TestWorktreeBare(c *C) { r, _ := Init(memory.NewStorage(), nil) w, err := r.Worktree() c.Assert(err, Equals, ErrIsBareRepository) c.Assert(w, IsNil) } func (s *RepositorySuite) TestResolveRevision(c *C) { f := fixtures.ByURL("https://github.com/git-fixtures/basic.git").One() sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault()) r, err := Open(sto, f.DotGit()) c.Assert(err, IsNil) datas := map[string]string{ "HEAD": "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", 
"heads/master": "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "heads/master~1": "918c48b83bd081e863dbe1b80f8998f058cd8294", "refs/heads/master": "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "refs/heads/master~2^^~": "b029517f6300c2da0f4b651b8642506cd6aaf45d", "refs/tags/v1.0.0": "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "refs/remotes/origin/master": "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "refs/remotes/origin/HEAD": "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "HEAD~2^^~": "b029517f6300c2da0f4b651b8642506cd6aaf45d", "HEAD~3^2": "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69", "HEAD~3^2^0": "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69", "HEAD~2^{/binary file}": "35e85108805c84807bc66a02d91535e1e24b38b9", "HEAD~^{/!-some}": "1669dce138d9b841a518c64b10914d88f5e488ea", "master": "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "branch": "e8d3ffab552895c19b9fcf7aa264d277cde33881", "v1.0.0": "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", "branch~1": "918c48b83bd081e863dbe1b80f8998f058cd8294", "v1.0.0~1": "918c48b83bd081e863dbe1b80f8998f058cd8294", "master~1": "918c48b83bd081e863dbe1b80f8998f058cd8294", "918c48b83bd081e863dbe1b80f8998f058cd8294": "918c48b83bd081e863dbe1b80f8998f058cd8294", } for rev, hash := range datas { h, err := r.ResolveRevision(plumbing.Revision(rev)) c.Assert(err, IsNil) c.Check(h.String(), Equals, hash, Commentf("while checking %s", rev)) } } func (s *RepositorySuite) TestResolveRevisionAnnotated(c *C) { f := fixtures.ByURL("https://github.com/git-fixtures/tags.git").One() sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault()) r, err := Open(sto, f.DotGit()) c.Assert(err, IsNil) datas := map[string]string{ "refs/tags/annotated-tag": "f7b877701fbf855b44c0a9e86f3fdce2c298b07f", } for rev, hash := range datas { h, err := r.ResolveRevision(plumbing.Revision(rev)) c.Assert(err, IsNil) c.Check(h.String(), Equals, hash, Commentf("while checking %s", rev)) } } func (s *RepositorySuite) TestResolveRevisionWithErrors(c *C) { url := s.GetLocalRepositoryURL( 
fixtures.ByURL("https://github.com/git-fixtures/basic.git").One(), ) r, _ := Init(memory.NewStorage(), nil) err := r.clone(context.Background(), &CloneOptions{URL: url}) c.Assert(err, IsNil) headRef, err := r.Head() c.Assert(err, IsNil) ref := plumbing.NewHashReference("refs/heads/918c48b83bd081e863dbe1b80f8998f058cd8294", headRef.Hash()) err = r.Storer.SetReference(ref) c.Assert(err, IsNil) datas := map[string]string{ "efs/heads/master~": "reference not found", "HEAD^3": `Revision invalid : "3" found must be 0, 1 or 2 after "^"`, "HEAD^{/whatever}": `No commit message match regexp : "whatever"`, "4e1243bd22c66e76c2ba9eddc1f91394e57f9f83": "reference not found", "918c48b83bd081e863dbe1b80f8998f058cd8294": `refname "918c48b83bd081e863dbe1b80f8998f058cd8294" is ambiguous`, } for rev, rerr := range datas { _, err := r.ResolveRevision(plumbing.Revision(rev)) c.Assert(err.Error(), Equals, rerr) } } func (s *RepositorySuite) testRepackObjects( c *C, deleteTime time.Time, expectedPacks int) { srcFs := fixtures.ByTag("unpacked").One().DotGit() var sto storage.Storer var err error sto = filesystem.NewStorage(srcFs, cache.NewObjectLRUDefault()) los := sto.(storer.LooseObjectStorer) c.Assert(los, NotNil) numLooseStart := 0 err = los.ForEachObjectHash(func(_ plumbing.Hash) error { numLooseStart++ return nil }) c.Assert(err, IsNil) c.Assert(numLooseStart > 0, Equals, true) pos := sto.(storer.PackedObjectStorer) c.Assert(los, NotNil) packs, err := pos.ObjectPacks() c.Assert(err, IsNil) numPacksStart := len(packs) c.Assert(numPacksStart > 1, Equals, true) r, err := Open(sto, srcFs) c.Assert(err, IsNil) c.Assert(r, NotNil) err = r.RepackObjects(&RepackConfig{ OnlyDeletePacksOlderThan: deleteTime, }) c.Assert(err, IsNil) numLooseEnd := 0 err = los.ForEachObjectHash(func(_ plumbing.Hash) error { numLooseEnd++ return nil }) c.Assert(err, IsNil) c.Assert(numLooseEnd, Equals, 0) packs, err = pos.ObjectPacks() c.Assert(err, IsNil) numPacksEnd := len(packs) c.Assert(numPacksEnd, Equals, 
expectedPacks) } func (s *RepositorySuite) TestRepackObjects(c *C) { if testing.Short() { c.Skip("skipping test in short mode.") } s.testRepackObjects(c, time.Time{}, 1) } func (s *RepositorySuite) TestRepackObjectsWithNoDelete(c *C) { if testing.Short() { c.Skip("skipping test in short mode.") } s.testRepackObjects(c, time.Unix(0, 1), 3) } func ExecuteOnPath(c *C, path string, cmds ...string) error { for _, cmd := range cmds { err := executeOnPath(path, cmd) c.Assert(err, IsNil) } return nil } func executeOnPath(path, cmd string) error { args := strings.Split(cmd, " ") c := exec.Command(args[0], args[1:]...) c.Dir = path c.Env = os.Environ() buf := bytes.NewBuffer(nil) c.Stderr = buf c.Stdout = buf return c.Run() } func (s *RepositorySuite) TestBrokenMultipleShallowFetch(c *C) { r, _ := Init(memory.NewStorage(), nil) _, err := r.CreateRemote(&config.RemoteConfig{ Name: DefaultRemoteName, URLs: []string{s.GetBasicLocalRepositoryURL()}, }) c.Assert(err, IsNil) c.Assert(r.Fetch(&FetchOptions{ Depth: 2, RefSpecs: []config.RefSpec{config.RefSpec("refs/heads/master:refs/heads/master")}, }), IsNil) shallows, err := r.Storer.Shallow() c.Assert(err, IsNil) c.Assert(len(shallows), Equals, 1) ref, err := r.Reference("refs/heads/master", true) c.Assert(err, IsNil) cobj, err := r.CommitObject(ref.Hash()) c.Assert(err, IsNil) c.Assert(cobj, NotNil) err = object.NewCommitPreorderIter(cobj, nil, nil).ForEach(func(c *object.Commit) error { for _, ph := range c.ParentHashes { for _, h := range shallows { if ph == h { return storer.ErrStop } } } return nil }) c.Assert(err, IsNil) c.Assert(r.Fetch(&FetchOptions{ Depth: 5, RefSpecs: []config.RefSpec{config.RefSpec("refs/heads/*:refs/heads/*")}, }), IsNil) shallows, err = r.Storer.Shallow() c.Assert(err, IsNil) c.Assert(len(shallows), Equals, 3) ref, err = r.Reference("refs/heads/master", true) c.Assert(err, IsNil) cobj, err = r.CommitObject(ref.Hash()) c.Assert(err, IsNil) c.Assert(cobj, NotNil) err = 
object.NewCommitPreorderIter(cobj, nil, nil).ForEach(func(c *object.Commit) error { for _, ph := range c.ParentHashes { for _, h := range shallows { if ph == h { return storer.ErrStop } } } return nil }) c.Assert(err, IsNil) } func BenchmarkObjects(b *testing.B) { if err := fixtures.Init(); err != nil { b.Fatal(err) } defer func() { if err := fixtures.Clean(); err != nil { b.Fatal(err) } }() for _, f := range fixtures.ByTag("packfile") { if f.DotGitHash == plumbing.ZeroHash { continue } b.Run(f.URL, func(b *testing.B) { fs := f.DotGit() storer := filesystem.NewStorage(fs, cache.NewObjectLRUDefault()) worktree, err := fs.Chroot(filepath.Dir(fs.Root())) if err != nil { b.Fatal(err) } repo, err := Open(storer, worktree) if err != nil { b.Fatal(err) } for i := 0; i < b.N; i++ { iter, err := repo.Objects() if err != nil { b.Fatal(err) } for { _, err := iter.Next() if err == io.EOF { break } if err != nil { b.Fatal(err) } } iter.Close() } }) } } golang-gopkg-src-d-go-git.v4-4.11.0/repository_unix_test.go000066400000000000000000000004101345605224300235420ustar00rootroot00000000000000// +build !plan9,!windows package git import "fmt" // preReceiveHook returns the bytes of a pre-receive hook script // that prints m before exiting successfully func preReceiveHook(m string) []byte { return []byte(fmt.Sprintf("#!/bin/sh\nprintf '%s'\n", m)) } golang-gopkg-src-d-go-git.v4-4.11.0/repository_windows_test.go000066400000000000000000000004131345605224300242540ustar00rootroot00000000000000package git import "fmt" // preReceiveHook returns the bytes of a pre-receive hook script // that prints m before exiting successfully func preReceiveHook(m string) []byte { return []byte(fmt.Sprintf("#!C:/Program\\ Files/Git/usr/bin/sh.exe\nprintf '%s'\n", m)) } golang-gopkg-src-d-go-git.v4-4.11.0/status.go000066400000000000000000000037401345605224300205550ustar00rootroot00000000000000package git import ( "bytes" "fmt" "path/filepath" ) // Status represents the current status of a Worktree. 
// The key of the map is the path of the file.
type Status map[string]*FileStatus

// File returns the FileStatus for a given path. If the FileStatus doesn't
// exist, a new FileStatus is added to the map using the path as key.
func (s Status) File(path string) *FileStatus {
	if _, ok := (s)[path]; !ok {
		// New entries start out fully untracked in both areas.
		s[path] = &FileStatus{Worktree: Untracked, Staging: Untracked}
	}

	return s[path]
}

// IsUntracked checks if the file for the given path is 'Untracked' in the
// worktree. The path is normalized to slashes before the lookup, matching
// how keys are stored in the map.
func (s Status) IsUntracked(path string) bool {
	stat, ok := (s)[filepath.ToSlash(path)]
	return ok && stat.Worktree == Untracked
}

// IsClean returns true if all the files are in Unmodified status, in both
// the staging area and the worktree.
func (s Status) IsClean() bool {
	for _, status := range s {
		if status.Worktree != Unmodified || status.Staging != Unmodified {
			return false
		}
	}

	return true
}

// String renders the status in a short, `git status --porcelain`-like
// format: one "XY path" line per file that is not Unmodified. Note that map
// iteration order makes the line order non-deterministic.
func (s Status) String() string {
	buf := bytes.NewBuffer(nil)
	for path, status := range s {
		if status.Staging == Unmodified && status.Worktree == Unmodified {
			continue
		}

		if status.Staging == Renamed {
			// For renames, Extra holds the new name; show "old -> new".
			path = fmt.Sprintf("%s -> %s", path, status.Extra)
		}

		fmt.Fprintf(buf, "%c%c %s\n", status.Staging, status.Worktree, path)
	}

	return buf.String()
}

// FileStatus contains the status of a file in the worktree
type FileStatus struct {
	// Staging is the status of a file in the staging area
	Staging StatusCode

	// Worktree is the status of a file in the worktree
	Worktree StatusCode

	// Extra contains extra information, such as the previous name in a rename
	Extra string
}

// StatusCode status code of a file in the Worktree
type StatusCode byte

const (
	Unmodified StatusCode = ' '
	Untracked  StatusCode = '?'
Modified StatusCode = 'M' Added StatusCode = 'A' Deleted StatusCode = 'D' Renamed StatusCode = 'R' Copied StatusCode = 'C' UpdatedButUnmerged StatusCode = 'U' ) golang-gopkg-src-d-go-git.v4-4.11.0/storage/000077500000000000000000000000001345605224300203435ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/000077500000000000000000000000001345605224300225275ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/config.go000066400000000000000000000017021345605224300243230ustar00rootroot00000000000000package filesystem import ( stdioutil "io/ioutil" "os" "gopkg.in/src-d/go-git.v4/config" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) type ConfigStorage struct { dir *dotgit.DotGit } func (c *ConfigStorage) Config() (conf *config.Config, err error) { cfg := config.NewConfig() f, err := c.dir.Config() if err != nil { if os.IsNotExist(err) { return cfg, nil } return nil, err } defer ioutil.CheckClose(f, &err) b, err := stdioutil.ReadAll(f) if err != nil { return nil, err } if err = cfg.Unmarshal(b); err != nil { return nil, err } return cfg, err } func (c *ConfigStorage) SetConfig(cfg *config.Config) (err error) { if err = cfg.Validate(); err != nil { return err } f, err := c.dir.ConfigWriter() if err != nil { return err } defer ioutil.CheckClose(f, &err) b, err := cfg.Marshal() if err != nil { return err } _, err = f.Write(b) return err } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/config_test.go000066400000000000000000000021271345605224300253640ustar00rootroot00000000000000package filesystem import ( "io/ioutil" "os" "gopkg.in/src-d/go-git.v4/config" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4/osfs" "gopkg.in/src-d/go-git-fixtures.v3" ) type ConfigSuite struct { fixtures.Suite dir *dotgit.DotGit path string } var _ = Suite(&ConfigSuite{}) func (s *ConfigSuite) SetUpTest(c *C) { tmp, err := ioutil.TempDir("", "go-git-filestystem-config") c.Assert(err, IsNil) s.dir = dotgit.New(osfs.New(tmp)) s.path = tmp } func (s *ConfigSuite) TestRemotes(c *C) { dir := dotgit.New(fixtures.Basic().ByTag(".git").One().DotGit()) storer := &ConfigStorage{dir} cfg, err := storer.Config() c.Assert(err, IsNil) remotes := cfg.Remotes c.Assert(remotes, HasLen, 1) remote := remotes["origin"] c.Assert(remote.Name, Equals, "origin") c.Assert(remote.URLs, DeepEquals, []string{"https://github.com/git-fixtures/basic"}) c.Assert(remote.Fetch, DeepEquals, []config.RefSpec{config.RefSpec("+refs/heads/*:refs/remotes/origin/*")}) } func (s *ConfigSuite) TearDownTest(c *C) { defer os.RemoveAll(s.path) } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/deltaobject.go000066400000000000000000000011511345605224300253340ustar00rootroot00000000000000package filesystem import ( "gopkg.in/src-d/go-git.v4/plumbing" ) type deltaObject struct { plumbing.EncodedObject base plumbing.Hash hash plumbing.Hash size int64 } func newDeltaObject( obj plumbing.EncodedObject, hash plumbing.Hash, base plumbing.Hash, size int64) plumbing.DeltaObject { return &deltaObject{ EncodedObject: obj, hash: hash, base: base, size: size, } } func (o *deltaObject) BaseHash() plumbing.Hash { return o.base } func (o *deltaObject) ActualSize() int64 { return o.size } func (o *deltaObject) ActualHash() plumbing.Hash { return o.hash } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/dotgit/000077500000000000000000000000001345605224300240215ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/dotgit/dotgit.go000066400000000000000000000614721345605224300256540ustar00rootroot00000000000000// 
https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt package dotgit import ( "bufio" "errors" "fmt" "io" stdioutil "io/ioutil" "os" "path/filepath" "strings" "time" "gopkg.in/src-d/go-billy.v4/osfs" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/storage" "gopkg.in/src-d/go-git.v4/utils/ioutil" "gopkg.in/src-d/go-billy.v4" ) const ( suffix = ".git" packedRefsPath = "packed-refs" configPath = "config" indexPath = "index" shallowPath = "shallow" modulePath = "modules" objectsPath = "objects" packPath = "pack" refsPath = "refs" tmpPackedRefsPrefix = "._packed-refs" packExt = ".pack" idxExt = ".idx" ) var ( // ErrNotFound is returned by New when the path is not found. ErrNotFound = errors.New("path not found") // ErrIdxNotFound is returned by Idxfile when the idx file is not found ErrIdxNotFound = errors.New("idx file not found") // ErrPackfileNotFound is returned by Packfile when the packfile is not found ErrPackfileNotFound = errors.New("packfile not found") // ErrConfigNotFound is returned by Config when the config is not found ErrConfigNotFound = errors.New("config file not found") // ErrPackedRefsDuplicatedRef is returned when a duplicated reference is // found in the packed-ref file. This is usually the case for corrupted git // repositories. ErrPackedRefsDuplicatedRef = errors.New("duplicated ref found in packed-ref file") // ErrPackedRefsBadFormat is returned when the packed-ref file corrupt. ErrPackedRefsBadFormat = errors.New("malformed packed-ref") // ErrSymRefTargetNotFound is returned when a symbolic reference is // targeting a non-existing object. This usually means the repository // is corrupt. ErrSymRefTargetNotFound = errors.New("symbolic reference target not found") ) // Options holds configuration for the storage. type Options struct { // ExclusiveAccess means that the filesystem is not modified externally // while the repo is open. 
	ExclusiveAccess bool
	// KeepDescriptors makes the file descriptors to be reused but they will
	// need to be manually closed calling Close().
	KeepDescriptors bool
}

// The DotGit type represents a local git repository on disk. This
// type is not zero-value-safe, use the New function to initialize it.
type DotGit struct {
	options Options
	fs      billy.Filesystem

	// incoming object directory information
	incomingChecked bool
	incomingDirName string

	// Cached object and pack lists; populated and reused when the
	// ExclusiveAccess option is set (see Objects/ObjectPacks).
	objectList []plumbing.Hash
	objectMap  map[plumbing.Hash]struct{}
	packList   []plumbing.Hash
	packMap    map[plumbing.Hash]struct{}

	// Open file descriptors kept for reuse when KeepDescriptors is set;
	// released by Close().
	files map[string]billy.File
}

// New returns a DotGit value ready to be used. The fs filesystem must be
// rooted at a git repository directory (e.g. "/foo/bar/.git").
func New(fs billy.Filesystem) *DotGit {
	return NewWithOptions(fs, Options{})
}

// NewWithOptions sets non default configuration options.
// See New for complete help.
func NewWithOptions(fs billy.Filesystem, o Options) *DotGit {
	return &DotGit{
		options: o,
		fs:      fs,
	}
}

// Initialize creates all the folder scaffolding (objects/info, objects/pack,
// refs/heads and refs/tags). Folders that already exist are left untouched.
func (d *DotGit) Initialize() error {
	mustExists := []string{
		d.fs.Join("objects", "info"),
		d.fs.Join("objects", "pack"),
		d.fs.Join("refs", "heads"),
		d.fs.Join("refs", "tags"),
	}

	for _, path := range mustExists {
		_, err := d.fs.Stat(path)
		if err == nil {
			// Already present, nothing to do.
			continue
		}

		if !os.IsNotExist(err) {
			return err
		}

		if err := d.fs.MkdirAll(path, os.ModeDir|os.ModePerm); err != nil {
			return err
		}
	}

	return nil
}

// Close closes all opened files.
func (d *DotGit) Close() error { var firstError error if d.files != nil { for _, f := range d.files { err := f.Close() if err != nil && firstError == nil { firstError = err continue } } d.files = nil } if firstError != nil { return firstError } return nil } // ConfigWriter returns a file pointer for write to the config file func (d *DotGit) ConfigWriter() (billy.File, error) { return d.fs.Create(configPath) } // Config returns a file pointer for read to the config file func (d *DotGit) Config() (billy.File, error) { return d.fs.Open(configPath) } // IndexWriter returns a file pointer for write to the index file func (d *DotGit) IndexWriter() (billy.File, error) { return d.fs.Create(indexPath) } // Index returns a file pointer for read to the index file func (d *DotGit) Index() (billy.File, error) { return d.fs.Open(indexPath) } // ShallowWriter returns a file pointer for write to the shallow file func (d *DotGit) ShallowWriter() (billy.File, error) { return d.fs.Create(shallowPath) } // Shallow returns a file pointer for read to the shallow file func (d *DotGit) Shallow() (billy.File, error) { f, err := d.fs.Open(shallowPath) if err != nil { if os.IsNotExist(err) { return nil, nil } return nil, err } return f, nil } // NewObjectPack return a writer for a new packfile, it saves the packfile to // disk and also generates and save the index for the given packfile. 
func (d *DotGit) NewObjectPack() (*PackWriter, error) { d.cleanPackList() return newPackWrite(d.fs) } // ObjectPacks returns the list of availables packfiles func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) { if !d.options.ExclusiveAccess { return d.objectPacks() } err := d.genPackList() if err != nil { return nil, err } return d.packList, nil } func (d *DotGit) objectPacks() ([]plumbing.Hash, error) { packDir := d.fs.Join(objectsPath, packPath) files, err := d.fs.ReadDir(packDir) if err != nil { if os.IsNotExist(err) { return nil, nil } return nil, err } var packs []plumbing.Hash for _, f := range files { if !strings.HasSuffix(f.Name(), packExt) { continue } n := f.Name() h := plumbing.NewHash(n[5 : len(n)-5]) //pack-(hash).pack if h.IsZero() { // Ignore files with badly-formatted names. continue } packs = append(packs, h) } return packs, nil } func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string { return d.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s.%s", hash.String(), extension)) } func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) { if d.files == nil { d.files = make(map[string]billy.File) } err := d.hasPack(hash) if err != nil { return nil, err } path := d.objectPackPath(hash, extension) f, ok := d.files[path] if ok { return f, nil } pack, err := d.fs.Open(path) if err != nil { if os.IsNotExist(err) { return nil, ErrPackfileNotFound } return nil, err } if d.options.KeepDescriptors && extension == "pack" { d.files[path] = pack } return pack, nil } // ObjectPack returns a fs.File of the given packfile func (d *DotGit) ObjectPack(hash plumbing.Hash) (billy.File, error) { err := d.hasPack(hash) if err != nil { return nil, err } return d.objectPackOpen(hash, `pack`) } // ObjectPackIdx returns a fs.File of the index file for a given packfile func (d *DotGit) ObjectPackIdx(hash plumbing.Hash) (billy.File, error) { err := d.hasPack(hash) if err != nil { return nil, err } return 
d.objectPackOpen(hash, `idx`) } func (d *DotGit) DeleteOldObjectPackAndIndex(hash plumbing.Hash, t time.Time) error { d.cleanPackList() path := d.objectPackPath(hash, `pack`) if !t.IsZero() { fi, err := d.fs.Stat(path) if err != nil { return err } // too new, skip deletion. if !fi.ModTime().Before(t) { return nil } } err := d.fs.Remove(path) if err != nil { return err } return d.fs.Remove(d.objectPackPath(hash, `idx`)) } // NewObject return a writer for a new object file. func (d *DotGit) NewObject() (*ObjectWriter, error) { d.cleanObjectList() return newObjectWriter(d.fs) } // Objects returns a slice with the hashes of objects found under the // .git/objects/ directory. func (d *DotGit) Objects() ([]plumbing.Hash, error) { if d.options.ExclusiveAccess { err := d.genObjectList() if err != nil { return nil, err } return d.objectList, nil } var objects []plumbing.Hash err := d.ForEachObjectHash(func(hash plumbing.Hash) error { objects = append(objects, hash) return nil }) if err != nil { return nil, err } return objects, nil } // ForEachObjectHash iterates over the hashes of objects found under the // .git/objects/ directory and executes the provided function. func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error { if !d.options.ExclusiveAccess { return d.forEachObjectHash(fun) } err := d.genObjectList() if err != nil { return err } for _, h := range d.objectList { err := fun(h) if err != nil { return err } } return nil } func (d *DotGit) forEachObjectHash(fun func(plumbing.Hash) error) error { files, err := d.fs.ReadDir(objectsPath) if err != nil { if os.IsNotExist(err) { return nil } return err } for _, f := range files { if f.IsDir() && len(f.Name()) == 2 && isHex(f.Name()) { base := f.Name() d, err := d.fs.ReadDir(d.fs.Join(objectsPath, base)) if err != nil { return err } for _, o := range d { h := plumbing.NewHash(base + o.Name()) if h.IsZero() { // Ignore files with badly-formatted names. 
continue } err = fun(h) if err != nil { return err } } } } return nil } func (d *DotGit) cleanObjectList() { d.objectMap = nil d.objectList = nil } func (d *DotGit) genObjectList() error { if d.objectMap != nil { return nil } d.objectMap = make(map[plumbing.Hash]struct{}) return d.forEachObjectHash(func(h plumbing.Hash) error { d.objectList = append(d.objectList, h) d.objectMap[h] = struct{}{} return nil }) } func (d *DotGit) hasObject(h plumbing.Hash) error { if !d.options.ExclusiveAccess { return nil } err := d.genObjectList() if err != nil { return err } _, ok := d.objectMap[h] if !ok { return plumbing.ErrObjectNotFound } return nil } func (d *DotGit) cleanPackList() { d.packMap = nil d.packList = nil } func (d *DotGit) genPackList() error { if d.packMap != nil { return nil } op, err := d.objectPacks() if err != nil { return err } d.packMap = make(map[plumbing.Hash]struct{}) d.packList = nil for _, h := range op { d.packList = append(d.packList, h) d.packMap[h] = struct{}{} } return nil } func (d *DotGit) hasPack(h plumbing.Hash) error { if !d.options.ExclusiveAccess { return nil } err := d.genPackList() if err != nil { return err } _, ok := d.packMap[h] if !ok { return ErrPackfileNotFound } return nil } func (d *DotGit) objectPath(h plumbing.Hash) string { hash := h.String() return d.fs.Join(objectsPath, hash[0:2], hash[2:40]) } // incomingObjectPath is intended to add support for a git pre-receive hook // to be written it adds support for go-git to find objects in an "incoming" // directory, so that the library can be used to write a pre-receive hook // that deals with the incoming objects. 
// // More on git hooks found here : https://git-scm.com/docs/githooks // More on 'quarantine'/incoming directory here: // https://git-scm.com/docs/git-receive-pack func (d *DotGit) incomingObjectPath(h plumbing.Hash) string { hString := h.String() if d.incomingDirName == "" { return d.fs.Join(objectsPath, hString[0:2], hString[2:40]) } return d.fs.Join(objectsPath, d.incomingDirName, hString[0:2], hString[2:40]) } // hasIncomingObjects searches for an incoming directory and keeps its name // so it doesn't have to be found each time an object is accessed. func (d *DotGit) hasIncomingObjects() bool { if !d.incomingChecked { directoryContents, err := d.fs.ReadDir(objectsPath) if err == nil { for _, file := range directoryContents { if strings.HasPrefix(file.Name(), "incoming-") && file.IsDir() { d.incomingDirName = file.Name() } } } d.incomingChecked = true } return d.incomingDirName != "" } // Object returns a fs.File pointing the object file, if exists func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) { err := d.hasObject(h) if err != nil { return nil, err } obj1, err1 := d.fs.Open(d.objectPath(h)) if os.IsNotExist(err1) && d.hasIncomingObjects() { obj2, err2 := d.fs.Open(d.incomingObjectPath(h)) if err2 != nil { return obj1, err1 } return obj2, err2 } return obj1, err1 } // ObjectStat returns a os.FileInfo pointing the object file, if exists func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) { err := d.hasObject(h) if err != nil { return nil, err } obj1, err1 := d.fs.Stat(d.objectPath(h)) if os.IsNotExist(err1) && d.hasIncomingObjects() { obj2, err2 := d.fs.Stat(d.incomingObjectPath(h)) if err2 != nil { return obj1, err1 } return obj2, err2 } return obj1, err1 } // ObjectDelete removes the object file, if exists func (d *DotGit) ObjectDelete(h plumbing.Hash) error { d.cleanObjectList() err1 := d.fs.Remove(d.objectPath(h)) if os.IsNotExist(err1) && d.hasIncomingObjects() { err2 := d.fs.Remove(d.incomingObjectPath(h)) if err2 != nil { 
return err1 } return err2 } return err1 } func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) { b, err := stdioutil.ReadAll(rd) if err != nil { return nil, err } line := strings.TrimSpace(string(b)) return plumbing.NewReferenceFromStrings(name, line), nil } func (d *DotGit) checkReferenceAndTruncate(f billy.File, old *plumbing.Reference) error { if old == nil { return nil } ref, err := d.readReferenceFrom(f, old.Name().String()) if err != nil { return err } if ref.Hash() != old.Hash() { return storage.ErrReferenceHasChanged } _, err = f.Seek(0, io.SeekStart) if err != nil { return err } return f.Truncate(0) } func (d *DotGit) SetRef(r, old *plumbing.Reference) error { var content string switch r.Type() { case plumbing.SymbolicReference: content = fmt.Sprintf("ref: %s\n", r.Target()) case plumbing.HashReference: content = fmt.Sprintln(r.Hash().String()) } fileName := r.Name().String() return d.setRef(fileName, content, old) } // Refs scans the git directory collecting references, which it returns. // Symbolic references are resolved and included in the output. func (d *DotGit) Refs() ([]*plumbing.Reference, error) { var refs []*plumbing.Reference var seen = make(map[plumbing.ReferenceName]bool) if err := d.addRefsFromRefDir(&refs, seen); err != nil { return nil, err } if err := d.addRefsFromPackedRefs(&refs, seen); err != nil { return nil, err } if err := d.addRefFromHEAD(&refs); err != nil { return nil, err } return refs, nil } // Ref returns the reference for a given reference name. 
func (d *DotGit) Ref(name plumbing.ReferenceName) (*plumbing.Reference, error) { ref, err := d.readReferenceFile(".", name.String()) if err == nil { return ref, nil } return d.packedRef(name) } func (d *DotGit) findPackedRefsInFile(f billy.File) ([]*plumbing.Reference, error) { s := bufio.NewScanner(f) var refs []*plumbing.Reference for s.Scan() { ref, err := d.processLine(s.Text()) if err != nil { return nil, err } if ref != nil { refs = append(refs, ref) } } return refs, s.Err() } func (d *DotGit) findPackedRefs() (r []*plumbing.Reference, err error) { f, err := d.fs.Open(packedRefsPath) if err != nil { if os.IsNotExist(err) { return nil, nil } return nil, err } defer ioutil.CheckClose(f, &err) return d.findPackedRefsInFile(f) } func (d *DotGit) packedRef(name plumbing.ReferenceName) (*plumbing.Reference, error) { refs, err := d.findPackedRefs() if err != nil { return nil, err } for _, ref := range refs { if ref.Name() == name { return ref, nil } } return nil, plumbing.ErrReferenceNotFound } // RemoveRef removes a reference by name. func (d *DotGit) RemoveRef(name plumbing.ReferenceName) error { path := d.fs.Join(".", name.String()) _, err := d.fs.Stat(path) if err == nil { err = d.fs.Remove(path) // Drop down to remove it from the packed refs file, too. 
} if err != nil && !os.IsNotExist(err) { return err } return d.rewritePackedRefsWithoutRef(name) } func (d *DotGit) addRefsFromPackedRefs(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) (err error) { packedRefs, err := d.findPackedRefs() if err != nil { return err } for _, ref := range packedRefs { if !seen[ref.Name()] { *refs = append(*refs, ref) seen[ref.Name()] = true } } return nil } func (d *DotGit) addRefsFromPackedRefsFile(refs *[]*plumbing.Reference, f billy.File, seen map[plumbing.ReferenceName]bool) (err error) { packedRefs, err := d.findPackedRefsInFile(f) if err != nil { return err } for _, ref := range packedRefs { if !seen[ref.Name()] { *refs = append(*refs, ref) seen[ref.Name()] = true } } return nil } func (d *DotGit) openAndLockPackedRefs(doCreate bool) ( pr billy.File, err error) { var f billy.File defer func() { if err != nil && f != nil { ioutil.CheckClose(f, &err) } }() // File mode is retrieved from a constant defined in the target specific // files (dotgit_rewrite_packed_refs_*). Some modes are not available // in all filesystems. openFlags := d.openAndLockPackedRefsMode() if doCreate { openFlags |= os.O_CREATE } // Keep trying to open and lock the file until we're sure the file // didn't change between the open and the lock. for { f, err = d.fs.OpenFile(packedRefsPath, openFlags, 0600) if err != nil { if os.IsNotExist(err) && !doCreate { return nil, nil } return nil, err } fi, err := d.fs.Stat(packedRefsPath) if err != nil { return nil, err } mtime := fi.ModTime() err = f.Lock() if err != nil { return nil, err } fi, err = d.fs.Stat(packedRefsPath) if err != nil { return nil, err } if mtime.Equal(fi.ModTime()) { break } // The file has changed since we opened it. Close and retry. 
err = f.Close() if err != nil { return nil, err } } return f, nil } func (d *DotGit) rewritePackedRefsWithoutRef(name plumbing.ReferenceName) (err error) { pr, err := d.openAndLockPackedRefs(false) if err != nil { return err } if pr == nil { return nil } defer ioutil.CheckClose(pr, &err) // Creating the temp file in the same directory as the target file // improves our chances for rename operation to be atomic. tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix) if err != nil { return err } tmpName := tmp.Name() defer func() { ioutil.CheckClose(tmp, &err) _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it }() s := bufio.NewScanner(pr) found := false for s.Scan() { line := s.Text() ref, err := d.processLine(line) if err != nil { return err } if ref != nil && ref.Name() == name { found = true continue } if _, err := fmt.Fprintln(tmp, line); err != nil { return err } } if err := s.Err(); err != nil { return err } if !found { return nil } return d.rewritePackedRefsWhileLocked(tmp, pr) } // process lines from a packed-refs file func (d *DotGit) processLine(line string) (*plumbing.Reference, error) { if len(line) == 0 { return nil, nil } switch line[0] { case '#': // comment - ignore return nil, nil case '^': // annotated tag commit of the previous line - ignore return nil, nil default: ws := strings.Split(line, " ") // hash then ref if len(ws) != 2 { return nil, ErrPackedRefsBadFormat } return plumbing.NewReferenceFromStrings(ws[1], ws[0]), nil } } func (d *DotGit) addRefsFromRefDir(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) error { return d.walkReferencesTree(refs, []string{refsPath}, seen) } func (d *DotGit) walkReferencesTree(refs *[]*plumbing.Reference, relPath []string, seen map[plumbing.ReferenceName]bool) error { files, err := d.fs.ReadDir(d.fs.Join(relPath...)) if err != nil { if os.IsNotExist(err) { return nil } return err } for _, f := range files { newRelPath := append(append([]string(nil), relPath...), f.Name()) if 
f.IsDir() { if err = d.walkReferencesTree(refs, newRelPath, seen); err != nil { return err } continue } ref, err := d.readReferenceFile(".", strings.Join(newRelPath, "/")) if err != nil { return err } if ref != nil && !seen[ref.Name()] { *refs = append(*refs, ref) seen[ref.Name()] = true } } return nil } func (d *DotGit) addRefFromHEAD(refs *[]*plumbing.Reference) error { ref, err := d.readReferenceFile(".", "HEAD") if err != nil { if os.IsNotExist(err) { return nil } return err } *refs = append(*refs, ref) return nil } func (d *DotGit) readReferenceFile(path, name string) (ref *plumbing.Reference, err error) { path = d.fs.Join(path, d.fs.Join(strings.Split(name, "/")...)) f, err := d.fs.Open(path) if err != nil { return nil, err } defer ioutil.CheckClose(f, &err) return d.readReferenceFrom(f, name) } func (d *DotGit) CountLooseRefs() (int, error) { var refs []*plumbing.Reference var seen = make(map[plumbing.ReferenceName]bool) if err := d.addRefsFromRefDir(&refs, seen); err != nil { return 0, err } return len(refs), nil } // PackRefs packs all loose refs into the packed-refs file. // // This implementation only works under the assumption that the view // of the file system won't be updated during this operation. This // strategy would not work on a general file system though, without // locking each loose reference and checking it again before deleting // the file, because otherwise an updated reference could sneak in and // then be deleted by the packed-refs process. Alternatively, every // ref update could also lock packed-refs, so only one lock is // required during ref-packing. But that would worsen performance in // the common case. // // TODO: add an "all" boolean like the `git pack-refs --all` flag. // When `all` is false, it would only pack refs that have already been // packed, plus all tags. func (d *DotGit) PackRefs() (err error) { // Lock packed-refs, and create it if it doesn't exist yet. 
f, err := d.openAndLockPackedRefs(true) if err != nil { return err } defer ioutil.CheckClose(f, &err) // Gather all refs using addRefsFromRefDir and addRefsFromPackedRefs. var refs []*plumbing.Reference seen := make(map[plumbing.ReferenceName]bool) if err = d.addRefsFromRefDir(&refs, seen); err != nil { return err } if len(refs) == 0 { // Nothing to do! return nil } numLooseRefs := len(refs) if err = d.addRefsFromPackedRefsFile(&refs, f, seen); err != nil { return err } // Write them all to a new temp packed-refs file. tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix) if err != nil { return err } tmpName := tmp.Name() defer func() { ioutil.CheckClose(tmp, &err) _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it }() w := bufio.NewWriter(tmp) for _, ref := range refs { _, err = w.WriteString(ref.String() + "\n") if err != nil { return err } } err = w.Flush() if err != nil { return err } // Rename the temp packed-refs file. err = d.rewritePackedRefsWhileLocked(tmp, f) if err != nil { return err } // Delete all the loose refs, while still holding the packed-refs // lock. for _, ref := range refs[:numLooseRefs] { path := d.fs.Join(".", ref.Name().String()) err = d.fs.Remove(path) if err != nil && !os.IsNotExist(err) { return err } } return nil } // Module return a billy.Filesystem pointing to the module folder func (d *DotGit) Module(name string) (billy.Filesystem, error) { return d.fs.Chroot(d.fs.Join(modulePath, name)) } // Alternates returns DotGit(s) based off paths in objects/info/alternates if // available. This can be used to checks if it's a shared repository. func (d *DotGit) Alternates() ([]*DotGit, error) { altpath := d.fs.Join("objects", "info", "alternates") f, err := d.fs.Open(altpath) if err != nil { return nil, err } defer f.Close() var alternates []*DotGit // Read alternate paths line-by-line and create DotGit objects. 
scanner := bufio.NewScanner(f) for scanner.Scan() { path := scanner.Text() if !filepath.IsAbs(path) { // For relative paths, we can perform an internal conversion to // slash so that they work cross-platform. slashPath := filepath.ToSlash(path) // If the path is not absolute, it must be relative to object // database (.git/objects/info). // https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html // Hence, derive a path relative to DotGit's root. // "../../../reponame/.git/" -> "../../reponame/.git" // Remove the first ../ relpath := filepath.Join(strings.Split(slashPath, "/")[1:]...) normalPath := filepath.FromSlash(relpath) path = filepath.Join(d.fs.Root(), normalPath) } fs := osfs.New(filepath.Dir(path)) alternates = append(alternates, New(fs)) } if err = scanner.Err(); err != nil { return nil, err } return alternates, nil } // Fs returns the underlying filesystem of the DotGit folder. func (d *DotGit) Fs() billy.Filesystem { return d.fs } func isHex(s string) bool { for _, b := range []byte(s) { if isNum(b) { continue } if isHexAlpha(b) { continue } return false } return true } func isNum(b byte) bool { return b >= '0' && b <= '9' } func isHexAlpha(b byte) bool { return b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F' } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go000066400000000000000000000032371345605224300317360ustar00rootroot00000000000000package dotgit import ( "io" "os" "runtime" "gopkg.in/src-d/go-billy.v4" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) func (d *DotGit) openAndLockPackedRefsMode() int { if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) { return os.O_RDWR } return os.O_RDONLY } func (d *DotGit) rewritePackedRefsWhileLocked( tmp billy.File, pr billy.File) error { // Try plain rename. If we aren't using the bare Windows filesystem as the // storage layer, we might be able to get away with a rename over a locked // file. 
err := d.fs.Rename(tmp.Name(), pr.Name()) if err == nil { return nil } // If we are in a filesystem that does not support rename (e.g. sivafs) // a full copy is done. if err == billy.ErrNotSupported { return d.copyNewFile(tmp, pr) } if runtime.GOOS != "windows" { return err } // Otherwise, Windows doesn't let us rename over a locked file, so // we have to do a straight copy. Unfortunately this could result // in a partially-written file if the process fails before the // copy completes. return d.copyToExistingFile(tmp, pr) } func (d *DotGit) copyToExistingFile(tmp, pr billy.File) error { _, err := pr.Seek(0, io.SeekStart) if err != nil { return err } err = pr.Truncate(0) if err != nil { return err } _, err = tmp.Seek(0, io.SeekStart) if err != nil { return err } _, err = io.Copy(pr, tmp) return err } func (d *DotGit) copyNewFile(tmp billy.File, pr billy.File) (err error) { prWrite, err := d.fs.Create(pr.Name()) if err != nil { return err } defer ioutil.CheckClose(prWrite, &err) _, err = tmp.Seek(0, io.SeekStart) if err != nil { return err } _, err = io.Copy(prWrite, tmp) return err } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/dotgit/dotgit_setref.go000066400000000000000000000043531345605224300272170ustar00rootroot00000000000000package dotgit import ( "fmt" "os" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/utils/ioutil" "gopkg.in/src-d/go-billy.v4" ) func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) { if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) { return d.setRefRwfs(fileName, content, old) } return d.setRefNorwfs(fileName, content, old) } func (d *DotGit) setRefRwfs(fileName, content string, old *plumbing.Reference) (err error) { // If we are not checking an old ref, just truncate the file. 
mode := os.O_RDWR | os.O_CREATE if old == nil { mode |= os.O_TRUNC } f, err := d.fs.OpenFile(fileName, mode, 0666) if err != nil { return err } defer ioutil.CheckClose(f, &err) // Lock is unlocked by the deferred Close above. This is because Unlock // does not imply a fsync and thus there would be a race between // Unlock+Close and other concurrent writers. Adding Sync to go-billy // could work, but this is better (and avoids superfluous syncs). err = f.Lock() if err != nil { return err } // this is a no-op to call even when old is nil. err = d.checkReferenceAndTruncate(f, old) if err != nil { return err } _, err = f.Write([]byte(content)) return err } // There are some filesystems that don't support opening files in RDWD mode. // In these filesystems the standard SetRef function can not be used as it // reads the reference file to check that it's not modified before updating it. // // This version of the function writes the reference without extra checks // making it compatible with these simple filesystems. This is usually not // a problem as they should be accessed by only one process at a time. func (d *DotGit) setRefNorwfs(fileName, content string, old *plumbing.Reference) error { _, err := d.fs.Stat(fileName) if err == nil && old != nil { fRead, err := d.fs.Open(fileName) if err != nil { return err } ref, err := d.readReferenceFrom(fRead, old.Name().String()) fRead.Close() if err != nil { return err } if ref.Hash() != old.Hash() { return fmt.Errorf("reference has changed concurrently") } } f, err := d.fs.Create(fileName) if err != nil { return err } defer f.Close() _, err = f.Write([]byte(content)) return err } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/dotgit/dotgit_test.go000066400000000000000000000515371345605224300267140ustar00rootroot00000000000000package dotgit import ( "bufio" "io/ioutil" "os" "path/filepath" "runtime" "strings" "testing" "gopkg.in/src-d/go-billy.v4" "gopkg.in/src-d/go-git.v4/plumbing" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4/osfs" "gopkg.in/src-d/go-git-fixtures.v3" ) func Test(t *testing.T) { TestingT(t) } type SuiteDotGit struct { fixtures.Suite } var _ = Suite(&SuiteDotGit{}) func (s *SuiteDotGit) TestInitialize(c *C) { tmp, err := ioutil.TempDir("", "dot-git") c.Assert(err, IsNil) defer os.RemoveAll(tmp) fs := osfs.New(tmp) dir := New(fs) err = dir.Initialize() c.Assert(err, IsNil) _, err = fs.Stat(fs.Join("objects", "info")) c.Assert(err, IsNil) _, err = fs.Stat(fs.Join("objects", "pack")) c.Assert(err, IsNil) _, err = fs.Stat(fs.Join("refs", "heads")) c.Assert(err, IsNil) _, err = fs.Stat(fs.Join("refs", "tags")) c.Assert(err, IsNil) } func (s *SuiteDotGit) TestSetRefs(c *C) { tmp, err := ioutil.TempDir("", "dot-git") c.Assert(err, IsNil) defer os.RemoveAll(tmp) fs := osfs.New(tmp) dir := New(fs) testSetRefs(c, dir) } func (s *SuiteDotGit) TestSetRefsNorwfs(c *C) { tmp, err := ioutil.TempDir("", "dot-git") c.Assert(err, IsNil) defer os.RemoveAll(tmp) fs := osfs.New(tmp) dir := New(&norwfs{fs}) testSetRefs(c, dir) } func testSetRefs(c *C, dir *DotGit) { firstFoo := plumbing.NewReferenceFromStrings( "refs/heads/foo", "e8d3ffab552895c19b9fcf7aa264d277cde33881", ) err := dir.SetRef(firstFoo, nil) c.Assert(err, IsNil) err = dir.SetRef(plumbing.NewReferenceFromStrings( "refs/heads/symbolic", "ref: refs/heads/foo", ), nil) c.Assert(err, IsNil) err = dir.SetRef(plumbing.NewReferenceFromStrings( "bar", "e8d3ffab552895c19b9fcf7aa264d277cde33881", ), nil) c.Assert(err, IsNil) refs, err := dir.Refs() c.Assert(err, IsNil) c.Assert(refs, HasLen, 2) ref := findReference(refs, "refs/heads/foo") c.Assert(ref, NotNil) c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") ref = findReference(refs, "refs/heads/symbolic") c.Assert(ref, NotNil) c.Assert(ref.Target().String(), Equals, "refs/heads/foo") ref = findReference(refs, "bar") c.Assert(ref, IsNil) ref, err = dir.Ref("refs/heads/foo") c.Assert(err, IsNil) c.Assert(ref, 
NotNil) c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") ref, err = dir.Ref("refs/heads/symbolic") c.Assert(err, IsNil) c.Assert(ref, NotNil) c.Assert(ref.Target().String(), Equals, "refs/heads/foo") ref, err = dir.Ref("bar") c.Assert(err, IsNil) c.Assert(ref, NotNil) c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") // Check that SetRef with a non-nil `old` works. err = dir.SetRef(plumbing.NewReferenceFromStrings( "refs/heads/foo", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", ), firstFoo) c.Assert(err, IsNil) // `firstFoo` is no longer the right `old` reference, so this // should fail. err = dir.SetRef(plumbing.NewReferenceFromStrings( "refs/heads/foo", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", ), firstFoo) c.Assert(err, NotNil) } func (s *SuiteDotGit) TestRefsFromPackedRefs(c *C) { fs := fixtures.Basic().ByTag(".git").One().DotGit() dir := New(fs) refs, err := dir.Refs() c.Assert(err, IsNil) ref := findReference(refs, "refs/remotes/origin/branch") c.Assert(ref, NotNil) c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") } func (s *SuiteDotGit) TestRefsFromReferenceFile(c *C) { fs := fixtures.Basic().ByTag(".git").One().DotGit() dir := New(fs) refs, err := dir.Refs() c.Assert(err, IsNil) ref := findReference(refs, "refs/remotes/origin/HEAD") c.Assert(ref, NotNil) c.Assert(ref.Type(), Equals, plumbing.SymbolicReference) c.Assert(string(ref.Target()), Equals, "refs/remotes/origin/master") } func BenchmarkRefMultipleTimes(b *testing.B) { fixtures.Init() fs := fixtures.Basic().ByTag(".git").One().DotGit() refname := plumbing.ReferenceName("refs/remotes/origin/branch") dir := New(fs) _, err := dir.Ref(refname) if err != nil { b.Fatalf("unexpected error: %s", err) } for i := 0; i < b.N; i++ { _, err := dir.Ref(refname) if err != nil { b.Fatalf("unexpected error: %s", err) } } } func (s *SuiteDotGit) TestRemoveRefFromReferenceFile(c *C) { fs := 
fixtures.Basic().ByTag(".git").One().DotGit() dir := New(fs) name := plumbing.ReferenceName("refs/remotes/origin/HEAD") err := dir.RemoveRef(name) c.Assert(err, IsNil) refs, err := dir.Refs() c.Assert(err, IsNil) ref := findReference(refs, string(name)) c.Assert(ref, IsNil) } func (s *SuiteDotGit) TestRemoveRefFromPackedRefs(c *C) { fs := fixtures.Basic().ByTag(".git").One().DotGit() dir := New(fs) name := plumbing.ReferenceName("refs/remotes/origin/master") err := dir.RemoveRef(name) c.Assert(err, IsNil) b, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) c.Assert(err, IsNil) c.Assert(string(b), Equals, ""+ "# pack-refs with: peeled fully-peeled \n"+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master\n"+ "e8d3ffab552895c19b9fcf7aa264d277cde33881 refs/remotes/origin/branch\n") } func (s *SuiteDotGit) TestRemoveRefFromReferenceFileAndPackedRefs(c *C) { fs := fixtures.Basic().ByTag(".git").One().DotGit() dir := New(fs) // Make a ref file for a ref that's already in `packed-refs`. err := dir.SetRef(plumbing.NewReferenceFromStrings( "refs/remotes/origin/branch", "e8d3ffab552895c19b9fcf7aa264d277cde33881", ), nil) // Make sure it only appears once in the refs list. 
refs, err := dir.Refs() c.Assert(err, IsNil) found := false for _, ref := range refs { if ref.Name() == "refs/remotes/origin/branch" { c.Assert(found, Equals, false) found = true } } name := plumbing.ReferenceName("refs/remotes/origin/branch") err = dir.RemoveRef(name) c.Assert(err, IsNil) b, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) c.Assert(err, IsNil) c.Assert(string(b), Equals, ""+ "# pack-refs with: peeled fully-peeled \n"+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master\n"+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/remotes/origin/master\n") refs, err = dir.Refs() c.Assert(err, IsNil) ref := findReference(refs, string(name)) c.Assert(ref, IsNil) } func (s *SuiteDotGit) TestRemoveRefNonExistent(c *C) { fs := fixtures.Basic().ByTag(".git").One().DotGit() dir := New(fs) packedRefs := filepath.Join(fs.Root(), packedRefsPath) before, err := ioutil.ReadFile(packedRefs) c.Assert(err, IsNil) name := plumbing.ReferenceName("refs/heads/nonexistent") err = dir.RemoveRef(name) c.Assert(err, IsNil) after, err := ioutil.ReadFile(packedRefs) c.Assert(err, IsNil) c.Assert(string(before), Equals, string(after)) } func (s *SuiteDotGit) TestRemoveRefInvalidPackedRefs(c *C) { fs := fixtures.Basic().ByTag(".git").One().DotGit() dir := New(fs) packedRefs := filepath.Join(fs.Root(), packedRefsPath) brokenContent := "BROKEN STUFF REALLY BROKEN" err := ioutil.WriteFile(packedRefs, []byte(brokenContent), os.FileMode(0755)) c.Assert(err, IsNil) name := plumbing.ReferenceName("refs/heads/nonexistent") err = dir.RemoveRef(name) c.Assert(err, NotNil) after, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) c.Assert(err, IsNil) c.Assert(brokenContent, Equals, string(after)) } func (s *SuiteDotGit) TestRemoveRefInvalidPackedRefs2(c *C) { fs := fixtures.Basic().ByTag(".git").One().DotGit() dir := New(fs) packedRefs := filepath.Join(fs.Root(), packedRefsPath) brokenContent := strings.Repeat("a", bufio.MaxScanTokenSize*2) err := 
ioutil.WriteFile(packedRefs, []byte(brokenContent), os.FileMode(0755)) c.Assert(err, IsNil) name := plumbing.ReferenceName("refs/heads/nonexistent") err = dir.RemoveRef(name) c.Assert(err, NotNil) after, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) c.Assert(err, IsNil) c.Assert(brokenContent, Equals, string(after)) } func (s *SuiteDotGit) TestRefsFromHEADFile(c *C) { fs := fixtures.Basic().ByTag(".git").One().DotGit() dir := New(fs) refs, err := dir.Refs() c.Assert(err, IsNil) ref := findReference(refs, "HEAD") c.Assert(ref, NotNil) c.Assert(ref.Type(), Equals, plumbing.SymbolicReference) c.Assert(string(ref.Target()), Equals, "refs/heads/master") } func (s *SuiteDotGit) TestConfig(c *C) { fs := fixtures.Basic().ByTag(".git").One().DotGit() dir := New(fs) file, err := dir.Config() c.Assert(err, IsNil) c.Assert(filepath.Base(file.Name()), Equals, "config") } func (s *SuiteDotGit) TestConfigWriteAndConfig(c *C) { tmp, err := ioutil.TempDir("", "dot-git") c.Assert(err, IsNil) defer os.RemoveAll(tmp) fs := osfs.New(tmp) dir := New(fs) f, err := dir.ConfigWriter() c.Assert(err, IsNil) _, err = f.Write([]byte("foo")) c.Assert(err, IsNil) f, err = dir.Config() c.Assert(err, IsNil) cnt, err := ioutil.ReadAll(f) c.Assert(err, IsNil) c.Assert(string(cnt), Equals, "foo") } func (s *SuiteDotGit) TestIndex(c *C) { fs := fixtures.Basic().ByTag(".git").One().DotGit() dir := New(fs) idx, err := dir.Index() c.Assert(err, IsNil) c.Assert(idx, NotNil) } func (s *SuiteDotGit) TestIndexWriteAndIndex(c *C) { tmp, err := ioutil.TempDir("", "dot-git") c.Assert(err, IsNil) defer os.RemoveAll(tmp) fs := osfs.New(tmp) dir := New(fs) f, err := dir.IndexWriter() c.Assert(err, IsNil) _, err = f.Write([]byte("foo")) c.Assert(err, IsNil) f, err = dir.Index() c.Assert(err, IsNil) cnt, err := ioutil.ReadAll(f) c.Assert(err, IsNil) c.Assert(string(cnt), Equals, "foo") } func (s *SuiteDotGit) TestShallow(c *C) { fs := fixtures.Basic().ByTag(".git").One().DotGit() dir := New(fs) 
file, err := dir.Shallow() c.Assert(err, IsNil) c.Assert(file, IsNil) } func (s *SuiteDotGit) TestShallowWriteAndShallow(c *C) { tmp, err := ioutil.TempDir("", "dot-git") c.Assert(err, IsNil) defer os.RemoveAll(tmp) fs := osfs.New(tmp) dir := New(fs) f, err := dir.ShallowWriter() c.Assert(err, IsNil) _, err = f.Write([]byte("foo")) c.Assert(err, IsNil) f, err = dir.Shallow() c.Assert(err, IsNil) cnt, err := ioutil.ReadAll(f) c.Assert(err, IsNil) c.Assert(string(cnt), Equals, "foo") } func findReference(refs []*plumbing.Reference, name string) *plumbing.Reference { n := plumbing.ReferenceName(name) for _, ref := range refs { if ref.Name() == n { return ref } } return nil } func (s *SuiteDotGit) TestObjectPacks(c *C) { f := fixtures.Basic().ByTag(".git").One() fs := f.DotGit() dir := New(fs) testObjectPacks(c, fs, dir, f) } func (s *SuiteDotGit) TestObjectPacksExclusive(c *C) { f := fixtures.Basic().ByTag(".git").One() fs := f.DotGit() dir := NewWithOptions(fs, Options{ExclusiveAccess: true}) testObjectPacks(c, fs, dir, f) } func testObjectPacks(c *C, fs billy.Filesystem, dir *DotGit, f *fixtures.Fixture) { hashes, err := dir.ObjectPacks() c.Assert(err, IsNil) c.Assert(hashes, HasLen, 1) c.Assert(hashes[0], Equals, f.PackfileHash) // Make sure that a random file in the pack directory doesn't // break everything. 
badFile, err := fs.Create("objects/pack/OOPS_THIS_IS_NOT_RIGHT.pack") c.Assert(err, IsNil) err = badFile.Close() c.Assert(err, IsNil) hashes2, err := dir.ObjectPacks() c.Assert(err, IsNil) c.Assert(hashes2, HasLen, 1) c.Assert(hashes[0], Equals, hashes2[0]) } func (s *SuiteDotGit) TestObjectPack(c *C) { f := fixtures.Basic().ByTag(".git").One() fs := f.DotGit() dir := New(fs) pack, err := dir.ObjectPack(f.PackfileHash) c.Assert(err, IsNil) c.Assert(filepath.Ext(pack.Name()), Equals, ".pack") } func (s *SuiteDotGit) TestObjectPackWithKeepDescriptors(c *C) { f := fixtures.Basic().ByTag(".git").One() fs := f.DotGit() dir := NewWithOptions(fs, Options{KeepDescriptors: true}) pack, err := dir.ObjectPack(f.PackfileHash) c.Assert(err, IsNil) c.Assert(filepath.Ext(pack.Name()), Equals, ".pack") // Move to an specific offset pack.Seek(42, os.SEEK_SET) pack2, err := dir.ObjectPack(f.PackfileHash) c.Assert(err, IsNil) // If the file is the same the offset should be the same offset, err := pack2.Seek(0, os.SEEK_CUR) c.Assert(err, IsNil) c.Assert(offset, Equals, int64(42)) err = dir.Close() c.Assert(err, IsNil) pack2, err = dir.ObjectPack(f.PackfileHash) c.Assert(err, IsNil) // If the file is opened again its offset should be 0 offset, err = pack2.Seek(0, os.SEEK_CUR) c.Assert(err, IsNil) c.Assert(offset, Equals, int64(0)) err = pack2.Close() c.Assert(err, IsNil) err = dir.Close() c.Assert(err, NotNil) } func (s *SuiteDotGit) TestObjectPackIdx(c *C) { f := fixtures.Basic().ByTag(".git").One() fs := f.DotGit() dir := New(fs) idx, err := dir.ObjectPackIdx(f.PackfileHash) c.Assert(err, IsNil) c.Assert(filepath.Ext(idx.Name()), Equals, ".idx") c.Assert(idx.Close(), IsNil) } func (s *SuiteDotGit) TestObjectPackNotFound(c *C) { fs := fixtures.Basic().ByTag(".git").One().DotGit() dir := New(fs) pack, err := dir.ObjectPack(plumbing.ZeroHash) c.Assert(err, Equals, ErrPackfileNotFound) c.Assert(pack, IsNil) idx, err := dir.ObjectPackIdx(plumbing.ZeroHash) c.Assert(err, Equals, 
ErrPackfileNotFound) c.Assert(idx, IsNil) } func (s *SuiteDotGit) TestNewObject(c *C) { tmp, err := ioutil.TempDir("", "dot-git") c.Assert(err, IsNil) defer os.RemoveAll(tmp) fs := osfs.New(tmp) dir := New(fs) w, err := dir.NewObject() c.Assert(err, IsNil) err = w.WriteHeader(plumbing.BlobObject, 14) c.Assert(err, IsNil) n, err := w.Write([]byte("this is a test")) c.Assert(err, IsNil) c.Assert(n, Equals, 14) c.Assert(w.Hash().String(), Equals, "a8a940627d132695a9769df883f85992f0ff4a43") err = w.Close() c.Assert(err, IsNil) i, err := fs.Stat("objects/a8/a940627d132695a9769df883f85992f0ff4a43") c.Assert(err, IsNil) c.Assert(i.Size(), Equals, int64(34)) } func (s *SuiteDotGit) TestObjects(c *C) { fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() dir := New(fs) testObjects(c, fs, dir) } func (s *SuiteDotGit) TestObjectsExclusive(c *C) { fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() dir := NewWithOptions(fs, Options{ExclusiveAccess: true}) testObjects(c, fs, dir) } func testObjects(c *C, fs billy.Filesystem, dir *DotGit) { hashes, err := dir.Objects() c.Assert(err, IsNil) c.Assert(hashes, HasLen, 187) c.Assert(hashes[0].String(), Equals, "0097821d427a3c3385898eb13b50dcbc8702b8a3") c.Assert(hashes[1].String(), Equals, "01d5fa556c33743006de7e76e67a2dfcd994ca04") c.Assert(hashes[2].String(), Equals, "03db8e1fbe133a480f2867aac478fd866686d69e") } func (s *SuiteDotGit) TestObjectsNoFolder(c *C) { tmp, err := ioutil.TempDir("", "dot-git") c.Assert(err, IsNil) defer os.RemoveAll(tmp) fs := osfs.New(tmp) dir := New(fs) hash, err := dir.Objects() c.Assert(err, IsNil) c.Assert(hash, HasLen, 0) } func (s *SuiteDotGit) TestObject(c *C) { fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() dir := New(fs) hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e") file, err := dir.Object(hash) c.Assert(err, IsNil) c.Assert(strings.HasSuffix( file.Name(), fs.Join("objects", "03", "db8e1fbe133a480f2867aac478fd866686d69e")), Equals, true, ) 
incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" //made up hash incomingDirPath := fs.Join("objects", "incoming-123456") incomingFilePath := fs.Join(incomingDirPath, incomingHash[0:2], incomingHash[2:40]) fs.MkdirAll(incomingDirPath, os.FileMode(0755)) fs.Create(incomingFilePath) file, err = dir.Object(plumbing.NewHash(incomingHash)) c.Assert(err, IsNil) } func (s *SuiteDotGit) TestObjectStat(c *C) { fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() dir := New(fs) hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e") _, err := dir.ObjectStat(hash) c.Assert(err, IsNil) incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" //made up hash incomingDirPath := fs.Join("objects", "incoming-123456") incomingFilePath := fs.Join(incomingDirPath, incomingHash[0:2], incomingHash[2:40]) fs.MkdirAll(incomingDirPath, os.FileMode(0755)) fs.Create(incomingFilePath) _, err = dir.ObjectStat(plumbing.NewHash(incomingHash)) c.Assert(err, IsNil) } func (s *SuiteDotGit) TestObjectDelete(c *C) { fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() dir := New(fs) hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e") err := dir.ObjectDelete(hash) c.Assert(err, IsNil) incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" //made up hash incomingDirPath := fs.Join("objects", "incoming-123456") incomingSubDirPath := fs.Join(incomingDirPath, incomingHash[0:2]) incomingFilePath := fs.Join(incomingSubDirPath, incomingHash[2:40]) err = fs.MkdirAll(incomingSubDirPath, os.FileMode(0755)) c.Assert(err, IsNil) f, err := fs.Create(incomingFilePath) c.Assert(err, IsNil) err = f.Close() c.Assert(err, IsNil) err = dir.ObjectDelete(plumbing.NewHash(incomingHash)) c.Assert(err, IsNil) } func (s *SuiteDotGit) TestObjectNotFound(c *C) { fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() dir := New(fs) hash := plumbing.NewHash("not-found-object") file, err := dir.Object(hash) c.Assert(err, NotNil) c.Assert(file, IsNil) } func (s 
*SuiteDotGit) TestSubmodules(c *C) { fs := fixtures.ByTag("submodule").One().DotGit() dir := New(fs) m, err := dir.Module("basic") c.Assert(err, IsNil) c.Assert(strings.HasSuffix(m.Root(), m.Join(".git", "modules", "basic")), Equals, true) } func (s *SuiteDotGit) TestPackRefs(c *C) { tmp, err := ioutil.TempDir("", "dot-git") c.Assert(err, IsNil) defer os.RemoveAll(tmp) fs := osfs.New(tmp) dir := New(fs) err = dir.SetRef(plumbing.NewReferenceFromStrings( "refs/heads/foo", "e8d3ffab552895c19b9fcf7aa264d277cde33881", ), nil) c.Assert(err, IsNil) err = dir.SetRef(plumbing.NewReferenceFromStrings( "refs/heads/bar", "a8d3ffab552895c19b9fcf7aa264d277cde33881", ), nil) c.Assert(err, IsNil) refs, err := dir.Refs() c.Assert(err, IsNil) c.Assert(refs, HasLen, 2) looseCount, err := dir.CountLooseRefs() c.Assert(err, IsNil) c.Assert(looseCount, Equals, 2) err = dir.PackRefs() c.Assert(err, IsNil) // Make sure the refs are still there, but no longer loose. refs, err = dir.Refs() c.Assert(err, IsNil) c.Assert(refs, HasLen, 2) looseCount, err = dir.CountLooseRefs() c.Assert(err, IsNil) c.Assert(looseCount, Equals, 0) ref, err := dir.Ref("refs/heads/foo") c.Assert(err, IsNil) c.Assert(ref, NotNil) c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") ref, err = dir.Ref("refs/heads/bar") c.Assert(err, IsNil) c.Assert(ref, NotNil) c.Assert(ref.Hash().String(), Equals, "a8d3ffab552895c19b9fcf7aa264d277cde33881") // Now update one of them, re-pack, and check again. err = dir.SetRef(plumbing.NewReferenceFromStrings( "refs/heads/foo", "b8d3ffab552895c19b9fcf7aa264d277cde33881", ), nil) c.Assert(err, IsNil) looseCount, err = dir.CountLooseRefs() c.Assert(err, IsNil) c.Assert(looseCount, Equals, 1) err = dir.PackRefs() c.Assert(err, IsNil) // Make sure the refs are still there, but no longer loose. 
refs, err = dir.Refs() c.Assert(err, IsNil) c.Assert(refs, HasLen, 2) looseCount, err = dir.CountLooseRefs() c.Assert(err, IsNil) c.Assert(looseCount, Equals, 0) ref, err = dir.Ref("refs/heads/foo") c.Assert(err, IsNil) c.Assert(ref, NotNil) c.Assert(ref.Hash().String(), Equals, "b8d3ffab552895c19b9fcf7aa264d277cde33881") } func (s *SuiteDotGit) TestAlternates(c *C) { tmp, err := ioutil.TempDir("", "dot-git") c.Assert(err, IsNil) defer os.RemoveAll(tmp) // Create a new billy fs. fs := osfs.New(tmp) // Create a new dotgit object and initialize. dir := New(fs) err = dir.Initialize() c.Assert(err, IsNil) // Create alternates file. altpath := filepath.Join("objects", "info", "alternates") f, err := fs.Create(altpath) c.Assert(err, IsNil) // Multiple alternates. var strContent string if runtime.GOOS == "windows" { strContent = "C:\\Users\\username\\repo1\\.git\\objects\r\n..\\..\\..\\rep2\\.git\\objects" } else { strContent = "/Users/username/rep1//.git/objects\n../../../rep2//.git/objects" } content := []byte(strContent) f.Write(content) f.Close() dotgits, err := dir.Alternates() c.Assert(err, IsNil) if runtime.GOOS == "windows" { c.Assert(dotgits[0].fs.Root(), Equals, "C:\\Users\\username\\repo1\\.git") } else { c.Assert(dotgits[0].fs.Root(), Equals, "/Users/username/rep1/.git") } // For relative path: // /some/absolute/path/to/dot-git -> /some/absolute/path pathx := strings.Split(tmp, string(filepath.Separator)) pathx = pathx[:len(pathx)-2] // Use string.Join() to avoid malformed absolutepath on windows // C:Users\\User\\... instead of C:\\Users\\appveyor\\... . 
resolvedPath := strings.Join(pathx, string(filepath.Separator)) // Append the alternate path to the resolvedPath expectedPath := filepath.Join(string(filepath.Separator), resolvedPath, "rep2", ".git") if runtime.GOOS == "windows" { expectedPath = filepath.Join(resolvedPath, "rep2", ".git") } c.Assert(dotgits[1].fs.Root(), Equals, expectedPath) } type norwfs struct { billy.Filesystem } func (f *norwfs) Capabilities() billy.Capability { return billy.Capabilities(f.Filesystem) &^ billy.ReadAndWriteCapability } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/dotgit/writers.go000066400000000000000000000130401345605224300260450ustar00rootroot00000000000000package dotgit import ( "fmt" "io" "sync/atomic" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" "gopkg.in/src-d/go-git.v4/plumbing/format/objfile" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-billy.v4" ) // PackWriter is a io.Writer that generates the packfile index simultaneously, // a packfile.Decoder is used with a file reader to read the file being written // this operation is synchronized with the write operations. 
// The packfile is written in a temp file, when Close is called this file // is renamed/moved (depends on the Filesystem implementation) to the final // location, if the PackWriter is not used, nothing is written type PackWriter struct { Notify func(plumbing.Hash, *idxfile.Writer) fs billy.Filesystem fr, fw billy.File synced *syncedReader checksum plumbing.Hash parser *packfile.Parser writer *idxfile.Writer result chan error } func newPackWrite(fs billy.Filesystem) (*PackWriter, error) { fw, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_pack_") if err != nil { return nil, err } fr, err := fs.Open(fw.Name()) if err != nil { return nil, err } writer := &PackWriter{ fs: fs, fw: fw, fr: fr, synced: newSyncedReader(fw, fr), result: make(chan error), } go writer.buildIndex() return writer, nil } func (w *PackWriter) buildIndex() { s := packfile.NewScanner(w.synced) w.writer = new(idxfile.Writer) var err error w.parser, err = packfile.NewParser(s, w.writer) if err != nil { w.result <- err return } checksum, err := w.parser.Parse() if err != nil { w.result <- err return } w.checksum = checksum w.result <- err } // waitBuildIndex waits until buildIndex function finishes, this can terminate // with a packfile.ErrEmptyPackfile, this means that nothing was written so we // ignore the error func (w *PackWriter) waitBuildIndex() error { err := <-w.result if err == packfile.ErrEmptyPackfile { return nil } return err } func (w *PackWriter) Write(p []byte) (int, error) { return w.synced.Write(p) } // Close closes all the file descriptors and save the final packfile, if nothing // was written, the tempfiles are deleted without writing a packfile. 
func (w *PackWriter) Close() error { defer func() { if w.Notify != nil && w.writer != nil && w.writer.Finished() { w.Notify(w.checksum, w.writer) } close(w.result) }() if err := w.synced.Close(); err != nil { return err } if err := w.waitBuildIndex(); err != nil { return err } if err := w.fr.Close(); err != nil { return err } if err := w.fw.Close(); err != nil { return err } if w.writer == nil || !w.writer.Finished() { return w.clean() } return w.save() } func (w *PackWriter) clean() error { return w.fs.Remove(w.fw.Name()) } func (w *PackWriter) save() error { base := w.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s", w.checksum)) idx, err := w.fs.Create(fmt.Sprintf("%s.idx", base)) if err != nil { return err } if err := w.encodeIdx(idx); err != nil { return err } if err := idx.Close(); err != nil { return err } return w.fs.Rename(w.fw.Name(), fmt.Sprintf("%s.pack", base)) } func (w *PackWriter) encodeIdx(writer io.Writer) error { idx, err := w.writer.Index() if err != nil { return err } e := idxfile.NewEncoder(writer) _, err = e.Encode(idx) return err } type syncedReader struct { w io.Writer r io.ReadSeeker blocked, done uint32 written, read uint64 news chan bool } func newSyncedReader(w io.Writer, r io.ReadSeeker) *syncedReader { return &syncedReader{ w: w, r: r, news: make(chan bool), } } func (s *syncedReader) Write(p []byte) (n int, err error) { defer func() { written := atomic.AddUint64(&s.written, uint64(n)) read := atomic.LoadUint64(&s.read) if written > read { s.wake() } }() n, err = s.w.Write(p) return } func (s *syncedReader) Read(p []byte) (n int, err error) { defer func() { atomic.AddUint64(&s.read, uint64(n)) }() for { s.sleep() n, err = s.r.Read(p) if err == io.EOF && !s.isDone() && n == 0 { continue } break } return } func (s *syncedReader) isDone() bool { return atomic.LoadUint32(&s.done) == 1 } func (s *syncedReader) isBlocked() bool { return atomic.LoadUint32(&s.blocked) == 1 } func (s *syncedReader) wake() { if s.isBlocked() { 
atomic.StoreUint32(&s.blocked, 0) s.news <- true } } func (s *syncedReader) sleep() { read := atomic.LoadUint64(&s.read) written := atomic.LoadUint64(&s.written) if read >= written { atomic.StoreUint32(&s.blocked, 1) <-s.news } } func (s *syncedReader) Seek(offset int64, whence int) (int64, error) { if whence == io.SeekCurrent { return s.r.Seek(offset, whence) } p, err := s.r.Seek(offset, whence) atomic.StoreUint64(&s.read, uint64(p)) return p, err } func (s *syncedReader) Close() error { atomic.StoreUint32(&s.done, 1) close(s.news) return nil } type ObjectWriter struct { objfile.Writer fs billy.Filesystem f billy.File } func newObjectWriter(fs billy.Filesystem) (*ObjectWriter, error) { f, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_obj_") if err != nil { return nil, err } return &ObjectWriter{ Writer: (*objfile.NewWriter(f)), fs: fs, f: f, }, nil } func (w *ObjectWriter) Close() error { if err := w.Writer.Close(); err != nil { return err } if err := w.f.Close(); err != nil { return err } return w.save() } func (w *ObjectWriter) save() error { hash := w.Hash().String() file := w.fs.Join(objectsPath, hash[0:2], hash[2:40]) return w.fs.Rename(w.f.Name(), file) } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/dotgit/writers_test.go000066400000000000000000000061101345605224300271040ustar00rootroot00000000000000package dotgit import ( "fmt" "io" "io/ioutil" "log" "os" "strconv" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4/osfs" "gopkg.in/src-d/go-git-fixtures.v3" ) func (s *SuiteDotGit) TestNewObjectPack(c *C) { f := fixtures.Basic().One() dir, err := ioutil.TempDir("", "example") if err != nil { log.Fatal(err) } defer os.RemoveAll(dir) fs := osfs.New(dir) dot := New(fs) w, err := dot.NewObjectPack() c.Assert(err, IsNil) _, err = io.Copy(w, f.Packfile()) c.Assert(err, IsNil) c.Assert(w.Close(), IsNil) pfPath := fmt.Sprintf("objects/pack/pack-%s.pack", f.PackfileHash) idxPath := fmt.Sprintf("objects/pack/pack-%s.idx", f.PackfileHash) stat, err := fs.Stat(pfPath) c.Assert(err, IsNil) c.Assert(stat.Size(), Equals, int64(84794)) stat, err = fs.Stat(idxPath) c.Assert(err, IsNil) c.Assert(stat.Size(), Equals, int64(1940)) pf, err := fs.Open(pfPath) c.Assert(err, IsNil) pfs := packfile.NewScanner(pf) _, objects, err := pfs.Header() c.Assert(err, IsNil) for i := uint32(0); i < objects; i++ { _, err := pfs.NextObjectHeader() if err != nil { c.Assert(err, IsNil) break } } c.Assert(pfs.Close(), IsNil) } func (s *SuiteDotGit) TestNewObjectPackUnused(c *C) { dir, err := ioutil.TempDir("", "example") if err != nil { log.Fatal(err) } defer os.RemoveAll(dir) fs := osfs.New(dir) dot := New(fs) w, err := dot.NewObjectPack() c.Assert(err, IsNil) c.Assert(w.Close(), IsNil) info, err := fs.ReadDir("objects/pack") c.Assert(err, IsNil) c.Assert(info, HasLen, 0) // check clean up of temporary files info, err = fs.ReadDir("") c.Assert(err, IsNil) for _, fi := range info { c.Assert(fi.IsDir(), Equals, true) } } func (s *SuiteDotGit) TestSyncedReader(c *C) { tmpw, err := ioutil.TempFile("", "example") c.Assert(err, IsNil) tmpr, err := os.Open(tmpw.Name()) c.Assert(err, IsNil) defer func() { tmpw.Close() tmpr.Close() os.Remove(tmpw.Name()) }() synced := newSyncedReader(tmpw, tmpr) go func() { for i := 0; i < 281; i++ { _, err := synced.Write([]byte(strconv.Itoa(i) + "\n")) c.Assert(err, IsNil) } synced.Close() }() o, err := synced.Seek(1002, io.SeekStart) 
c.Assert(err, IsNil) c.Assert(o, Equals, int64(1002)) head := make([]byte, 3) n, err := io.ReadFull(synced, head) c.Assert(err, IsNil) c.Assert(n, Equals, 3) c.Assert(string(head), Equals, "278") o, err = synced.Seek(1010, io.SeekStart) c.Assert(err, IsNil) c.Assert(o, Equals, int64(1010)) n, err = io.ReadFull(synced, head) c.Assert(err, IsNil) c.Assert(n, Equals, 3) c.Assert(string(head), Equals, "280") } func (s *SuiteDotGit) TestPackWriterUnusedNotify(c *C) { dir, err := ioutil.TempDir("", "example") if err != nil { c.Assert(err, IsNil) } defer os.RemoveAll(dir) fs := osfs.New(dir) w, err := newPackWrite(fs) c.Assert(err, IsNil) w.Notify = func(h plumbing.Hash, idx *idxfile.Writer) { c.Fatal("unexpected call to PackWriter.Notify") } c.Assert(w.Close(), IsNil) } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/index.go000066400000000000000000000014101345605224300241610ustar00rootroot00000000000000package filesystem import ( "os" "gopkg.in/src-d/go-git.v4/plumbing/format/index" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) type IndexStorage struct { dir *dotgit.DotGit } func (s *IndexStorage) SetIndex(idx *index.Index) (err error) { f, err := s.dir.IndexWriter() if err != nil { return err } defer ioutil.CheckClose(f, &err) e := index.NewEncoder(f) err = e.Encode(idx) return err } func (s *IndexStorage) Index() (i *index.Index, err error) { idx := &index.Index{ Version: 2, } f, err := s.dir.Index() if err != nil { if os.IsNotExist(err) { return idx, nil } return nil, err } defer ioutil.CheckClose(f, &err) d := index.NewDecoder(f) err = d.Decode(idx) return idx, err } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/module.go000066400000000000000000000006371345605224300243510ustar00rootroot00000000000000package filesystem import ( "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/storage" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" ) type ModuleStorage struct { dir *dotgit.DotGit 
} func (s *ModuleStorage) Module(name string) (storage.Storer, error) { fs, err := s.dir.Module(name) if err != nil { return nil, err } return NewStorage(fs, cache.NewObjectLRUDefault()), nil } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/object.go000066400000000000000000000374621345605224300243400ustar00rootroot00000000000000package filesystem import ( "io" "os" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" "gopkg.in/src-d/go-git.v4/plumbing/format/objfile" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-git.v4/utils/ioutil" "gopkg.in/src-d/go-billy.v4" ) type ObjectStorage struct { options Options // objectCache is an object cache uses to cache delta's bases and also recently // loaded loose objects objectCache cache.Object dir *dotgit.DotGit index map[plumbing.Hash]idxfile.Index } // NewObjectStorage creates a new ObjectStorage with the given .git directory and cache. func NewObjectStorage(dir *dotgit.DotGit, objectCache cache.Object) *ObjectStorage { return NewObjectStorageWithOptions(dir, objectCache, Options{}) } // NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options func NewObjectStorageWithOptions(dir *dotgit.DotGit, objectCache cache.Object, ops Options) *ObjectStorage { return &ObjectStorage{ options: ops, objectCache: objectCache, dir: dir, } } func (s *ObjectStorage) requireIndex() error { if s.index != nil { return nil } s.index = make(map[plumbing.Hash]idxfile.Index) packs, err := s.dir.ObjectPacks() if err != nil { return err } for _, h := range packs { if err := s.loadIdxFile(h); err != nil { return err } } return nil } // Reindex indexes again all packfiles. 
Useful if git changed packfiles externally func (s *ObjectStorage) Reindex() { s.index = nil } func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) { f, err := s.dir.ObjectPackIdx(h) if err != nil { return err } defer ioutil.CheckClose(f, &err) idxf := idxfile.NewMemoryIndex() d := idxfile.NewDecoder(f) if err = d.Decode(idxf); err != nil { return err } s.index[h] = idxf return err } func (s *ObjectStorage) NewEncodedObject() plumbing.EncodedObject { return &plumbing.MemoryObject{} } func (s *ObjectStorage) PackfileWriter() (io.WriteCloser, error) { if err := s.requireIndex(); err != nil { return nil, err } w, err := s.dir.NewObjectPack() if err != nil { return nil, err } w.Notify = func(h plumbing.Hash, writer *idxfile.Writer) { index, err := writer.Index() if err == nil { s.index[h] = index } } return w, nil } // SetEncodedObject adds a new object to the storage. func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (h plumbing.Hash, err error) { if o.Type() == plumbing.OFSDeltaObject || o.Type() == plumbing.REFDeltaObject { return plumbing.ZeroHash, plumbing.ErrInvalidType } ow, err := s.dir.NewObject() if err != nil { return plumbing.ZeroHash, err } defer ioutil.CheckClose(ow, &err) or, err := o.Reader() if err != nil { return plumbing.ZeroHash, err } defer ioutil.CheckClose(or, &err) if err = ow.WriteHeader(o.Type(), o.Size()); err != nil { return plumbing.ZeroHash, err } if _, err = io.Copy(ow, or); err != nil { return plumbing.ZeroHash, err } return o.Hash(), err } // HasEncodedObject returns nil if the object exists, without actually // reading the object data from storage. func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) { // Check unpacked objects f, err := s.dir.Object(h) if err != nil { if !os.IsNotExist(err) { return err } // Fall through to check packed objects. } else { defer ioutil.CheckClose(f, &err) return nil } // Check packed objects. 
if err := s.requireIndex(); err != nil { return err } _, _, offset := s.findObjectInPackfile(h) if offset == -1 { return plumbing.ErrObjectNotFound } return nil } func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) ( size int64, err error) { f, err := s.dir.Object(h) if err != nil { if os.IsNotExist(err) { return 0, plumbing.ErrObjectNotFound } return 0, err } r, err := objfile.NewReader(f) if err != nil { return 0, err } defer ioutil.CheckClose(r, &err) _, size, err = r.Header() return size, err } func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) ( size int64, err error) { if err := s.requireIndex(); err != nil { return 0, err } pack, _, offset := s.findObjectInPackfile(h) if offset == -1 { return 0, plumbing.ErrObjectNotFound } f, err := s.dir.ObjectPack(pack) if err != nil { return 0, err } defer ioutil.CheckClose(f, &err) idx := s.index[pack] hash, err := idx.FindHash(offset) if err == nil { obj, ok := s.objectCache.Get(hash) if ok { return obj.Size(), nil } } else if err != nil && err != plumbing.ErrObjectNotFound { return 0, err } var p *packfile.Packfile if s.objectCache != nil { p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache) } else { p = packfile.NewPackfile(idx, s.dir.Fs(), f) } return p.GetSizeByOffset(offset) } // EncodedObjectSize returns the plaintext size of the given object, // without actually reading the full object data from storage. func (s *ObjectStorage) EncodedObjectSize(h plumbing.Hash) ( size int64, err error) { size, err = s.encodedObjectSizeFromUnpacked(h) if err != nil && err != plumbing.ErrObjectNotFound { return 0, err } else if err == nil { return size, nil } return s.encodedObjectSizeFromPackfile(h) } // EncodedObject returns the object with the given hash, by searching for it in // the packfile and the git object directories. 
func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { var obj plumbing.EncodedObject var err error if s.index != nil { obj, err = s.getFromPackfile(h, false) if err == plumbing.ErrObjectNotFound { obj, err = s.getFromUnpacked(h) } } else { obj, err = s.getFromUnpacked(h) if err == plumbing.ErrObjectNotFound { obj, err = s.getFromPackfile(h, false) } } // If the error is still object not found, check if it's a shared object // repository. if err == plumbing.ErrObjectNotFound { dotgits, e := s.dir.Alternates() if e == nil { // Create a new object storage with the DotGit(s) and check for the // required hash object. Skip when not found. for _, dg := range dotgits { o := NewObjectStorage(dg, s.objectCache) enobj, enerr := o.EncodedObject(t, h) if enerr != nil { continue } return enobj, nil } } } if err != nil { return nil, err } if plumbing.AnyObject != t && obj.Type() != t { return nil, plumbing.ErrObjectNotFound } return obj, nil } // DeltaObject returns the object with the given hash, by searching for // it in the packfile and the git object directories. 
func (s *ObjectStorage) DeltaObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { obj, err := s.getFromUnpacked(h) if err == plumbing.ErrObjectNotFound { obj, err = s.getFromPackfile(h, true) } if err != nil { return nil, err } if plumbing.AnyObject != t && obj.Type() != t { return nil, plumbing.ErrObjectNotFound } return obj, nil } func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedObject, err error) { f, err := s.dir.Object(h) if err != nil { if os.IsNotExist(err) { return nil, plumbing.ErrObjectNotFound } return nil, err } defer ioutil.CheckClose(f, &err) if cacheObj, found := s.objectCache.Get(h); found { return cacheObj, nil } obj = s.NewEncodedObject() r, err := objfile.NewReader(f) if err != nil { return nil, err } defer ioutil.CheckClose(r, &err) t, size, err := r.Header() if err != nil { return nil, err } obj.SetType(t) obj.SetSize(size) w, err := obj.Writer() if err != nil { return nil, err } s.objectCache.Put(obj) _, err = io.Copy(w, r) return obj, err } // Get returns the object with the given hash, by searching for it in // the packfile. 
func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) ( plumbing.EncodedObject, error) { if err := s.requireIndex(); err != nil { return nil, err } pack, hash, offset := s.findObjectInPackfile(h) if offset == -1 { return nil, plumbing.ErrObjectNotFound } f, err := s.dir.ObjectPack(pack) if err != nil { return nil, err } if !s.options.KeepDescriptors { defer ioutil.CheckClose(f, &err) } idx := s.index[pack] if canBeDelta { return s.decodeDeltaObjectAt(f, idx, offset, hash) } return s.decodeObjectAt(f, idx, offset) } func (s *ObjectStorage) decodeObjectAt( f billy.File, idx idxfile.Index, offset int64, ) (plumbing.EncodedObject, error) { hash, err := idx.FindHash(offset) if err == nil { obj, ok := s.objectCache.Get(hash) if ok { return obj, nil } } if err != nil && err != plumbing.ErrObjectNotFound { return nil, err } var p *packfile.Packfile if s.objectCache != nil { p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache) } else { p = packfile.NewPackfile(idx, s.dir.Fs(), f) } return p.GetByOffset(offset) } func (s *ObjectStorage) decodeDeltaObjectAt( f billy.File, idx idxfile.Index, offset int64, hash plumbing.Hash, ) (plumbing.EncodedObject, error) { if _, err := f.Seek(0, io.SeekStart); err != nil { return nil, err } p := packfile.NewScanner(f) header, err := p.SeekObjectHeader(offset) if err != nil { return nil, err } var ( base plumbing.Hash ) switch header.Type { case plumbing.REFDeltaObject: base = header.Reference case plumbing.OFSDeltaObject: base, err = idx.FindHash(header.OffsetReference) if err != nil { return nil, err } default: return s.decodeObjectAt(f, idx, offset) } obj := &plumbing.MemoryObject{} obj.SetType(header.Type) w, err := obj.Writer() if err != nil { return nil, err } if _, _, err := p.NextObject(w); err != nil { return nil, err } return newDeltaObject(obj, hash, base, header.Length), nil } func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, plumbing.Hash, int64) { for packfile, 
index := range s.index { offset, err := index.FindOffset(h) if err == nil { return packfile, h, offset } } return plumbing.ZeroHash, plumbing.ZeroHash, -1 } // IterEncodedObjects returns an iterator for all the objects in the packfile // with the given type. func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) { objects, err := s.dir.Objects() if err != nil { return nil, err } seen := make(map[plumbing.Hash]struct{}) var iters []storer.EncodedObjectIter if len(objects) != 0 { iters = append(iters, &objectsIter{s: s, t: t, h: objects}) seen = hashListAsMap(objects) } packi, err := s.buildPackfileIters(t, seen) if err != nil { return nil, err } iters = append(iters, packi) return storer.NewMultiEncodedObjectIter(iters), nil } func (s *ObjectStorage) buildPackfileIters( t plumbing.ObjectType, seen map[plumbing.Hash]struct{}, ) (storer.EncodedObjectIter, error) { if err := s.requireIndex(); err != nil { return nil, err } packs, err := s.dir.ObjectPacks() if err != nil { return nil, err } return &lazyPackfilesIter{ hashes: packs, open: func(h plumbing.Hash) (storer.EncodedObjectIter, error) { pack, err := s.dir.ObjectPack(h) if err != nil { return nil, err } return newPackfileIter( s.dir.Fs(), pack, t, seen, s.index[h], s.objectCache, s.options.KeepDescriptors, ) }, }, nil } // Close closes all opened files. 
func (s *ObjectStorage) Close() error { return s.dir.Close() } type lazyPackfilesIter struct { hashes []plumbing.Hash open func(h plumbing.Hash) (storer.EncodedObjectIter, error) cur storer.EncodedObjectIter } func (it *lazyPackfilesIter) Next() (plumbing.EncodedObject, error) { for { if it.cur == nil { if len(it.hashes) == 0 { return nil, io.EOF } h := it.hashes[0] it.hashes = it.hashes[1:] sub, err := it.open(h) if err == io.EOF { continue } else if err != nil { return nil, err } it.cur = sub } ob, err := it.cur.Next() if err == io.EOF { it.cur.Close() it.cur = nil continue } else if err != nil { return nil, err } return ob, nil } } func (it *lazyPackfilesIter) ForEach(cb func(plumbing.EncodedObject) error) error { return storer.ForEachIterator(it, cb) } func (it *lazyPackfilesIter) Close() { if it.cur != nil { it.cur.Close() it.cur = nil } it.hashes = nil } type packfileIter struct { pack billy.File iter storer.EncodedObjectIter seen map[plumbing.Hash]struct{} // tells whether the pack file should be left open after iteration or not keepPack bool } // NewPackfileIter returns a new EncodedObjectIter for the provided packfile // and object type. Packfile and index file will be closed after they're // used. If keepPack is true the packfile won't be closed after the iteration // finished. 
func NewPackfileIter( fs billy.Filesystem, f billy.File, idxFile billy.File, t plumbing.ObjectType, keepPack bool, ) (storer.EncodedObjectIter, error) { idx := idxfile.NewMemoryIndex() if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil { return nil, err } if err := idxFile.Close(); err != nil { return nil, err } seen := make(map[plumbing.Hash]struct{}) return newPackfileIter(fs, f, t, seen, idx, nil, keepPack) } func newPackfileIter( fs billy.Filesystem, f billy.File, t plumbing.ObjectType, seen map[plumbing.Hash]struct{}, index idxfile.Index, cache cache.Object, keepPack bool, ) (storer.EncodedObjectIter, error) { var p *packfile.Packfile if cache != nil { p = packfile.NewPackfileWithCache(index, fs, f, cache) } else { p = packfile.NewPackfile(index, fs, f) } iter, err := p.GetByType(t) if err != nil { return nil, err } return &packfileIter{ pack: f, iter: iter, seen: seen, keepPack: keepPack, }, nil } func (iter *packfileIter) Next() (plumbing.EncodedObject, error) { for { obj, err := iter.iter.Next() if err != nil { return nil, err } if _, ok := iter.seen[obj.Hash()]; ok { continue } return obj, nil } } func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { for { o, err := iter.Next() if err != nil { if err == io.EOF { iter.Close() return nil } return err } if err := cb(o); err != nil { return err } } } func (iter *packfileIter) Close() { iter.iter.Close() if !iter.keepPack { _ = iter.pack.Close() } } type objectsIter struct { s *ObjectStorage t plumbing.ObjectType h []plumbing.Hash } func (iter *objectsIter) Next() (plumbing.EncodedObject, error) { if len(iter.h) == 0 { return nil, io.EOF } obj, err := iter.s.getFromUnpacked(iter.h[0]) iter.h = iter.h[1:] if err != nil { return nil, err } if iter.t != plumbing.AnyObject && iter.t != obj.Type() { return iter.Next() } return obj, err } func (iter *objectsIter) ForEach(cb func(plumbing.EncodedObject) error) error { for { o, err := iter.Next() if err != nil { if err == io.EOF { 
return nil } return err } if err := cb(o); err != nil { return err } } } func (iter *objectsIter) Close() { iter.h = []plumbing.Hash{} } func hashListAsMap(l []plumbing.Hash) map[plumbing.Hash]struct{} { m := make(map[plumbing.Hash]struct{}, len(l)) for _, h := range l { m[h] = struct{}{} } return m } func (s *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error { err := s.dir.ForEachObjectHash(fun) if err == storer.ErrStop { return nil } return err } func (s *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) { fi, err := s.dir.ObjectStat(hash) if err != nil { return time.Time{}, err } return fi.ModTime(), nil } func (s *ObjectStorage) DeleteLooseObject(hash plumbing.Hash) error { return s.dir.ObjectDelete(hash) } func (s *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) { return s.dir.ObjectPacks() } func (s *ObjectStorage) DeleteOldObjectPackAndIndex(h plumbing.Hash, t time.Time) error { return s.dir.DeleteOldObjectPackAndIndex(h, t) } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/object_test.go000066400000000000000000000274441345605224300253760ustar00rootroot00000000000000package filesystem import ( "fmt" "io" "io/ioutil" "os" "path/filepath" "testing" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type FsSuite struct { fixtures.Suite } var objectTypes = []plumbing.ObjectType{ plumbing.CommitObject, plumbing.TagObject, plumbing.TreeObject, plumbing.BlobObject, } var _ = Suite(&FsSuite{}) func (s *FsSuite) TestGetFromObjectFile(c *C) { fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault()) expected := plumbing.NewHash("f3dfe29d268303fc6e1bbce268605fc99573406e") obj, err := o.EncodedObject(plumbing.AnyObject, expected) c.Assert(err, IsNil) c.Assert(obj.Hash(), Equals, expected) } func (s *FsSuite) TestGetFromPackfile(c *C) { fixtures.Basic().ByTag(".git").Test(c, func(f *fixtures.Fixture) { fs := f.DotGit() o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault()) expected := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") obj, err := o.EncodedObject(plumbing.AnyObject, expected) c.Assert(err, IsNil) c.Assert(obj.Hash(), Equals, expected) }) } func (s *FsSuite) TestGetFromPackfileKeepDescriptors(c *C) { fixtures.Basic().ByTag(".git").Test(c, func(f *fixtures.Fixture) { fs := f.DotGit() dg := dotgit.NewWithOptions(fs, dotgit.Options{KeepDescriptors: true}) o := NewObjectStorageWithOptions(dg, cache.NewObjectLRUDefault(), Options{KeepDescriptors: true}) expected := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") obj, err := o.EncodedObject(plumbing.AnyObject, expected) c.Assert(err, IsNil) c.Assert(obj.Hash(), Equals, expected) packfiles, err := dg.ObjectPacks() c.Assert(err, IsNil) pack1, err := dg.ObjectPack(packfiles[0]) c.Assert(err, IsNil) pack1.Seek(42, os.SEEK_SET) err = o.Close() c.Assert(err, IsNil) pack2, err := dg.ObjectPack(packfiles[0]) c.Assert(err, IsNil) offset, err := pack2.Seek(0, os.SEEK_CUR) c.Assert(err, IsNil) c.Assert(offset, Equals, int64(0)) err = o.Close() c.Assert(err, IsNil) }) } func (s *FsSuite) TestGetSizeOfObjectFile(c *C) { fs := 
fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault()) // Get the size of `tree_walker.go`. expected := plumbing.NewHash("cbd81c47be12341eb1185b379d1c82675aeded6a") size, err := o.EncodedObjectSize(expected) c.Assert(err, IsNil) c.Assert(size, Equals, int64(2412)) } func (s *FsSuite) TestGetSizeFromPackfile(c *C) { fixtures.Basic().ByTag(".git").Test(c, func(f *fixtures.Fixture) { fs := f.DotGit() o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault()) // Get the size of `binary.jpg`. expected := plumbing.NewHash("d5c0f4ab811897cadf03aec358ae60d21f91c50d") size, err := o.EncodedObjectSize(expected) c.Assert(err, IsNil) c.Assert(size, Equals, int64(76110)) }) } func (s *FsSuite) TestGetSizeOfAllObjectFiles(c *C) { fs := fixtures.ByTag(".git").One().DotGit() o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault()) // Get the size of `tree_walker.go`. err := o.ForEachObjectHash(func(h plumbing.Hash) error { size, err := o.EncodedObjectSize(h) c.Assert(err, IsNil) c.Assert(size, Not(Equals), int64(0)) return nil }) c.Assert(err, IsNil) } func (s *FsSuite) TestGetFromPackfileMultiplePackfiles(c *C) { fs := fixtures.ByTag(".git").ByTag("multi-packfile").One().DotGit() o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault()) expected := plumbing.NewHash("8d45a34641d73851e01d3754320b33bb5be3c4d3") obj, err := o.getFromPackfile(expected, false) c.Assert(err, IsNil) c.Assert(obj.Hash(), Equals, expected) expected = plumbing.NewHash("e9cfa4c9ca160546efd7e8582ec77952a27b17db") obj, err = o.getFromPackfile(expected, false) c.Assert(err, IsNil) c.Assert(obj.Hash(), Equals, expected) } func (s *FsSuite) TestIter(c *C) { fixtures.ByTag(".git").ByTag("packfile").Test(c, func(f *fixtures.Fixture) { fs := f.DotGit() o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault()) iter, err := o.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) var count int32 err = 
iter.ForEach(func(o plumbing.EncodedObject) error { count++ return nil }) c.Assert(err, IsNil) c.Assert(count, Equals, f.ObjectsCount) }) } func (s *FsSuite) TestIterWithType(c *C) { fixtures.ByTag(".git").Test(c, func(f *fixtures.Fixture) { for _, t := range objectTypes { fs := f.DotGit() o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault()) iter, err := o.IterEncodedObjects(t) c.Assert(err, IsNil) err = iter.ForEach(func(o plumbing.EncodedObject) error { c.Assert(o.Type(), Equals, t) return nil }) c.Assert(err, IsNil) } }) } func (s *FsSuite) TestPackfileIter(c *C) { fixtures.ByTag(".git").Test(c, func(f *fixtures.Fixture) { fs := f.DotGit() dg := dotgit.New(fs) for _, t := range objectTypes { ph, err := dg.ObjectPacks() c.Assert(err, IsNil) for _, h := range ph { f, err := dg.ObjectPack(h) c.Assert(err, IsNil) idxf, err := dg.ObjectPackIdx(h) c.Assert(err, IsNil) iter, err := NewPackfileIter(fs, f, idxf, t, false) c.Assert(err, IsNil) err = iter.ForEach(func(o plumbing.EncodedObject) error { c.Assert(o.Type(), Equals, t) return nil }) c.Assert(err, IsNil) } } }) } func copyFile(c *C, dstDir, dstFilename string, srcFile *os.File) { _, err := srcFile.Seek(0, 0) c.Assert(err, IsNil) err = os.MkdirAll(dstDir, 0750|os.ModeDir) c.Assert(err, IsNil) dst, err := os.OpenFile(filepath.Join(dstDir, dstFilename), os.O_CREATE|os.O_WRONLY, 0666) c.Assert(err, IsNil) defer dst.Close() _, err = io.Copy(dst, srcFile) c.Assert(err, IsNil) } // TestPackfileReindex tests that externally-added packfiles are considered by go-git // after calling the Reindex method func (s *FsSuite) TestPackfileReindex(c *C) { // obtain a standalone packfile that is not part of any other repository // in the fixtures: packFixture := fixtures.ByTag("packfile").ByTag("standalone").One() packFile := packFixture.Packfile() idxFile := packFixture.Idx() packFilename := packFixture.PackfileHash.String() testObjectHash := plumbing.NewHash("a771b1e94141480861332fd0e4684d33071306c6") // this is an 
object we know exists in the standalone packfile fixtures.ByTag(".git").Test(c, func(f *fixtures.Fixture) { fs := f.DotGit() storer := NewStorage(fs, cache.NewObjectLRUDefault()) // check that our test object is NOT found _, err := storer.EncodedObject(plumbing.CommitObject, testObjectHash) c.Assert(err, Equals, plumbing.ErrObjectNotFound) // add the external packfile+idx to the packs folder // this simulates a git bundle unbundle command, or a repack, for example. copyFile(c, filepath.Join(storer.Filesystem().Root(), "objects", "pack"), fmt.Sprintf("pack-%s.pack", packFilename), packFile) copyFile(c, filepath.Join(storer.Filesystem().Root(), "objects", "pack"), fmt.Sprintf("pack-%s.idx", packFilename), idxFile) // check that we cannot still retrieve the test object _, err = storer.EncodedObject(plumbing.CommitObject, testObjectHash) c.Assert(err, Equals, plumbing.ErrObjectNotFound) storer.Reindex() // actually reindex // Now check that the test object can be retrieved _, err = storer.EncodedObject(plumbing.CommitObject, testObjectHash) c.Assert(err, IsNil) }) } func (s *FsSuite) TestPackfileIterKeepDescriptors(c *C) { fixtures.ByTag(".git").Test(c, func(f *fixtures.Fixture) { fs := f.DotGit() ops := dotgit.Options{KeepDescriptors: true} dg := dotgit.NewWithOptions(fs, ops) for _, t := range objectTypes { ph, err := dg.ObjectPacks() c.Assert(err, IsNil) for _, h := range ph { f, err := dg.ObjectPack(h) c.Assert(err, IsNil) idxf, err := dg.ObjectPackIdx(h) c.Assert(err, IsNil) iter, err := NewPackfileIter(fs, f, idxf, t, true) c.Assert(err, IsNil) err = iter.ForEach(func(o plumbing.EncodedObject) error { c.Assert(o.Type(), Equals, t) return nil }) c.Assert(err, IsNil) // test twice to check that packfiles are not closed err = iter.ForEach(func(o plumbing.EncodedObject) error { c.Assert(o.Type(), Equals, t) return nil }) c.Assert(err, IsNil) } } }) } func (s *FsSuite) TestGetFromObjectFileSharedCache(c *C) { f1 := fixtures.ByTag("worktree").One().DotGit() f2 := 
fixtures.ByTag("worktree").ByTag("submodule").One().DotGit() ch := cache.NewObjectLRUDefault() o1 := NewObjectStorage(dotgit.New(f1), ch) o2 := NewObjectStorage(dotgit.New(f2), ch) expected := plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a") obj, err := o1.EncodedObject(plumbing.CommitObject, expected) c.Assert(err, IsNil) c.Assert(obj.Hash(), Equals, expected) obj, err = o2.EncodedObject(plumbing.CommitObject, expected) c.Assert(err, Equals, plumbing.ErrObjectNotFound) } func BenchmarkPackfileIter(b *testing.B) { if err := fixtures.Init(); err != nil { b.Fatal(err) } defer func() { if err := fixtures.Clean(); err != nil { b.Fatal(err) } }() for _, f := range fixtures.ByTag(".git") { b.Run(f.URL, func(b *testing.B) { fs := f.DotGit() dg := dotgit.New(fs) for i := 0; i < b.N; i++ { for _, t := range objectTypes { ph, err := dg.ObjectPacks() if err != nil { b.Fatal(err) } for _, h := range ph { f, err := dg.ObjectPack(h) if err != nil { b.Fatal(err) } idxf, err := dg.ObjectPackIdx(h) if err != nil { b.Fatal(err) } iter, err := NewPackfileIter(fs, f, idxf, t, false) if err != nil { b.Fatal(err) } err = iter.ForEach(func(o plumbing.EncodedObject) error { if o.Type() != t { b.Errorf("expecting %s, got %s", t, o.Type()) } return nil }) if err != nil { b.Fatal(err) } } } } }) } } func BenchmarkPackfileIterReadContent(b *testing.B) { if err := fixtures.Init(); err != nil { b.Fatal(err) } defer func() { if err := fixtures.Clean(); err != nil { b.Fatal(err) } }() for _, f := range fixtures.ByTag(".git") { b.Run(f.URL, func(b *testing.B) { fs := f.DotGit() dg := dotgit.New(fs) for i := 0; i < b.N; i++ { for _, t := range objectTypes { ph, err := dg.ObjectPacks() if err != nil { b.Fatal(err) } for _, h := range ph { f, err := dg.ObjectPack(h) if err != nil { b.Fatal(err) } idxf, err := dg.ObjectPackIdx(h) if err != nil { b.Fatal(err) } iter, err := NewPackfileIter(fs, f, idxf, t, false) if err != nil { b.Fatal(err) } err = iter.ForEach(func(o 
plumbing.EncodedObject) error { if o.Type() != t { b.Errorf("expecting %s, got %s", t, o.Type()) } r, err := o.Reader() if err != nil { b.Fatal(err) } if _, err := ioutil.ReadAll(r); err != nil { b.Fatal(err) } return r.Close() }) if err != nil { b.Fatal(err) } } } } }) } } func BenchmarkGetObjectFromPackfile(b *testing.B) { if err := fixtures.Init(); err != nil { b.Fatal(err) } defer func() { if err := fixtures.Clean(); err != nil { b.Fatal(err) } }() for _, f := range fixtures.Basic() { b.Run(f.URL, func(b *testing.B) { fs := f.DotGit() o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault()) for i := 0; i < b.N; i++ { expected := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") obj, err := o.EncodedObject(plumbing.AnyObject, expected) if err != nil { b.Fatal(err) } if obj.Hash() != expected { b.Errorf("expecting %s, got %s", expected, obj.Hash()) } } }) } } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/reference.go000066400000000000000000000020101345605224300250050ustar00rootroot00000000000000package filesystem import ( "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" ) type ReferenceStorage struct { dir *dotgit.DotGit } func (r *ReferenceStorage) SetReference(ref *plumbing.Reference) error { return r.dir.SetRef(ref, nil) } func (r *ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error { return r.dir.SetRef(ref, old) } func (r *ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) { return r.dir.Ref(n) } func (r *ReferenceStorage) IterReferences() (storer.ReferenceIter, error) { refs, err := r.dir.Refs() if err != nil { return nil, err } return storer.NewReferenceSliceIter(refs), nil } func (r *ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error { return r.dir.RemoveRef(n) } func (r *ReferenceStorage) CountLooseRefs() (int, error) { return r.dir.CountLooseRefs() } func (r 
*ReferenceStorage) PackRefs() error { return r.dir.PackRefs() } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/shallow.go000066400000000000000000000022541345605224300245320ustar00rootroot00000000000000package filesystem import ( "bufio" "fmt" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) // ShallowStorage where the shallow commits are stored, an internal to // manipulate the shallow file type ShallowStorage struct { dir *dotgit.DotGit } // SetShallow save the shallows in the shallow file in the .git folder as one // commit per line represented by 40-byte hexadecimal object terminated by a // newline. func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error { f, err := s.dir.ShallowWriter() if err != nil { return err } defer ioutil.CheckClose(f, &err) for _, h := range commits { if _, err := fmt.Fprintf(f, "%s\n", h); err != nil { return err } } return err } // Shallow return the shallow commits reading from shallo file from .git func (s *ShallowStorage) Shallow() ([]plumbing.Hash, error) { f, err := s.dir.Shallow() if f == nil || err != nil { return nil, err } defer ioutil.CheckClose(f, &err) var hash []plumbing.Hash scn := bufio.NewScanner(f) for scn.Scan() { hash = append(hash, plumbing.NewHash(scn.Text())) } return hash, scn.Err() } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/storage.go000066400000000000000000000040121345605224300245170ustar00rootroot00000000000000// Package filesystem is a storage backend base on filesystems package filesystem import ( "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-billy.v4" ) // Storage is an implementation of git.Storer that stores data on disk in the // standard git format (this is, the .git directory). Zero values of this type // are not safe to use, see the NewStorage function below. 
type Storage struct { fs billy.Filesystem dir *dotgit.DotGit ObjectStorage ReferenceStorage IndexStorage ShallowStorage ConfigStorage ModuleStorage } // Options holds configuration for the storage. type Options struct { // ExclusiveAccess means that the filesystem is not modified externally // while the repo is open. ExclusiveAccess bool // KeepDescriptors makes the file descriptors to be reused but they will // need to be manually closed calling Close(). KeepDescriptors bool } // NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache. func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage { return NewStorageWithOptions(fs, cache, Options{}) } // NewStorageWithOptions returns a new Storage with extra options, // backed by a given `fs.Filesystem` and cache. func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage { dirOps := dotgit.Options{ ExclusiveAccess: ops.ExclusiveAccess, KeepDescriptors: ops.KeepDescriptors, } dir := dotgit.NewWithOptions(fs, dirOps) return &Storage{ fs: fs, dir: dir, ObjectStorage: *NewObjectStorageWithOptions(dir, cache, ops), ReferenceStorage: ReferenceStorage{dir: dir}, IndexStorage: IndexStorage{dir: dir}, ShallowStorage: ShallowStorage{dir: dir}, ConfigStorage: ConfigStorage{dir: dir}, ModuleStorage: ModuleStorage{dir: dir}, } } // Filesystem returns the underlying filesystem func (s *Storage) Filesystem() billy.Filesystem { return s.fs } // Init initializes .git directory func (s *Storage) Init() error { return s.dir.Initialize() } golang-gopkg-src-d-go-git.v4-4.11.0/storage/filesystem/storage_test.go000066400000000000000000000032061345605224300255620ustar00rootroot00000000000000package filesystem import ( "io/ioutil" "testing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/test" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4/memfs" "gopkg.in/src-d/go-billy.v4/osfs" ) func Test(t *testing.T) { TestingT(t) } type StorageSuite struct { test.BaseStorageSuite dir string } var _ = Suite(&StorageSuite{}) func (s *StorageSuite) SetUpTest(c *C) { s.dir = c.MkDir() storage := NewStorage(osfs.New(s.dir), cache.NewObjectLRUDefault()) setUpTest(s, c, storage) } func setUpTest(s *StorageSuite, c *C, storage *Storage) { // ensure that right interfaces are implemented var _ storer.EncodedObjectStorer = storage var _ storer.IndexStorer = storage var _ storer.ReferenceStorer = storage var _ storer.ShallowStorer = storage var _ storer.DeltaObjectStorer = storage var _ storer.PackfileWriter = storage s.BaseStorageSuite = test.NewBaseStorageSuite(storage) s.BaseStorageSuite.SetUpTest(c) } func (s *StorageSuite) TestFilesystem(c *C) { fs := memfs.New() storage := NewStorage(fs, cache.NewObjectLRUDefault()) c.Assert(storage.Filesystem(), Equals, fs) } func (s *StorageSuite) TestNewStorageShouldNotAddAnyContentsToDir(c *C) { fis, err := ioutil.ReadDir(s.dir) c.Assert(err, IsNil) c.Assert(fis, HasLen, 0) } type StorageExclusiveSuite struct { StorageSuite } var _ = Suite(&StorageExclusiveSuite{}) func (s *StorageExclusiveSuite) SetUpTest(c *C) { s.dir = c.MkDir() storage := NewStorageWithOptions( osfs.New(s.dir), cache.NewObjectLRUDefault(), Options{ExclusiveAccess: true}) setUpTest(&s.StorageSuite, c, storage) } golang-gopkg-src-d-go-git.v4-4.11.0/storage/memory/000077500000000000000000000000001345605224300216535ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/storage/memory/storage.go000066400000000000000000000164431345605224300236560ustar00rootroot00000000000000// Package memory is a storage backend base on memory package memory import ( "fmt" "time" "gopkg.in/src-d/go-git.v4/config" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/index" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage" ) 
var ErrUnsupportedObjectType = fmt.Errorf("unsupported object type") // Storage is an implementation of git.Storer that stores data on memory, being // ephemeral. The use of this storage should be done in controlled envoriments, // since the representation in memory of some repository can fill the machine // memory. in the other hand this storage has the best performance. type Storage struct { ConfigStorage ObjectStorage ShallowStorage IndexStorage ReferenceStorage ModuleStorage } // NewStorage returns a new Storage base on memory func NewStorage() *Storage { return &Storage{ ReferenceStorage: make(ReferenceStorage), ConfigStorage: ConfigStorage{}, ShallowStorage: ShallowStorage{}, ObjectStorage: ObjectStorage{ Objects: make(map[plumbing.Hash]plumbing.EncodedObject), Commits: make(map[plumbing.Hash]plumbing.EncodedObject), Trees: make(map[plumbing.Hash]plumbing.EncodedObject), Blobs: make(map[plumbing.Hash]plumbing.EncodedObject), Tags: make(map[plumbing.Hash]plumbing.EncodedObject), }, ModuleStorage: make(ModuleStorage), } } type ConfigStorage struct { config *config.Config } func (c *ConfigStorage) SetConfig(cfg *config.Config) error { if err := cfg.Validate(); err != nil { return err } c.config = cfg return nil } func (c *ConfigStorage) Config() (*config.Config, error) { if c.config == nil { c.config = config.NewConfig() } return c.config, nil } type IndexStorage struct { index *index.Index } func (c *IndexStorage) SetIndex(idx *index.Index) error { c.index = idx return nil } func (c *IndexStorage) Index() (*index.Index, error) { if c.index == nil { c.index = &index.Index{Version: 2} } return c.index, nil } type ObjectStorage struct { Objects map[plumbing.Hash]plumbing.EncodedObject Commits map[plumbing.Hash]plumbing.EncodedObject Trees map[plumbing.Hash]plumbing.EncodedObject Blobs map[plumbing.Hash]plumbing.EncodedObject Tags map[plumbing.Hash]plumbing.EncodedObject } func (o *ObjectStorage) NewEncodedObject() plumbing.EncodedObject { return 
&plumbing.MemoryObject{} } func (o *ObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) { h := obj.Hash() o.Objects[h] = obj switch obj.Type() { case plumbing.CommitObject: o.Commits[h] = o.Objects[h] case plumbing.TreeObject: o.Trees[h] = o.Objects[h] case plumbing.BlobObject: o.Blobs[h] = o.Objects[h] case plumbing.TagObject: o.Tags[h] = o.Objects[h] default: return h, ErrUnsupportedObjectType } return h, nil } func (o *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) { if _, ok := o.Objects[h]; !ok { return plumbing.ErrObjectNotFound } return nil } func (o *ObjectStorage) EncodedObjectSize(h plumbing.Hash) ( size int64, err error) { obj, ok := o.Objects[h] if !ok { return 0, plumbing.ErrObjectNotFound } return obj.Size(), nil } func (o *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { obj, ok := o.Objects[h] if !ok || (plumbing.AnyObject != t && obj.Type() != t) { return nil, plumbing.ErrObjectNotFound } return obj, nil } func (o *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) { var series []plumbing.EncodedObject switch t { case plumbing.AnyObject: series = flattenObjectMap(o.Objects) case plumbing.CommitObject: series = flattenObjectMap(o.Commits) case plumbing.TreeObject: series = flattenObjectMap(o.Trees) case plumbing.BlobObject: series = flattenObjectMap(o.Blobs) case plumbing.TagObject: series = flattenObjectMap(o.Tags) } return storer.NewEncodedObjectSliceIter(series), nil } func flattenObjectMap(m map[plumbing.Hash]plumbing.EncodedObject) []plumbing.EncodedObject { objects := make([]plumbing.EncodedObject, 0, len(m)) for _, obj := range m { objects = append(objects, obj) } return objects } func (o *ObjectStorage) Begin() storer.Transaction { return &TxObjectStorage{ Storage: o, Objects: make(map[plumbing.Hash]plumbing.EncodedObject), } } func (o *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error 
{ for h := range o.Objects { err := fun(h) if err != nil { if err == storer.ErrStop { return nil } return err } } return nil } func (o *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) { return nil, nil } func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error { return nil } var errNotSupported = fmt.Errorf("Not supported") func (s *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) { return time.Time{}, errNotSupported } func (s *ObjectStorage) DeleteLooseObject(plumbing.Hash) error { return errNotSupported } type TxObjectStorage struct { Storage *ObjectStorage Objects map[plumbing.Hash]plumbing.EncodedObject } func (tx *TxObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) { h := obj.Hash() tx.Objects[h] = obj return h, nil } func (tx *TxObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { obj, ok := tx.Objects[h] if !ok || (plumbing.AnyObject != t && obj.Type() != t) { return nil, plumbing.ErrObjectNotFound } return obj, nil } func (tx *TxObjectStorage) Commit() error { for h, obj := range tx.Objects { delete(tx.Objects, h) if _, err := tx.Storage.SetEncodedObject(obj); err != nil { return err } } return nil } func (tx *TxObjectStorage) Rollback() error { tx.Objects = make(map[plumbing.Hash]plumbing.EncodedObject) return nil } type ReferenceStorage map[plumbing.ReferenceName]*plumbing.Reference func (r ReferenceStorage) SetReference(ref *plumbing.Reference) error { if ref != nil { r[ref.Name()] = ref } return nil } func (r ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error { if ref == nil { return nil } if old != nil { tmp := r[ref.Name()] if tmp != nil && tmp.Hash() != old.Hash() { return storage.ErrReferenceHasChanged } } r[ref.Name()] = ref return nil } func (r ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) { ref, ok := r[n] if !ok { return nil, 
plumbing.ErrReferenceNotFound } return ref, nil } func (r ReferenceStorage) IterReferences() (storer.ReferenceIter, error) { var refs []*plumbing.Reference for _, ref := range r { refs = append(refs, ref) } return storer.NewReferenceSliceIter(refs), nil } func (r ReferenceStorage) CountLooseRefs() (int, error) { return len(r), nil } func (r ReferenceStorage) PackRefs() error { return nil } func (r ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error { delete(r, n) return nil } type ShallowStorage []plumbing.Hash func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error { *s = commits return nil } func (s ShallowStorage) Shallow() ([]plumbing.Hash, error) { return s, nil } type ModuleStorage map[string]*Storage func (s ModuleStorage) Module(name string) (storage.Storer, error) { if m, ok := s[name]; ok { return m, nil } m := NewStorage() s[name] = m return m, nil } golang-gopkg-src-d-go-git.v4-4.11.0/storage/memory/storage_test.go000066400000000000000000000005571345605224300247140ustar00rootroot00000000000000package memory import ( "testing" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/storage/test" ) func Test(t *testing.T) { TestingT(t) } type StorageSuite struct { test.BaseStorageSuite } var _ = Suite(&StorageSuite{}) func (s *StorageSuite) SetUpTest(c *C) { s.BaseStorageSuite = test.NewBaseStorageSuite(NewStorage()) s.BaseStorageSuite.SetUpTest(c) } golang-gopkg-src-d-go-git.v4-4.11.0/storage/storer.go000066400000000000000000000015571345605224300222200ustar00rootroot00000000000000package storage import ( "errors" "gopkg.in/src-d/go-git.v4/config" "gopkg.in/src-d/go-git.v4/plumbing/storer" ) var ErrReferenceHasChanged = errors.New("reference has changed concurrently") // Storer is a generic storage of objects, references and any information // related to a particular repository. 
The package gopkg.in/src-d/go-git.v4/storage // contains two implementation a filesystem base implementation (such as `.git`) // and a memory implementations being ephemeral type Storer interface { storer.EncodedObjectStorer storer.ReferenceStorer storer.ShallowStorer storer.IndexStorer config.ConfigStorer ModuleStorer } // ModuleStorer allows interact with the modules' Storers type ModuleStorer interface { // Module returns a Storer representing a submodule, if not exists returns a // new empty Storer is returned Module(name string) (Storer, error) } golang-gopkg-src-d-go-git.v4-4.11.0/storage/test/000077500000000000000000000000001345605224300213225ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/storage/test/storage_suite.go000066400000000000000000000330161345605224300245310ustar00rootroot00000000000000package test import ( "encoding/hex" "errors" "fmt" "io" "io/ioutil" "gopkg.in/src-d/go-git.v4/config" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/index" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type Storer interface { storer.EncodedObjectStorer storer.ReferenceStorer storer.ShallowStorer storer.IndexStorer config.ConfigStorer storage.ModuleStorer } type TestObject struct { Object plumbing.EncodedObject Hash string Type plumbing.ObjectType } type BaseStorageSuite struct { Storer Storer validTypes []plumbing.ObjectType testObjects map[plumbing.ObjectType]TestObject } func NewBaseStorageSuite(s Storer) BaseStorageSuite { commit := &plumbing.MemoryObject{} commit.SetType(plumbing.CommitObject) tree := &plumbing.MemoryObject{} tree.SetType(plumbing.TreeObject) blob := &plumbing.MemoryObject{} blob.SetType(plumbing.BlobObject) tag := &plumbing.MemoryObject{} tag.SetType(plumbing.TagObject) return BaseStorageSuite{ Storer: s, validTypes: []plumbing.ObjectType{ plumbing.CommitObject, plumbing.BlobObject, plumbing.TagObject, plumbing.TreeObject, }, testObjects: map[plumbing.ObjectType]TestObject{ plumbing.CommitObject: {commit, "dcf5b16e76cce7425d0beaef62d79a7d10fce1f5", plumbing.CommitObject}, plumbing.TreeObject: {tree, "4b825dc642cb6eb9a060e54bf8d69288fbee4904", plumbing.TreeObject}, plumbing.BlobObject: {blob, "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391", plumbing.BlobObject}, plumbing.TagObject: {tag, "d994c6bb648123a17e8f70a966857c546b2a6f94", plumbing.TagObject}, }} } func (s *BaseStorageSuite) SetUpTest(c *C) { c.Assert(fixtures.Init(), IsNil) } func (s *BaseStorageSuite) TearDownTest(c *C) { c.Assert(fixtures.Clean(), IsNil) } func (s *BaseStorageSuite) TestSetEncodedObjectAndEncodedObject(c *C) { for _, to := range s.testObjects { comment := Commentf("failed for type %s", to.Type.String()) h, err := s.Storer.SetEncodedObject(to.Object) c.Assert(err, IsNil) c.Assert(h.String(), Equals, to.Hash, comment) o, err := s.Storer.EncodedObject(to.Type, h) c.Assert(err, IsNil) c.Assert(objectEquals(o, to.Object), IsNil) o, err = s.Storer.EncodedObject(plumbing.AnyObject, h) c.Assert(err, IsNil) 
c.Assert(objectEquals(o, to.Object), IsNil) for _, t := range s.validTypes { if t == to.Type { continue } o, err = s.Storer.EncodedObject(t, h) c.Assert(o, IsNil) c.Assert(err, Equals, plumbing.ErrObjectNotFound) } } } func (s *BaseStorageSuite) TestSetEncodedObjectInvalid(c *C) { o := s.Storer.NewEncodedObject() o.SetType(plumbing.REFDeltaObject) _, err := s.Storer.SetEncodedObject(o) c.Assert(err, NotNil) } func (s *BaseStorageSuite) TestIterEncodedObjects(c *C) { for _, o := range s.testObjects { h, err := s.Storer.SetEncodedObject(o.Object) c.Assert(err, IsNil) c.Assert(h, Equals, o.Object.Hash()) } for _, t := range s.validTypes { comment := Commentf("failed for type %s)", t.String()) i, err := s.Storer.IterEncodedObjects(t) c.Assert(err, IsNil, comment) o, err := i.Next() c.Assert(err, IsNil) c.Assert(objectEquals(o, s.testObjects[t].Object), IsNil) o, err = i.Next() c.Assert(o, IsNil) c.Assert(err, Equals, io.EOF, comment) } i, err := s.Storer.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) foundObjects := []plumbing.EncodedObject{} i.ForEach(func(o plumbing.EncodedObject) error { foundObjects = append(foundObjects, o) return nil }) c.Assert(foundObjects, HasLen, len(s.testObjects)) for _, to := range s.testObjects { found := false for _, o := range foundObjects { if to.Object.Hash() == o.Hash() { found = true break } } c.Assert(found, Equals, true, Commentf("Object of type %s not found", to.Type.String())) } } func (s *BaseStorageSuite) TestPackfileWriter(c *C) { pwr, ok := s.Storer.(storer.PackfileWriter) if !ok { c.Skip("not a storer.PackWriter") } pw, err := pwr.PackfileWriter() c.Assert(err, IsNil) f := fixtures.Basic().One() _, err = io.Copy(pw, f.Packfile()) c.Assert(err, IsNil) err = pw.Close() c.Assert(err, IsNil) iter, err := s.Storer.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) objects := 0 err = iter.ForEach(func(plumbing.EncodedObject) error { objects++ return nil }) c.Assert(err, IsNil) c.Assert(objects, Equals, 31) } 
func (s *BaseStorageSuite) TestObjectStorerTxSetEncodedObjectAndCommit(c *C) { storer, ok := s.Storer.(storer.Transactioner) if !ok { c.Skip("not a plumbing.ObjectStorerTx") } tx := storer.Begin() for _, o := range s.testObjects { h, err := tx.SetEncodedObject(o.Object) c.Assert(err, IsNil) c.Assert(h.String(), Equals, o.Hash) } iter, err := s.Storer.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) _, err = iter.Next() c.Assert(err, Equals, io.EOF) err = tx.Commit() c.Assert(err, IsNil) iter, err = s.Storer.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) var count int iter.ForEach(func(o plumbing.EncodedObject) error { count++ return nil }) c.Assert(count, Equals, 4) } func (s *BaseStorageSuite) TestObjectStorerTxSetObjectAndGetObject(c *C) { storer, ok := s.Storer.(storer.Transactioner) if !ok { c.Skip("not a plumbing.ObjectStorerTx") } tx := storer.Begin() for _, expected := range s.testObjects { h, err := tx.SetEncodedObject(expected.Object) c.Assert(err, IsNil) c.Assert(h.String(), Equals, expected.Hash) o, err := tx.EncodedObject(expected.Type, plumbing.NewHash(expected.Hash)) c.Assert(err, IsNil) c.Assert(o.Hash().String(), DeepEquals, expected.Hash) } } func (s *BaseStorageSuite) TestObjectStorerTxGetObjectNotFound(c *C) { storer, ok := s.Storer.(storer.Transactioner) if !ok { c.Skip("not a plumbing.ObjectStorerTx") } tx := storer.Begin() o, err := tx.EncodedObject(plumbing.AnyObject, plumbing.ZeroHash) c.Assert(o, IsNil) c.Assert(err, Equals, plumbing.ErrObjectNotFound) } func (s *BaseStorageSuite) TestObjectStorerTxSetObjectAndRollback(c *C) { storer, ok := s.Storer.(storer.Transactioner) if !ok { c.Skip("not a plumbing.ObjectStorerTx") } tx := storer.Begin() for _, o := range s.testObjects { h, err := tx.SetEncodedObject(o.Object) c.Assert(err, IsNil) c.Assert(h.String(), Equals, o.Hash) } err := tx.Rollback() c.Assert(err, IsNil) iter, err := s.Storer.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) _, err = iter.Next() 
c.Assert(err, Equals, io.EOF) } func (s *BaseStorageSuite) TestSetReferenceAndGetReference(c *C) { err := s.Storer.SetReference( plumbing.NewReferenceFromStrings("foo", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52"), ) c.Assert(err, IsNil) err = s.Storer.SetReference( plumbing.NewReferenceFromStrings("bar", "482e0eada5de4039e6f216b45b3c9b683b83bfa"), ) c.Assert(err, IsNil) e, err := s.Storer.Reference(plumbing.ReferenceName("foo")) c.Assert(err, IsNil) c.Assert(e.Hash().String(), Equals, "bc9968d75e48de59f0870ffb71f5e160bbbdcf52") } func (s *BaseStorageSuite) TestCheckAndSetReference(c *C) { err := s.Storer.SetReference( plumbing.NewReferenceFromStrings("foo", "482e0eada5de4039e6f216b45b3c9b683b83bfa"), ) c.Assert(err, IsNil) err = s.Storer.CheckAndSetReference( plumbing.NewReferenceFromStrings("foo", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52"), plumbing.NewReferenceFromStrings("foo", "482e0eada5de4039e6f216b45b3c9b683b83bfa"), ) c.Assert(err, IsNil) e, err := s.Storer.Reference(plumbing.ReferenceName("foo")) c.Assert(err, IsNil) c.Assert(e.Hash().String(), Equals, "bc9968d75e48de59f0870ffb71f5e160bbbdcf52") } func (s *BaseStorageSuite) TestCheckAndSetReferenceNil(c *C) { err := s.Storer.SetReference( plumbing.NewReferenceFromStrings("foo", "482e0eada5de4039e6f216b45b3c9b683b83bfa"), ) c.Assert(err, IsNil) err = s.Storer.CheckAndSetReference( plumbing.NewReferenceFromStrings("foo", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52"), nil, ) c.Assert(err, IsNil) e, err := s.Storer.Reference(plumbing.ReferenceName("foo")) c.Assert(err, IsNil) c.Assert(e.Hash().String(), Equals, "bc9968d75e48de59f0870ffb71f5e160bbbdcf52") } func (s *BaseStorageSuite) TestCheckAndSetReferenceError(c *C) { err := s.Storer.SetReference( plumbing.NewReferenceFromStrings("foo", "c3f4688a08fd86f1bf8e055724c84b7a40a09733"), ) c.Assert(err, IsNil) err = s.Storer.CheckAndSetReference( plumbing.NewReferenceFromStrings("foo", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52"), 
plumbing.NewReferenceFromStrings("foo", "482e0eada5de4039e6f216b45b3c9b683b83bfa"), ) c.Assert(err, Equals, storage.ErrReferenceHasChanged) e, err := s.Storer.Reference(plumbing.ReferenceName("foo")) c.Assert(err, IsNil) c.Assert(e.Hash().String(), Equals, "c3f4688a08fd86f1bf8e055724c84b7a40a09733") } func (s *BaseStorageSuite) TestRemoveReference(c *C) { err := s.Storer.SetReference( plumbing.NewReferenceFromStrings("foo", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52"), ) c.Assert(err, IsNil) err = s.Storer.RemoveReference(plumbing.ReferenceName("foo")) c.Assert(err, IsNil) _, err = s.Storer.Reference(plumbing.ReferenceName("foo")) c.Assert(err, Equals, plumbing.ErrReferenceNotFound) } func (s *BaseStorageSuite) TestRemoveReferenceNonExistent(c *C) { err := s.Storer.SetReference( plumbing.NewReferenceFromStrings("foo", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52"), ) c.Assert(err, IsNil) err = s.Storer.RemoveReference(plumbing.ReferenceName("nonexistent")) c.Assert(err, IsNil) e, err := s.Storer.Reference(plumbing.ReferenceName("foo")) c.Assert(err, IsNil) c.Assert(e.Hash().String(), Equals, "bc9968d75e48de59f0870ffb71f5e160bbbdcf52") } func (s *BaseStorageSuite) TestGetReferenceNotFound(c *C) { r, err := s.Storer.Reference(plumbing.ReferenceName("bar")) c.Assert(err, Equals, plumbing.ErrReferenceNotFound) c.Assert(r, IsNil) } func (s *BaseStorageSuite) TestIterReferences(c *C) { err := s.Storer.SetReference( plumbing.NewReferenceFromStrings("refs/foo", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52"), ) c.Assert(err, IsNil) i, err := s.Storer.IterReferences() c.Assert(err, IsNil) e, err := i.Next() c.Assert(err, IsNil) c.Assert(e.Hash().String(), Equals, "bc9968d75e48de59f0870ffb71f5e160bbbdcf52") e, err = i.Next() c.Assert(e, IsNil) c.Assert(err, Equals, io.EOF) } func (s *BaseStorageSuite) TestSetShallowAndShallow(c *C) { expected := []plumbing.Hash{ plumbing.NewHash("b66c08ba28aa1f81eb06a1127aa3936ff77e5e2c"), 
plumbing.NewHash("c3f4688a08fd86f1bf8e055724c84b7a40a09733"), plumbing.NewHash("c78874f116be67ecf54df225a613162b84cc6ebf"), } err := s.Storer.SetShallow(expected) c.Assert(err, IsNil) result, err := s.Storer.Shallow() c.Assert(err, IsNil) c.Assert(result, DeepEquals, expected) } func (s *BaseStorageSuite) TestSetConfigAndConfig(c *C) { expected := config.NewConfig() expected.Core.IsBare = true expected.Remotes["foo"] = &config.RemoteConfig{ Name: "foo", URLs: []string{"http://foo/bar.git"}, } err := s.Storer.SetConfig(expected) c.Assert(err, IsNil) cfg, err := s.Storer.Config() c.Assert(err, IsNil) c.Assert(cfg.Core.IsBare, DeepEquals, expected.Core.IsBare) c.Assert(cfg.Remotes, DeepEquals, expected.Remotes) } func (s *BaseStorageSuite) TestIndex(c *C) { expected := &index.Index{} expected.Version = 2 idx, err := s.Storer.Index() c.Assert(err, IsNil) c.Assert(idx, DeepEquals, expected) } func (s *BaseStorageSuite) TestSetIndexAndIndex(c *C) { expected := &index.Index{} expected.Version = 2 err := s.Storer.SetIndex(expected) c.Assert(err, IsNil) idx, err := s.Storer.Index() c.Assert(err, IsNil) c.Assert(idx, DeepEquals, expected) } func (s *BaseStorageSuite) TestSetConfigInvalid(c *C) { cfg := config.NewConfig() cfg.Remotes["foo"] = &config.RemoteConfig{} err := s.Storer.SetConfig(cfg) c.Assert(err, NotNil) } func (s *BaseStorageSuite) TestModule(c *C) { storer, err := s.Storer.Module("foo") c.Assert(err, IsNil) c.Assert(storer, NotNil) storer, err = s.Storer.Module("foo") c.Assert(err, IsNil) c.Assert(storer, NotNil) } func (s *BaseStorageSuite) TestDeltaObjectStorer(c *C) { dos, ok := s.Storer.(storer.DeltaObjectStorer) if !ok { c.Skip("not an DeltaObjectStorer") } pwr, ok := s.Storer.(storer.PackfileWriter) if !ok { c.Skip("not a storer.PackWriter") } pw, err := pwr.PackfileWriter() c.Assert(err, IsNil) f := fixtures.Basic().One() _, err = io.Copy(pw, f.Packfile()) c.Assert(err, IsNil) err = pw.Close() c.Assert(err, IsNil) h := 
plumbing.NewHash("32858aad3c383ed1ff0a0f9bdf231d54a00c9e88") obj, err := dos.DeltaObject(plumbing.AnyObject, h) c.Assert(err, IsNil) c.Assert(obj.Type(), Equals, plumbing.BlobObject) h = plumbing.NewHash("aa9b383c260e1d05fbbf6b30a02914555e20c725") obj, err = dos.DeltaObject(plumbing.AnyObject, h) c.Assert(err, IsNil) c.Assert(obj.Type(), Equals, plumbing.OFSDeltaObject) _, ok = obj.(plumbing.DeltaObject) c.Assert(ok, Equals, true) } func objectEquals(a plumbing.EncodedObject, b plumbing.EncodedObject) error { ha := a.Hash() hb := b.Hash() if ha != hb { return fmt.Errorf("hashes do not match: %s != %s", ha.String(), hb.String()) } ra, err := a.Reader() if err != nil { return fmt.Errorf("can't get reader on b: %q", err) } rb, err := b.Reader() if err != nil { return fmt.Errorf("can't get reader on a: %q", err) } ca, err := ioutil.ReadAll(ra) if err != nil { return fmt.Errorf("error reading a: %q", err) } cb, err := ioutil.ReadAll(rb) if err != nil { return fmt.Errorf("error reading b: %q", err) } if hex.EncodeToString(ca) != hex.EncodeToString(cb) { return errors.New("content does not match") } return nil } golang-gopkg-src-d-go-git.v4-4.11.0/storage/transactional/000077500000000000000000000000001345605224300232055ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/storage/transactional/config.go000066400000000000000000000021611345605224300250010ustar00rootroot00000000000000package transactional import "gopkg.in/src-d/go-git.v4/config" // ConfigStorage implements the storer.ConfigStorage for the transactional package. type ConfigStorage struct { config.ConfigStorer temporal config.ConfigStorer set bool } // NewConfigStorage returns a new ConfigStorer based on a base storer and a // temporal storer. func NewConfigStorage(s, temporal config.ConfigStorer) *ConfigStorage { return &ConfigStorage{ConfigStorer: s, temporal: temporal} } // SetConfig honors the storer.ConfigStorer interface. 
func (c *ConfigStorage) SetConfig(cfg *config.Config) error { if err := c.temporal.SetConfig(cfg); err != nil { return err } c.set = true return nil } // Config honors the storer.ConfigStorer interface. func (c *ConfigStorage) Config() (*config.Config, error) { if !c.set { return c.ConfigStorer.Config() } return c.temporal.Config() } // Commit it copies the config from the temporal storage into the base storage. func (c *ConfigStorage) Commit() error { if !c.set { return nil } cfg, err := c.temporal.Config() if err != nil { return err } return c.ConfigStorer.SetConfig(cfg) } golang-gopkg-src-d-go-git.v4-4.11.0/storage/transactional/config_test.go000066400000000000000000000032711345605224300260430ustar00rootroot00000000000000package transactional import ( . "gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/config" "gopkg.in/src-d/go-git.v4/storage/memory" ) var _ = Suite(&ConfigSuite{}) type ConfigSuite struct{} func (s *ConfigSuite) TestSetConfigBase(c *C) { cfg := config.NewConfig() cfg.Core.Worktree = "foo" base := memory.NewStorage() err := base.SetConfig(cfg) c.Assert(err, IsNil) temporal := memory.NewStorage() cs := NewConfigStorage(base, temporal) cfg, err = cs.Config() c.Assert(err, IsNil) c.Assert(cfg.Core.Worktree, Equals, "foo") } func (s *ConfigSuite) TestSetConfigTemporal(c *C) { cfg := config.NewConfig() cfg.Core.Worktree = "foo" base := memory.NewStorage() err := base.SetConfig(cfg) c.Assert(err, IsNil) temporal := memory.NewStorage() cfg = config.NewConfig() cfg.Core.Worktree = "bar" cs := NewConfigStorage(base, temporal) err = cs.SetConfig(cfg) c.Assert(err, IsNil) baseCfg, err := base.Config() c.Assert(err, IsNil) c.Assert(baseCfg.Core.Worktree, Equals, "foo") temporalCfg, err := temporal.Config() c.Assert(err, IsNil) c.Assert(temporalCfg.Core.Worktree, Equals, "bar") cfg, err = cs.Config() c.Assert(err, IsNil) c.Assert(temporalCfg.Core.Worktree, Equals, "bar") } func (s *ConfigSuite) TestCommit(c *C) { cfg := config.NewConfig() cfg.Core.Worktree = 
"foo" base := memory.NewStorage() err := base.SetConfig(cfg) c.Assert(err, IsNil) temporal := memory.NewStorage() cfg = config.NewConfig() cfg.Core.Worktree = "bar" cs := NewConfigStorage(base, temporal) err = cs.SetConfig(cfg) c.Assert(err, IsNil) err = cs.Commit() c.Assert(err, IsNil) baseCfg, err := base.Config() c.Assert(err, IsNil) c.Assert(baseCfg.Core.Worktree, Equals, "bar") } golang-gopkg-src-d-go-git.v4-4.11.0/storage/transactional/doc.go000066400000000000000000000005311345605224300243000ustar00rootroot00000000000000// Package transactional is a transactional implementation of git.Storer, it // demux the write and read operation of two separate storers, allowing to merge // content calling Storage.Commit. // // The API and functionality of this package are considered EXPERIMENTAL and is // not considered stable nor production ready. package transactional golang-gopkg-src-d-go-git.v4-4.11.0/storage/transactional/index.go000066400000000000000000000022411345605224300246420ustar00rootroot00000000000000package transactional import ( "gopkg.in/src-d/go-git.v4/plumbing/format/index" "gopkg.in/src-d/go-git.v4/plumbing/storer" ) // IndexStorage implements the storer.IndexStorage for the transactional package. type IndexStorage struct { storer.IndexStorer temporal storer.IndexStorer set bool } // NewIndexStorage returns a new IndexStorer based on a base storer and a // temporal storer. func NewIndexStorage(s, temporal storer.IndexStorer) *IndexStorage { return &IndexStorage{ IndexStorer: s, temporal: temporal, } } // SetIndex honors the storer.IndexStorer interface. func (s *IndexStorage) SetIndex(idx *index.Index) (err error) { if err := s.temporal.SetIndex(idx); err != nil { return err } s.set = true return nil } // Index honors the storer.IndexStorer interface. func (s *IndexStorage) Index() (*index.Index, error) { if !s.set { return s.IndexStorer.Index() } return s.temporal.Index() } // Commit it copies the index from the temporal storage into the base storage. 
func (s *IndexStorage) Commit() error { if !s.set { return nil } idx, err := s.temporal.Index() if err != nil { return err } return s.IndexStorer.SetIndex(idx) } golang-gopkg-src-d-go-git.v4-4.11.0/storage/transactional/index_test.go000066400000000000000000000017521345605224300257070ustar00rootroot00000000000000package transactional import ( . "gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing/format/index" "gopkg.in/src-d/go-git.v4/storage/memory" ) var _ = Suite(&IndexSuite{}) type IndexSuite struct{} func (s *IndexSuite) TestSetIndexBase(c *C) { idx := &index.Index{} idx.Version = 2 base := memory.NewStorage() err := base.SetIndex(idx) c.Assert(err, IsNil) temporal := memory.NewStorage() cs := NewIndexStorage(base, temporal) idx, err = cs.Index() c.Assert(err, IsNil) c.Assert(idx.Version, Equals, uint32(2)) } func (s *IndexSuite) TestCommit(c *C) { idx := &index.Index{} idx.Version = 2 base := memory.NewStorage() err := base.SetIndex(idx) c.Assert(err, IsNil) temporal := memory.NewStorage() idx = &index.Index{} idx.Version = 3 is := NewIndexStorage(base, temporal) err = is.SetIndex(idx) c.Assert(err, IsNil) err = is.Commit() c.Assert(err, IsNil) baseIndex, err := base.Index() c.Assert(err, IsNil) c.Assert(baseIndex.Version, Equals, uint32(3)) } golang-gopkg-src-d-go-git.v4-4.11.0/storage/transactional/object.go000066400000000000000000000047351345605224300250130ustar00rootroot00000000000000package transactional import ( "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" ) // ObjectStorage implements the storer.EncodedObjectStorer for the transactional package. type ObjectStorage struct { storer.EncodedObjectStorer temporal storer.EncodedObjectStorer } // NewObjectStorage returns a new EncodedObjectStorer based on a base storer and // a temporal storer. 
func NewObjectStorage(base, temporal storer.EncodedObjectStorer) *ObjectStorage { return &ObjectStorage{EncodedObjectStorer: base, temporal: temporal} } // SetEncodedObject honors the storer.EncodedObjectStorer interface. func (o *ObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) { return o.temporal.SetEncodedObject(obj) } // HasEncodedObject honors the storer.EncodedObjectStorer interface. func (o *ObjectStorage) HasEncodedObject(h plumbing.Hash) error { err := o.EncodedObjectStorer.HasEncodedObject(h) if err == plumbing.ErrObjectNotFound { return o.temporal.HasEncodedObject(h) } return err } // EncodedObjectSize honors the storer.EncodedObjectStorer interface. func (o *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (int64, error) { sz, err := o.EncodedObjectStorer.EncodedObjectSize(h) if err == plumbing.ErrObjectNotFound { return o.temporal.EncodedObjectSize(h) } return sz, err } // EncodedObject honors the storer.EncodedObjectStorer interface. func (o *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { obj, err := o.EncodedObjectStorer.EncodedObject(t, h) if err == plumbing.ErrObjectNotFound { return o.temporal.EncodedObject(t, h) } return obj, err } // IterEncodedObjects honors the storer.EncodedObjectStorer interface. func (o *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) { baseIter, err := o.EncodedObjectStorer.IterEncodedObjects(t) if err != nil { return nil, err } temporalIter, err := o.temporal.IterEncodedObjects(t) if err != nil { return nil, err } return storer.NewMultiEncodedObjectIter([]storer.EncodedObjectIter{ baseIter, temporalIter, }), nil } // Commit it copies the objects of the temporal storage into the base storage. 
func (o *ObjectStorage) Commit() error { iter, err := o.temporal.IterEncodedObjects(plumbing.AnyObject) if err != nil { return err } return iter.ForEach(func(obj plumbing.EncodedObject) error { _, err := o.EncodedObjectStorer.SetEncodedObject(obj) return err }) } golang-gopkg-src-d-go-git.v4-4.11.0/storage/transactional/object_test.go000066400000000000000000000070771345605224300260540ustar00rootroot00000000000000package transactional import ( . "gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/storage/memory" ) var _ = Suite(&ObjectSuite{}) type ObjectSuite struct{} func (s *ObjectSuite) TestHasEncodedObject(c *C) { base := memory.NewStorage() temporal := memory.NewStorage() os := NewObjectStorage(base, temporal) commit := base.NewEncodedObject() commit.SetType(plumbing.CommitObject) ch, err := base.SetEncodedObject(commit) c.Assert(ch.IsZero(), Equals, false) c.Assert(err, IsNil) tree := base.NewEncodedObject() tree.SetType(plumbing.TreeObject) th, err := os.SetEncodedObject(tree) c.Assert(th.IsZero(), Equals, false) c.Assert(err, IsNil) err = os.HasEncodedObject(th) c.Assert(err, IsNil) err = os.HasEncodedObject(ch) c.Assert(err, IsNil) err = base.HasEncodedObject(th) c.Assert(err, Equals, plumbing.ErrObjectNotFound) } func (s *ObjectSuite) TestEncodedObjectAndEncodedObjectSize(c *C) { base := memory.NewStorage() temporal := memory.NewStorage() os := NewObjectStorage(base, temporal) commit := base.NewEncodedObject() commit.SetType(plumbing.CommitObject) ch, err := base.SetEncodedObject(commit) c.Assert(ch.IsZero(), Equals, false) c.Assert(err, IsNil) tree := base.NewEncodedObject() tree.SetType(plumbing.TreeObject) th, err := os.SetEncodedObject(tree) c.Assert(th.IsZero(), Equals, false) c.Assert(err, IsNil) otree, err := os.EncodedObject(plumbing.TreeObject, th) c.Assert(err, IsNil) c.Assert(otree.Hash(), Equals, tree.Hash()) treeSz, err := os.EncodedObjectSize(th) c.Assert(err, IsNil) c.Assert(treeSz, Equals, int64(0)) ocommit, 
err := os.EncodedObject(plumbing.CommitObject, ch) c.Assert(err, IsNil) c.Assert(ocommit.Hash(), Equals, commit.Hash()) commitSz, err := os.EncodedObjectSize(ch) c.Assert(err, IsNil) c.Assert(commitSz, Equals, int64(0)) _, err = base.EncodedObject(plumbing.TreeObject, th) c.Assert(err, Equals, plumbing.ErrObjectNotFound) _, err = base.EncodedObjectSize(th) c.Assert(err, Equals, plumbing.ErrObjectNotFound) } func (s *ObjectSuite) TestIterEncodedObjects(c *C) { base := memory.NewStorage() temporal := memory.NewStorage() os := NewObjectStorage(base, temporal) commit := base.NewEncodedObject() commit.SetType(plumbing.CommitObject) ch, err := base.SetEncodedObject(commit) c.Assert(ch.IsZero(), Equals, false) c.Assert(err, IsNil) tree := base.NewEncodedObject() tree.SetType(plumbing.TreeObject) th, err := os.SetEncodedObject(tree) c.Assert(th.IsZero(), Equals, false) c.Assert(err, IsNil) iter, err := os.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) var hashes []plumbing.Hash err = iter.ForEach(func(obj plumbing.EncodedObject) error { hashes = append(hashes, obj.Hash()) return nil }) c.Assert(err, IsNil) c.Assert(hashes, HasLen, 2) c.Assert(hashes[0], Equals, ch) c.Assert(hashes[1], Equals, th) } func (s *ObjectSuite) TestCommit(c *C) { base := memory.NewStorage() temporal := memory.NewStorage() os := NewObjectStorage(base, temporal) commit := base.NewEncodedObject() commit.SetType(plumbing.CommitObject) _, err := os.SetEncodedObject(commit) c.Assert(err, IsNil) tree := base.NewEncodedObject() tree.SetType(plumbing.TreeObject) _, err = os.SetEncodedObject(tree) c.Assert(err, IsNil) err = os.Commit() c.Assert(err, IsNil) iter, err := base.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) var hashes []plumbing.Hash err = iter.ForEach(func(obj plumbing.EncodedObject) error { hashes = append(hashes, obj.Hash()) return nil }) c.Assert(err, IsNil) c.Assert(hashes, HasLen, 2) } 
golang-gopkg-src-d-go-git.v4-4.11.0/storage/transactional/reference.go000066400000000000000000000070321345605224300254740ustar00rootroot00000000000000package transactional import ( "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage" ) // ReferenceStorage implements the storer.ReferenceStorage for the transactional package. type ReferenceStorage struct { storer.ReferenceStorer temporal storer.ReferenceStorer // deleted, remaining references at this maps are going to be deleted when // commit is requested, the entries are added when RemoveReference is called // and deleted if SetReference is called. deleted map[plumbing.ReferenceName]struct{} // packRefs if true PackRefs is going to be called in the based storer when // commit is called. packRefs bool } // NewReferenceStorage returns a new ReferenceStorer based on a base storer and // a temporal storer. func NewReferenceStorage(base, temporal storer.ReferenceStorer) *ReferenceStorage { return &ReferenceStorage{ ReferenceStorer: base, temporal: temporal, deleted: make(map[plumbing.ReferenceName]struct{}, 0), } } // SetReference honors the storer.ReferenceStorer interface. func (r *ReferenceStorage) SetReference(ref *plumbing.Reference) error { delete(r.deleted, ref.Name()) return r.temporal.SetReference(ref) } // SetReference honors the storer.ReferenceStorer interface. func (r *ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error { if old == nil { return r.SetReference(ref) } tmp, err := r.temporal.Reference(old.Name()) if err == plumbing.ErrReferenceNotFound { tmp, err = r.ReferenceStorer.Reference(old.Name()) } if err != nil { return err } if tmp.Hash() != old.Hash() { return storage.ErrReferenceHasChanged } return r.SetReference(ref) } // Reference honors the storer.ReferenceStorer interface. 
func (r ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) { if _, deleted := r.deleted[n]; deleted { return nil, plumbing.ErrReferenceNotFound } ref, err := r.temporal.Reference(n) if err == plumbing.ErrReferenceNotFound { return r.ReferenceStorer.Reference(n) } return ref, err } // IterReferences honors the storer.ReferenceStorer interface. func (r ReferenceStorage) IterReferences() (storer.ReferenceIter, error) { baseIter, err := r.ReferenceStorer.IterReferences() if err != nil { return nil, err } temporalIter, err := r.temporal.IterReferences() if err != nil { return nil, err } return storer.NewMultiReferenceIter([]storer.ReferenceIter{ baseIter, temporalIter, }), nil } // CountLooseRefs honors the storer.ReferenceStorer interface. func (r ReferenceStorage) CountLooseRefs() (int, error) { tc, err := r.temporal.CountLooseRefs() if err != nil { return -1, err } bc, err := r.ReferenceStorer.CountLooseRefs() if err != nil { return -1, err } return tc + bc, nil } // PackRefs honors the storer.ReferenceStorer interface. func (r ReferenceStorage) PackRefs() error { r.packRefs = true return nil } // RemoveReference honors the storer.ReferenceStorer interface. func (r ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error { r.deleted[n] = struct{}{} return r.temporal.RemoveReference(n) } // Commit it copies the reference information of the temporal storage into the // base storage. func (r ReferenceStorage) Commit() error { for name := range r.deleted { if err := r.ReferenceStorer.RemoveReference(name); err != nil { return err } } iter, err := r.temporal.IterReferences() if err != nil { return err } return iter.ForEach(func(ref *plumbing.Reference) error { return r.ReferenceStorer.SetReference(ref) }) } golang-gopkg-src-d-go-git.v4-4.11.0/storage/transactional/reference_test.go000066400000000000000000000104731345605224300265360ustar00rootroot00000000000000package transactional import ( . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/storage/memory" ) var _ = Suite(&ReferenceSuite{}) type ReferenceSuite struct{} func (s *ReferenceSuite) TestReference(c *C) { base := memory.NewStorage() temporal := memory.NewStorage() rs := NewReferenceStorage(base, temporal) refA := plumbing.NewReferenceFromStrings("refs/a", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52") refB := plumbing.NewReferenceFromStrings("refs/b", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52") err := base.SetReference(refA) c.Assert(err, IsNil) err = rs.SetReference(refB) c.Assert(err, IsNil) _, err = rs.Reference("refs/a") c.Assert(err, IsNil) _, err = rs.Reference("refs/b") c.Assert(err, IsNil) _, err = base.Reference("refs/b") c.Assert(err, Equals, plumbing.ErrReferenceNotFound) } func (s *ReferenceSuite) TestRemoveReferenceTemporal(c *C) { base := memory.NewStorage() temporal := memory.NewStorage() ref := plumbing.NewReferenceFromStrings("refs/a", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52") rs := NewReferenceStorage(base, temporal) err := rs.SetReference(ref) c.Assert(err, IsNil) err = rs.RemoveReference("refs/a") c.Assert(err, IsNil) _, err = rs.Reference("refs/a") c.Assert(err, Equals, plumbing.ErrReferenceNotFound) } func (s *ReferenceSuite) TestRemoveReferenceBase(c *C) { base := memory.NewStorage() temporal := memory.NewStorage() ref := plumbing.NewReferenceFromStrings("refs/a", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52") rs := NewReferenceStorage(base, temporal) err := base.SetReference(ref) c.Assert(err, IsNil) err = rs.RemoveReference("refs/a") c.Assert(err, IsNil) _, err = rs.Reference("refs/a") c.Assert(err, Equals, plumbing.ErrReferenceNotFound) } func (s *ReferenceSuite) TestCheckAndSetReferenceInBase(c *C) { base := memory.NewStorage() temporal := memory.NewStorage() rs := NewReferenceStorage(base, temporal) err := base.SetReference( plumbing.NewReferenceFromStrings("foo", "482e0eada5de4039e6f216b45b3c9b683b83bfa"), ) c.Assert(err, IsNil) err = 
rs.CheckAndSetReference( plumbing.NewReferenceFromStrings("foo", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52"), plumbing.NewReferenceFromStrings("foo", "482e0eada5de4039e6f216b45b3c9b683b83bfa"), ) c.Assert(err, IsNil) e, err := rs.Reference(plumbing.ReferenceName("foo")) c.Assert(err, IsNil) c.Assert(e.Hash().String(), Equals, "bc9968d75e48de59f0870ffb71f5e160bbbdcf52") } func (s *ReferenceSuite) TestCommit(c *C) { base := memory.NewStorage() temporal := memory.NewStorage() refA := plumbing.NewReferenceFromStrings("refs/a", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52") refB := plumbing.NewReferenceFromStrings("refs/b", "b66c08ba28aa1f81eb06a1127aa3936ff77e5e2c") refC := plumbing.NewReferenceFromStrings("refs/c", "c3f4688a08fd86f1bf8e055724c84b7a40a09733") rs := NewReferenceStorage(base, temporal) c.Assert(rs.SetReference(refA), IsNil) c.Assert(rs.SetReference(refB), IsNil) c.Assert(rs.SetReference(refC), IsNil) err := rs.Commit() c.Assert(err, IsNil) iter, err := base.IterReferences() c.Assert(err, IsNil) var count int iter.ForEach(func(ref *plumbing.Reference) error { count++ return nil }) c.Assert(count, Equals, 3) } func (s *ReferenceSuite) TestCommitDelete(c *C) { base := memory.NewStorage() temporal := memory.NewStorage() refA := plumbing.NewReferenceFromStrings("refs/a", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52") refB := plumbing.NewReferenceFromStrings("refs/b", "b66c08ba28aa1f81eb06a1127aa3936ff77e5e2c") refC := plumbing.NewReferenceFromStrings("refs/c", "c3f4688a08fd86f1bf8e055724c84b7a40a09733") rs := NewReferenceStorage(base, temporal) c.Assert(base.SetReference(refA), IsNil) c.Assert(base.SetReference(refB), IsNil) c.Assert(base.SetReference(refC), IsNil) c.Assert(rs.RemoveReference(refA.Name()), IsNil) c.Assert(rs.RemoveReference(refB.Name()), IsNil) c.Assert(rs.RemoveReference(refC.Name()), IsNil) c.Assert(rs.SetReference(refC), IsNil) err := rs.Commit() c.Assert(err, IsNil) iter, err := base.IterReferences() c.Assert(err, IsNil) var count int 
iter.ForEach(func(ref *plumbing.Reference) error { count++ return nil }) c.Assert(count, Equals, 1) ref, err := rs.Reference(refC.Name()) c.Assert(err, IsNil) c.Assert(ref.Hash().String(), Equals, "c3f4688a08fd86f1bf8e055724c84b7a40a09733") } golang-gopkg-src-d-go-git.v4-4.11.0/storage/transactional/shallow.go000066400000000000000000000023631345605224300252110ustar00rootroot00000000000000package transactional import ( "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" ) // ShallowStorage implements the storer.ShallowStorer for the transactional package. type ShallowStorage struct { storer.ShallowStorer temporal storer.ShallowStorer } // NewShallowStorage returns a new ShallowStorage based on a base storer and // a temporal storer. func NewShallowStorage(base, temporal storer.ShallowStorer) *ShallowStorage { return &ShallowStorage{ ShallowStorer: base, temporal: temporal, } } // SetShallow honors the storer.ShallowStorer interface. func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error { return s.temporal.SetShallow(commits) } // Shallow honors the storer.ShallowStorer interface. func (s *ShallowStorage) Shallow() ([]plumbing.Hash, error) { shallow, err := s.temporal.Shallow() if err != nil { return nil, err } if len(shallow) != 0 { return shallow, nil } return s.ShallowStorer.Shallow() } // Commit it copies the shallow information of the temporal storage into the // base storage. func (s *ShallowStorage) Commit() error { commits, err := s.temporal.Shallow() if err != nil || len(commits) == 0 { return err } return s.ShallowStorer.SetShallow(commits) } golang-gopkg-src-d-go-git.v4-4.11.0/storage/transactional/shallow_test.go000066400000000000000000000030451345605224300262460ustar00rootroot00000000000000package transactional import ( . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/storage/memory" ) var _ = Suite(&ShallowSuite{}) type ShallowSuite struct{} func (s *ShallowSuite) TestShallow(c *C) { base := memory.NewStorage() temporal := memory.NewStorage() rs := NewShallowStorage(base, temporal) commitA := plumbing.NewHash("bc9968d75e48de59f0870ffb71f5e160bbbdcf52") commitB := plumbing.NewHash("aa9968d75e48de59f0870ffb71f5e160bbbdcf52") err := base.SetShallow([]plumbing.Hash{commitA}) c.Assert(err, IsNil) err = rs.SetShallow([]plumbing.Hash{commitB}) c.Assert(err, IsNil) commits, err := rs.Shallow() c.Assert(err, IsNil) c.Assert(commits, HasLen, 1) c.Assert(commits[0], Equals, commitB) commits, err = base.Shallow() c.Assert(err, IsNil) c.Assert(commits, HasLen, 1) c.Assert(commits[0], Equals, commitA) } func (s *ShallowSuite) TestCommit(c *C) { base := memory.NewStorage() temporal := memory.NewStorage() rs := NewShallowStorage(base, temporal) commitA := plumbing.NewHash("bc9968d75e48de59f0870ffb71f5e160bbbdcf52") commitB := plumbing.NewHash("aa9968d75e48de59f0870ffb71f5e160bbbdcf52") c.Assert(base.SetShallow([]plumbing.Hash{commitA}), IsNil) c.Assert(rs.SetShallow([]plumbing.Hash{commitB}), IsNil) c.Assert(rs.Commit(), IsNil) commits, err := rs.Shallow() c.Assert(err, IsNil) c.Assert(commits, HasLen, 1) c.Assert(commits[0], Equals, commitB) commits, err = base.Shallow() c.Assert(err, IsNil) c.Assert(commits, HasLen, 1) c.Assert(commits[0], Equals, commitB) } golang-gopkg-src-d-go-git.v4-4.11.0/storage/transactional/storage.go000066400000000000000000000044701345605224300252050ustar00rootroot00000000000000package transactional import ( "io" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage" ) // Storage is a transactional implementation of git.Storer, it demux the write // and read operation of two separate storers, allowing to merge content calling // Storage.Commit. 
// // The API and functionality of this package are considered EXPERIMENTAL and is // not considered stable nor production ready. type Storage interface { storage.Storer Commit() error } // basic implements the Storage interface. type basic struct { s, temporal storage.Storer *ObjectStorage *ReferenceStorage *IndexStorage *ShallowStorage *ConfigStorage } // packageWriter implements storer.PackfileWriter interface over // a Storage with a temporal storer that supports it. type packageWriter struct { *basic pw storer.PackfileWriter } // NewStorage returns a new Storage based on two repositories, base is the base // repository where the read operations are read and temporal is were all // the write operations are stored. func NewStorage(base, temporal storage.Storer) Storage { st := &basic{ s: base, temporal: temporal, ObjectStorage: NewObjectStorage(base, temporal), ReferenceStorage: NewReferenceStorage(base, temporal), IndexStorage: NewIndexStorage(base, temporal), ShallowStorage: NewShallowStorage(base, temporal), ConfigStorage: NewConfigStorage(base, temporal), } pw, ok := temporal.(storer.PackfileWriter) if ok { return &packageWriter{ basic: st, pw: pw, } } return st } // Module it honors the storage.ModuleStorer interface. func (s *basic) Module(name string) (storage.Storer, error) { base, err := s.s.Module(name) if err != nil { return nil, err } temporal, err := s.temporal.Module(name) if err != nil { return nil, err } return NewStorage(base, temporal), nil } // Commit it copies the content of the temporal storage into the base storage. func (s *basic) Commit() error { for _, c := range []interface{ Commit() error }{ s.ObjectStorage, s.ReferenceStorage, s.IndexStorage, s.ShallowStorage, s.ConfigStorage, } { if err := c.Commit(); err != nil { return err } } return nil } // PackfileWriter honors storage.PackfileWriter. 
func (s *packageWriter) PackfileWriter() (io.WriteCloser, error) { return s.pw.PackfileWriter() } golang-gopkg-src-d-go-git.v4-4.11.0/storage/transactional/storage_test.go000066400000000000000000000035651345605224300262500ustar00rootroot00000000000000package transactional import ( "testing" . "gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4/memfs" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage" "gopkg.in/src-d/go-git.v4/storage/filesystem" "gopkg.in/src-d/go-git.v4/storage/memory" "gopkg.in/src-d/go-git.v4/storage/test" ) func Test(t *testing.T) { TestingT(t) } type StorageSuite struct { test.BaseStorageSuite temporal func() storage.Storer } var _ = Suite(&StorageSuite{ temporal: func() storage.Storer { return memory.NewStorage() }, }) var _ = Suite(&StorageSuite{ temporal: func() storage.Storer { fs := memfs.New() return filesystem.NewStorage(fs, cache.NewObjectLRUDefault()) }, }) func (s *StorageSuite) SetUpTest(c *C) { base := memory.NewStorage() temporal := s.temporal() s.BaseStorageSuite = test.NewBaseStorageSuite(NewStorage(base, temporal)) s.BaseStorageSuite.SetUpTest(c) } func (s *StorageSuite) TestCommit(c *C) { base := memory.NewStorage() temporal := s.temporal() st := NewStorage(base, temporal) commit := base.NewEncodedObject() commit.SetType(plumbing.CommitObject) _, err := st.SetEncodedObject(commit) c.Assert(err, IsNil) ref := plumbing.NewHashReference("refs/a", commit.Hash()) c.Assert(st.SetReference(ref), IsNil) err = st.Commit() c.Assert(err, IsNil) ref, err = base.Reference(ref.Name()) c.Assert(err, IsNil) c.Assert(ref.Hash(), Equals, commit.Hash()) obj, err := base.EncodedObject(plumbing.AnyObject, commit.Hash()) c.Assert(err, IsNil) c.Assert(obj.Hash(), Equals, commit.Hash()) } func (s *StorageSuite) TestTransactionalPackfileWriter(c *C) { base := memory.NewStorage() temporal := s.temporal() st := NewStorage(base, temporal) _, tmpOK := 
temporal.(storer.PackfileWriter) _, ok := st.(storer.PackfileWriter) c.Assert(ok, Equals, tmpOK) } golang-gopkg-src-d-go-git.v4-4.11.0/submodule.go000066400000000000000000000173401345605224300212320ustar00rootroot00000000000000package git import ( "bytes" "context" "errors" "fmt" "gopkg.in/src-d/go-billy.v4" "gopkg.in/src-d/go-git.v4/config" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/index" ) var ( ErrSubmoduleAlreadyInitialized = errors.New("submodule already initialized") ErrSubmoduleNotInitialized = errors.New("submodule not initialized") ) // Submodule a submodule allows you to keep another Git repository in a // subdirectory of your repository. type Submodule struct { // initialized defines if a submodule was already initialized. initialized bool c *config.Submodule w *Worktree } // Config returns the submodule config func (s *Submodule) Config() *config.Submodule { return s.c } // Init initialize the submodule reading the recorded Entry in the index for // the given submodule func (s *Submodule) Init() error { cfg, err := s.w.r.Storer.Config() if err != nil { return err } _, ok := cfg.Submodules[s.c.Name] if ok { return ErrSubmoduleAlreadyInitialized } s.initialized = true cfg.Submodules[s.c.Name] = s.c return s.w.r.Storer.SetConfig(cfg) } // Status returns the status of the submodule. 
func (s *Submodule) Status() (*SubmoduleStatus, error) { idx, err := s.w.r.Storer.Index() if err != nil { return nil, err } return s.status(idx) } func (s *Submodule) status(idx *index.Index) (*SubmoduleStatus, error) { status := &SubmoduleStatus{ Path: s.c.Path, } e, err := idx.Entry(s.c.Path) if err != nil && err != index.ErrEntryNotFound { return nil, err } if e != nil { status.Expected = e.Hash } if !s.initialized { return status, nil } r, err := s.Repository() if err != nil { return nil, err } head, err := r.Head() if err == nil { status.Current = head.Hash() } if err != nil && err == plumbing.ErrReferenceNotFound { err = nil } return status, err } // Repository returns the Repository represented by this submodule func (s *Submodule) Repository() (*Repository, error) { if !s.initialized { return nil, ErrSubmoduleNotInitialized } storer, err := s.w.r.Storer.Module(s.c.Name) if err != nil { return nil, err } _, err = storer.Reference(plumbing.HEAD) if err != nil && err != plumbing.ErrReferenceNotFound { return nil, err } var exists bool if err == nil { exists = true } var worktree billy.Filesystem if worktree, err = s.w.Filesystem.Chroot(s.c.Path); err != nil { return nil, err } if exists { return Open(storer, worktree) } r, err := Init(storer, worktree) if err != nil { return nil, err } _, err = r.CreateRemote(&config.RemoteConfig{ Name: DefaultRemoteName, URLs: []string{s.c.URL}, }) return r, err } // Update the registered submodule to match what the superproject expects, the // submodule should be initialized first calling the Init method or setting in // the options SubmoduleUpdateOptions.Init equals true func (s *Submodule) Update(o *SubmoduleUpdateOptions) error { return s.UpdateContext(context.Background(), o) } // UpdateContext the registered submodule to match what the superproject // expects, the submodule should be initialized first calling the Init method or // setting in the options SubmoduleUpdateOptions.Init equals true. 
// // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects to the // transport operations. func (s *Submodule) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error { return s.update(ctx, o, plumbing.ZeroHash) } func (s *Submodule) update(ctx context.Context, o *SubmoduleUpdateOptions, forceHash plumbing.Hash) error { if !s.initialized && !o.Init { return ErrSubmoduleNotInitialized } if !s.initialized && o.Init { if err := s.Init(); err != nil { return err } } idx, err := s.w.r.Storer.Index() if err != nil { return err } hash := forceHash if hash.IsZero() { e, err := idx.Entry(s.c.Path) if err != nil { return err } hash = e.Hash } r, err := s.Repository() if err != nil { return err } if err := s.fetchAndCheckout(ctx, r, o, hash); err != nil { return err } return s.doRecursiveUpdate(r, o) } func (s *Submodule) doRecursiveUpdate(r *Repository, o *SubmoduleUpdateOptions) error { if o.RecurseSubmodules == NoRecurseSubmodules { return nil } w, err := r.Worktree() if err != nil { return err } l, err := w.Submodules() if err != nil { return err } new := &SubmoduleUpdateOptions{} *new = *o new.RecurseSubmodules-- return l.Update(new) } func (s *Submodule) fetchAndCheckout( ctx context.Context, r *Repository, o *SubmoduleUpdateOptions, hash plumbing.Hash, ) error { if !o.NoFetch { err := r.FetchContext(ctx, &FetchOptions{Auth: o.Auth}) if err != nil && err != NoErrAlreadyUpToDate { return err } } w, err := r.Worktree() if err != nil { return err } if err := w.Checkout(&CheckoutOptions{Hash: hash}); err != nil { return err } head := plumbing.NewHashReference(plumbing.HEAD, hash) return r.Storer.SetReference(head) } // Submodules list of several submodules from the same repository. type Submodules []*Submodule // Init initializes the submodules in this list. 
func (s Submodules) Init() error { for _, sub := range s { if err := sub.Init(); err != nil { return err } } return nil } // Update updates all the submodules in this list. func (s Submodules) Update(o *SubmoduleUpdateOptions) error { return s.UpdateContext(context.Background(), o) } // UpdateContext updates all the submodules in this list. // // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects to the // transport operations. func (s Submodules) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error { for _, sub := range s { if err := sub.UpdateContext(ctx, o); err != nil { return err } } return nil } // Status returns the status of the submodules. func (s Submodules) Status() (SubmodulesStatus, error) { var list SubmodulesStatus var r *Repository for _, sub := range s { if r == nil { r = sub.w.r } idx, err := r.Storer.Index() if err != nil { return nil, err } status, err := sub.status(idx) if err != nil { return nil, err } list = append(list, status) } return list, nil } // SubmodulesStatus contains the status for all submodiles in the worktree type SubmodulesStatus []*SubmoduleStatus // String is equivalent to `git submodule status` func (s SubmodulesStatus) String() string { buf := bytes.NewBuffer(nil) for _, sub := range s { fmt.Fprintln(buf, sub) } return buf.String() } // SubmoduleStatus contains the status for a submodule in the worktree type SubmoduleStatus struct { Path string Current plumbing.Hash Expected plumbing.Hash Branch plumbing.ReferenceName } // IsClean is the HEAD of the submodule is equals to the expected commit func (s *SubmoduleStatus) IsClean() bool { return s.Current == s.Expected } // String is equivalent to `git submodule status ` // // This will print the SHA-1 of the currently checked out commit for a // submodule, along with the submodule path and the output of git describe fo // the SHA-1. 
Each SHA-1 will be prefixed with - if the submodule is not // initialized, + if the currently checked out submodule commit does not match // the SHA-1 found in the index of the containing repository. func (s *SubmoduleStatus) String() string { var extra string var status = ' ' if s.Current.IsZero() { status = '-' } else if !s.IsClean() { status = '+' } if len(s.Branch) != 0 { extra = string(s.Branch[5:]) } else if !s.Current.IsZero() { extra = s.Current.String()[:7] } if extra != "" { extra = fmt.Sprintf(" (%s)", extra) } return fmt.Sprintf("%c%s %s%s", status, s.Expected, s.Path, extra) } golang-gopkg-src-d-go-git.v4-4.11.0/submodule_test.go000066400000000000000000000116371345605224300222740ustar00rootroot00000000000000package git import ( "context" "io/ioutil" "os" "path/filepath" "testing" "gopkg.in/src-d/go-git.v4/plumbing" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" ) type SubmoduleSuite struct { BaseSuite Worktree *Worktree path string } var _ = Suite(&SubmoduleSuite{}) func (s *SubmoduleSuite) SetUpTest(c *C) { path := fixtures.ByTag("submodule").One().Worktree().Root() dir, err := ioutil.TempDir("", "submodule") c.Assert(err, IsNil) r, err := PlainClone(filepath.Join(dir, "worktree"), false, &CloneOptions{ URL: path, }) c.Assert(err, IsNil) s.Repository = r s.Worktree, err = r.Worktree() c.Assert(err, IsNil) s.path = dir } func (s *SubmoduleSuite) TearDownTest(c *C) { err := os.RemoveAll(s.path) c.Assert(err, IsNil) } func (s *SubmoduleSuite) TestInit(c *C) { sm, err := s.Worktree.Submodule("basic") c.Assert(err, IsNil) c.Assert(sm.initialized, Equals, false) err = sm.Init() c.Assert(err, IsNil) c.Assert(sm.initialized, Equals, true) cfg, err := s.Repository.Config() c.Assert(err, IsNil) c.Assert(cfg.Submodules, HasLen, 1) c.Assert(cfg.Submodules["basic"], NotNil) status, err := sm.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, false) } func (s *SubmoduleSuite) TestUpdate(c *C) { if testing.Short() { c.Skip("skipping test in 
short mode.") } sm, err := s.Worktree.Submodule("basic") c.Assert(err, IsNil) err = sm.Update(&SubmoduleUpdateOptions{ Init: true, }) c.Assert(err, IsNil) r, err := sm.Repository() c.Assert(err, IsNil) ref, err := r.Reference(plumbing.HEAD, true) c.Assert(err, IsNil) c.Assert(ref.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5") status, err := sm.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, true) } func (s *SubmoduleSuite) TestRepositoryWithoutInit(c *C) { sm, err := s.Worktree.Submodule("basic") c.Assert(err, IsNil) r, err := sm.Repository() c.Assert(err, Equals, ErrSubmoduleNotInitialized) c.Assert(r, IsNil) } func (s *SubmoduleSuite) TestUpdateWithoutInit(c *C) { sm, err := s.Worktree.Submodule("basic") c.Assert(err, IsNil) err = sm.Update(&SubmoduleUpdateOptions{}) c.Assert(err, Equals, ErrSubmoduleNotInitialized) } func (s *SubmoduleSuite) TestUpdateWithNotFetch(c *C) { sm, err := s.Worktree.Submodule("basic") c.Assert(err, IsNil) err = sm.Update(&SubmoduleUpdateOptions{ Init: true, NoFetch: true, }) // Since we are not fetching, the object is not there c.Assert(err, Equals, plumbing.ErrObjectNotFound) } func (s *SubmoduleSuite) TestUpdateWithRecursion(c *C) { if testing.Short() { c.Skip("skipping test in short mode.") } sm, err := s.Worktree.Submodule("itself") c.Assert(err, IsNil) err = sm.Update(&SubmoduleUpdateOptions{ Init: true, RecurseSubmodules: 2, }) c.Assert(err, IsNil) fs := s.Worktree.Filesystem _, err = fs.Stat(fs.Join("itself", "basic", "LICENSE")) c.Assert(err, IsNil) } func (s *SubmoduleSuite) TestUpdateWithInitAndUpdate(c *C) { if testing.Short() { c.Skip("skipping test in short mode.") } sm, err := s.Worktree.Submodule("basic") c.Assert(err, IsNil) err = sm.Update(&SubmoduleUpdateOptions{ Init: true, }) c.Assert(err, IsNil) idx, err := s.Repository.Storer.Index() c.Assert(err, IsNil) for i, e := range idx.Entries { if e.Name == "basic" { e.Hash = 
plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d") } idx.Entries[i] = e } err = s.Repository.Storer.SetIndex(idx) c.Assert(err, IsNil) err = sm.Update(&SubmoduleUpdateOptions{}) c.Assert(err, IsNil) r, err := sm.Repository() c.Assert(err, IsNil) ref, err := r.Reference(plumbing.HEAD, true) c.Assert(err, IsNil) c.Assert(ref.Hash().String(), Equals, "b029517f6300c2da0f4b651b8642506cd6aaf45d") } func (s *SubmoduleSuite) TestSubmodulesInit(c *C) { sm, err := s.Worktree.Submodules() c.Assert(err, IsNil) err = sm.Init() c.Assert(err, IsNil) sm, err = s.Worktree.Submodules() c.Assert(err, IsNil) for _, m := range sm { c.Assert(m.initialized, Equals, true) } } func (s *SubmoduleSuite) TestGitSubmodulesSymlink(c *C) { f, err := s.Worktree.Filesystem.Create("badfile") c.Assert(err, IsNil) defer f.Close() err = s.Worktree.Filesystem.Remove(gitmodulesFile) c.Assert(err, IsNil) err = s.Worktree.Filesystem.Symlink("badfile", gitmodulesFile) c.Assert(err, IsNil) _, err = s.Worktree.Submodules() c.Assert(err, Equals, ErrGitModulesSymlink) } func (s *SubmoduleSuite) TestSubmodulesStatus(c *C) { sm, err := s.Worktree.Submodules() c.Assert(err, IsNil) status, err := sm.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 2) } func (s *SubmoduleSuite) TestSubmodulesUpdateContext(c *C) { if testing.Short() { c.Skip("skipping test in short mode.") } sm, err := s.Worktree.Submodules() c.Assert(err, IsNil) ctx, cancel := context.WithCancel(context.Background()) cancel() err = sm.UpdateContext(ctx, &SubmoduleUpdateOptions{Init: true}) c.Assert(err, NotNil) } golang-gopkg-src-d-go-git.v4-4.11.0/utils/000077500000000000000000000000001345605224300200375ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/utils/binary/000077500000000000000000000000001345605224300213235ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/utils/binary/read.go000066400000000000000000000076211345605224300225730ustar00rootroot00000000000000// Package binary implements 
sintax-sugar functions on top of the standard // library binary package package binary import ( "bufio" "encoding/binary" "io" "gopkg.in/src-d/go-git.v4/plumbing" ) // Read reads structured binary data from r into data. Bytes are read and // decoded in BigEndian order // https://golang.org/pkg/encoding/binary/#Read func Read(r io.Reader, data ...interface{}) error { for _, v := range data { if err := binary.Read(r, binary.BigEndian, v); err != nil { return err } } return nil } // ReadUntil reads from r untin delim is found func ReadUntil(r io.Reader, delim byte) ([]byte, error) { var buf [1]byte value := make([]byte, 0, 16) for { if _, err := io.ReadFull(r, buf[:]); err != nil { if err == io.EOF { return nil, err } return nil, err } if buf[0] == delim { return value, nil } value = append(value, buf[0]) } } // ReadVariableWidthInt reads and returns an int in Git VLQ special format: // // Ordinary VLQ has some redundancies, example: the number 358 can be // encoded as the 2-octet VLQ 0x8166 or the 3-octet VLQ 0x808166 or the // 4-octet VLQ 0x80808166 and so forth. // // To avoid these redundancies, the VLQ format used in Git removes this // prepending redundancy and extends the representable range of shorter // VLQs by adding an offset to VLQs of 2 or more octets in such a way // that the lowest possible value for such an (N+1)-octet VLQ becomes // exactly one more than the maximum possible value for an N-octet VLQ. // In particular, since a 1-octet VLQ can store a maximum value of 127, // the minimum 2-octet VLQ (0x8000) is assigned the value 128 instead of // 0. Conversely, the maximum value of such a 2-octet VLQ (0xff7f) is // 16511 instead of just 16383. Similarly, the minimum 3-octet VLQ // (0x808000) has a value of 16512 instead of zero, which means // that the maximum 3-octet VLQ (0xffff7f) is 2113663 instead of // just 2097151. And so forth. 
// // This is how the offset is saved in C: // // dheader[pos] = ofs & 127; // while (ofs >>= 7) // dheader[--pos] = 128 | (--ofs & 127); // func ReadVariableWidthInt(r io.Reader) (int64, error) { var c byte if err := Read(r, &c); err != nil { return 0, err } var v = int64(c & maskLength) for c&maskContinue > 0 { v++ if err := Read(r, &c); err != nil { return 0, err } v = (v << lengthBits) + int64(c&maskLength) } return v, nil } const ( maskContinue = uint8(128) // 1000 000 maskLength = uint8(127) // 0111 1111 lengthBits = uint8(7) // subsequent bytes has 7 bits to store the length ) // ReadUint64 reads 8 bytes and returns them as a BigEndian uint32 func ReadUint64(r io.Reader) (uint64, error) { var v uint64 if err := binary.Read(r, binary.BigEndian, &v); err != nil { return 0, err } return v, nil } // ReadUint32 reads 4 bytes and returns them as a BigEndian uint32 func ReadUint32(r io.Reader) (uint32, error) { var v uint32 if err := binary.Read(r, binary.BigEndian, &v); err != nil { return 0, err } return v, nil } // ReadUint16 reads 2 bytes and returns them as a BigEndian uint16 func ReadUint16(r io.Reader) (uint16, error) { var v uint16 if err := binary.Read(r, binary.BigEndian, &v); err != nil { return 0, err } return v, nil } // ReadHash reads a plumbing.Hash from r func ReadHash(r io.Reader) (plumbing.Hash, error) { var h plumbing.Hash if err := binary.Read(r, binary.BigEndian, h[:]); err != nil { return plumbing.ZeroHash, err } return h, nil } const sniffLen = 8000 // IsBinary detects if data is a binary value based on: // http://git.kernel.org/cgit/git/git.git/tree/xdiff-interface.c?id=HEAD#n198 func IsBinary(r io.Reader) (bool, error) { reader := bufio.NewReader(r) c := 0 for { if c == sniffLen { break } b, err := reader.ReadByte() if err == io.EOF { break } if err != nil { return false, err } if b == byte(0) { return true, nil } c++ } return false, nil } 
golang-gopkg-src-d-go-git.v4-4.11.0/utils/binary/read_test.go000066400000000000000000000047251345605224300236340ustar00rootroot00000000000000package binary import ( "bytes" "encoding/binary" "testing" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing" ) func Test(t *testing.T) { TestingT(t) } type BinarySuite struct{} var _ = Suite(&BinarySuite{}) func (s *BinarySuite) TestRead(c *C) { buf := bytes.NewBuffer(nil) err := binary.Write(buf, binary.BigEndian, int64(42)) c.Assert(err, IsNil) err = binary.Write(buf, binary.BigEndian, int32(42)) c.Assert(err, IsNil) var i64 int64 var i32 int32 err = Read(buf, &i64, &i32) c.Assert(err, IsNil) c.Assert(i64, Equals, int64(42)) c.Assert(i32, Equals, int32(42)) } func (s *BinarySuite) TestReadUntil(c *C) { buf := bytes.NewBuffer([]byte("foo bar")) b, err := ReadUntil(buf, ' ') c.Assert(err, IsNil) c.Assert(b, HasLen, 3) c.Assert(string(b), Equals, "foo") } func (s *BinarySuite) TestReadVariableWidthInt(c *C) { buf := bytes.NewBuffer([]byte{129, 110}) i, err := ReadVariableWidthInt(buf) c.Assert(err, IsNil) c.Assert(i, Equals, int64(366)) } func (s *BinarySuite) TestReadVariableWidthIntShort(c *C) { buf := bytes.NewBuffer([]byte{19}) i, err := ReadVariableWidthInt(buf) c.Assert(err, IsNil) c.Assert(i, Equals, int64(19)) } func (s *BinarySuite) TestReadUint32(c *C) { buf := bytes.NewBuffer(nil) err := binary.Write(buf, binary.BigEndian, uint32(42)) c.Assert(err, IsNil) i32, err := ReadUint32(buf) c.Assert(err, IsNil) c.Assert(i32, Equals, uint32(42)) } func (s *BinarySuite) TestReadUint16(c *C) { buf := bytes.NewBuffer(nil) err := binary.Write(buf, binary.BigEndian, uint16(42)) c.Assert(err, IsNil) i32, err := ReadUint16(buf) c.Assert(err, IsNil) c.Assert(i32, Equals, uint16(42)) } func (s *BinarySuite) TestReadHash(c *C) { expected := plumbing.NewHash("43aec75c611f22c73b27ece2841e6ccca592f285") buf := bytes.NewBuffer(nil) err := binary.Write(buf, binary.BigEndian, expected) c.Assert(err, IsNil) hash, err := ReadHash(buf) 
c.Assert(err, IsNil) c.Assert(hash.String(), Equals, expected.String()) } func (s *BinarySuite) TestIsBinary(c *C) { buf := bytes.NewBuffer(nil) buf.Write(bytes.Repeat([]byte{'A'}, sniffLen)) buf.Write([]byte{0}) ok, err := IsBinary(buf) c.Assert(err, IsNil) c.Assert(ok, Equals, false) buf.Reset() buf.Write(bytes.Repeat([]byte{'A'}, sniffLen-1)) buf.Write([]byte{0}) ok, err = IsBinary(buf) c.Assert(err, IsNil) c.Assert(ok, Equals, true) buf.Reset() buf.Write(bytes.Repeat([]byte{'A'}, 10)) ok, err = IsBinary(buf) c.Assert(err, IsNil) c.Assert(ok, Equals, false) } golang-gopkg-src-d-go-git.v4-4.11.0/utils/binary/write.go000066400000000000000000000022211345605224300230010ustar00rootroot00000000000000package binary import ( "encoding/binary" "io" ) // Write writes the binary representation of data into w, using BigEndian order // https://golang.org/pkg/encoding/binary/#Write func Write(w io.Writer, data ...interface{}) error { for _, v := range data { if err := binary.Write(w, binary.BigEndian, v); err != nil { return err } } return nil } func WriteVariableWidthInt(w io.Writer, n int64) error { buf := []byte{byte(n & 0x7f)} n >>= 7 for n != 0 { n-- buf = append([]byte{0x80 | (byte(n & 0x7f))}, buf...) 
n >>= 7 } _, err := w.Write(buf) return err } // WriteUint64 writes the binary representation of a uint64 into w, in BigEndian // order func WriteUint64(w io.Writer, value uint64) error { return binary.Write(w, binary.BigEndian, value) } // WriteUint32 writes the binary representation of a uint32 into w, in BigEndian // order func WriteUint32(w io.Writer, value uint32) error { return binary.Write(w, binary.BigEndian, value) } // WriteUint16 writes the binary representation of a uint16 into w, in BigEndian // order func WriteUint16(w io.Writer, value uint16) error { return binary.Write(w, binary.BigEndian, value) } golang-gopkg-src-d-go-git.v4-4.11.0/utils/binary/write_test.go000066400000000000000000000026121345605224300240440ustar00rootroot00000000000000package binary import ( "bytes" "encoding/binary" . "gopkg.in/check.v1" ) func (s *BinarySuite) TestWrite(c *C) { expected := bytes.NewBuffer(nil) err := binary.Write(expected, binary.BigEndian, int64(42)) c.Assert(err, IsNil) err = binary.Write(expected, binary.BigEndian, int32(42)) c.Assert(err, IsNil) buf := bytes.NewBuffer(nil) err = Write(buf, int64(42), int32(42)) c.Assert(err, IsNil) c.Assert(buf, DeepEquals, expected) } func (s *BinarySuite) TestWriteUint32(c *C) { expected := bytes.NewBuffer(nil) err := binary.Write(expected, binary.BigEndian, int32(42)) c.Assert(err, IsNil) buf := bytes.NewBuffer(nil) err = WriteUint32(buf, 42) c.Assert(err, IsNil) c.Assert(buf, DeepEquals, expected) } func (s *BinarySuite) TestWriteUint16(c *C) { expected := bytes.NewBuffer(nil) err := binary.Write(expected, binary.BigEndian, int16(42)) c.Assert(err, IsNil) buf := bytes.NewBuffer(nil) err = WriteUint16(buf, 42) c.Assert(err, IsNil) c.Assert(buf, DeepEquals, expected) } func (s *BinarySuite) TestWriteVariableWidthInt(c *C) { buf := bytes.NewBuffer(nil) err := WriteVariableWidthInt(buf, 366) c.Assert(err, IsNil) c.Assert(buf.Bytes(), DeepEquals, []byte{129, 110}) } func (s *BinarySuite) TestWriteVariableWidthIntShort(c *C) 
{ buf := bytes.NewBuffer(nil) err := WriteVariableWidthInt(buf, 19) c.Assert(err, IsNil) c.Assert(buf.Bytes(), DeepEquals, []byte{19}) } golang-gopkg-src-d-go-git.v4-4.11.0/utils/blame2humantest.bash000077500000000000000000000020721345605224300237750ustar00rootroot00000000000000#!/bin/bash set -e repo=`git remote show origin | grep Fetch | cut -d' ' -f5` branch="master" if [ "$#" -eq 1 ] ; then commit=`git log | head -1 | cut -d' ' -f2` path=$1 elif [ "$#" -eq 2 ] ; then commit=$1 path=$2 else echo "bad number of parameters" > /dev/stderr echo > /dev/stderr echo " try with: [commit] path" > /dev/stderr exit fi blames=`git blame --root $path | cut -d' ' -f1` declare -a blame i=0 for shortBlame in $blames ; do blame[$i]=`git show $shortBlame | head -1 | cut -d' ' -f2` i=`expr $i + 1` done # some remotes have the .git, other don't, # repoDot makes sure all have repoDot="${repo%.git}.git" echo -e "\t{\"${repoDot}\", \"${branch}\", \"${commit}\", \"${path}\", concat(&[]string{}," prev="" count=1 for i in ${blame[@]} ; do if [ "${prev}" == "" ] ; then prev=$i elif [ "$prev" == "$i" ] ; then count=`expr $count + 1` else echo -e "\t\trepeat(\"${prev}\", $count)," count=1 prev=$i fi done echo -e "\t\trepeat(\"${prev}\", $count)," echo -e "\t)}," golang-gopkg-src-d-go-git.v4-4.11.0/utils/diff/000077500000000000000000000000001345605224300207475ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/utils/diff/diff.go000066400000000000000000000040751345605224300222140ustar00rootroot00000000000000// Package diff implements line oriented diffs, similar to the ancient // Unix diff command. // // The current implementation is just a wrapper around Sergi's // go-diff/diffmatchpatch library, which is a go port of Neil // Fraser's google-diff-match-patch code package diff import ( "bytes" "time" "github.com/sergi/go-diff/diffmatchpatch" ) // Do computes the (line oriented) modifications needed to turn the src // string into the dst string. 
The underlying algorithm is Meyers, // its complexity is O(N*d) where N is min(lines(src), lines(dst)) and d // is the size of the diff. func Do(src, dst string) (diffs []diffmatchpatch.Diff) { // the default timeout is time.Second which may be too small under heavy load return DoWithTimeout(src, dst, time.Hour) } // DoWithTimeout computes the (line oriented) modifications needed to turn the src // string into the dst string. The `timeout` argument specifies the maximum // amount of time it is allowed to spend in this function. If the timeout // is exceeded, the parts of the strings which were not considered are turned into // a bulk delete+insert and the half-baked suboptimal result is returned at once. // The underlying algorithm is Meyers, its complexity is O(N*d) where N is // min(lines(src), lines(dst)) and d is the size of the diff. func DoWithTimeout (src, dst string, timeout time.Duration) (diffs []diffmatchpatch.Diff) { dmp := diffmatchpatch.New() dmp.DiffTimeout = timeout wSrc, wDst, warray := dmp.DiffLinesToRunes(src, dst) diffs = dmp.DiffMainRunes(wSrc, wDst, false) diffs = dmp.DiffCharsToLines(diffs, warray) return diffs } // Dst computes and returns the destination text. func Dst(diffs []diffmatchpatch.Diff) string { var text bytes.Buffer for _, d := range diffs { if d.Type != diffmatchpatch.DiffDelete { text.WriteString(d.Text) } } return text.String() } // Src computes and returns the source text func Src(diffs []diffmatchpatch.Diff) string { var text bytes.Buffer for _, d := range diffs { if d.Type != diffmatchpatch.DiffInsert { text.WriteString(d.Text) } } return text.String() } golang-gopkg-src-d-go-git.v4-4.11.0/utils/diff/diff_ext_test.go000066400000000000000000000037421345605224300241330ustar00rootroot00000000000000package diff_test import ( "testing" "gopkg.in/src-d/go-git.v4/utils/diff" "github.com/sergi/go-diff/diffmatchpatch" . 
"gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type suiteCommon struct{} var _ = Suite(&suiteCommon{}) var diffTests = [...]struct { src string // the src string to diff dst string // the dst string to diff }{ // equal inputs {"", ""}, {"a", "a"}, {"a\n", "a\n"}, {"a\nb", "a\nb"}, {"a\nb\n", "a\nb\n"}, {"a\nb\nc", "a\nb\nc"}, {"a\nb\nc\n", "a\nb\nc\n"}, // missing '\n' {"", "\n"}, {"\n", ""}, {"a", "a\n"}, {"a\n", "a"}, {"a\nb", "a\nb"}, {"a\nb\n", "a\nb\n"}, {"a\nb\nc", "a\nb\nc"}, {"a\nb\nc\n", "a\nb\nc\n"}, // generic {"a\nbbbbb\n\tccc\ndd\n\tfffffffff\n", "bbbbb\n\tccc\n\tDD\n\tffff\n"}, } func (s *suiteCommon) TestAll(c *C) { for i, t := range diffTests { diffs := diff.Do(t.src, t.dst) src := diff.Src(diffs) dst := diff.Dst(diffs) c.Assert(src, Equals, t.src, Commentf("subtest %d, src=%q, dst=%q, bad calculated src", i, t.src, t.dst)) c.Assert(dst, Equals, t.dst, Commentf("subtest %d, src=%q, dst=%q, bad calculated dst", i, t.src, t.dst)) } } var doTests = [...]struct { src, dst string exp []diffmatchpatch.Diff }{ { src: "", dst: "", exp: []diffmatchpatch.Diff{}, }, { src: "a", dst: "a", exp: []diffmatchpatch.Diff{ { Type: 0, Text: "a", }, }, }, { src: "", dst: "abc\ncba", exp: []diffmatchpatch.Diff{ { Type: 1, Text: "abc\ncba", }, }, }, { src: "abc\ncba", dst: "", exp: []diffmatchpatch.Diff{ { Type: -1, Text: "abc\ncba", }, }, }, { src: "abc\nbcd\ncde", dst: "000\nabc\n111\nBCD\n", exp: []diffmatchpatch.Diff{ {Type: 1, Text: "000\n"}, {Type: 0, Text: "abc\n"}, {Type: -1, Text: "bcd\ncde"}, {Type: 1, Text: "111\nBCD\n"}, }, }, } func (s *suiteCommon) TestDo(c *C) { for i, t := range doTests { diffs := diff.Do(t.src, t.dst) c.Assert(diffs, DeepEquals, t.exp, Commentf("subtest %d", i)) } } 
golang-gopkg-src-d-go-git.v4-4.11.0/utils/ioutil/000077500000000000000000000000001345605224300213445ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/utils/ioutil/common.go000066400000000000000000000107771345605224300231770ustar00rootroot00000000000000// Package ioutil implements some I/O utility functions. package ioutil import ( "bufio" "context" "errors" "io" "github.com/jbenet/go-context/io" ) type readPeeker interface { io.Reader Peek(int) ([]byte, error) } var ( ErrEmptyReader = errors.New("reader is empty") ) // NonEmptyReader takes a reader and returns it if it is not empty, or // `ErrEmptyReader` if it is empty. If there is an error when reading the first // byte of the given reader, it will be propagated. func NonEmptyReader(r io.Reader) (io.Reader, error) { pr, ok := r.(readPeeker) if !ok { pr = bufio.NewReader(r) } _, err := pr.Peek(1) if err == io.EOF { return nil, ErrEmptyReader } if err != nil { return nil, err } return pr, nil } type readCloser struct { io.Reader closer io.Closer } func (r *readCloser) Close() error { return r.closer.Close() } // NewReadCloser creates an `io.ReadCloser` with the given `io.Reader` and // `io.Closer`. func NewReadCloser(r io.Reader, c io.Closer) io.ReadCloser { return &readCloser{Reader: r, closer: c} } type writeCloser struct { io.Writer closer io.Closer } func (r *writeCloser) Close() error { return r.closer.Close() } // NewWriteCloser creates an `io.WriteCloser` with the given `io.Writer` and // `io.Closer`. func NewWriteCloser(w io.Writer, c io.Closer) io.WriteCloser { return &writeCloser{Writer: w, closer: c} } type writeNopCloser struct { io.Writer } func (writeNopCloser) Close() error { return nil } // WriteNopCloser returns a WriteCloser with a no-op Close method wrapping // the provided Writer w. func WriteNopCloser(w io.Writer) io.WriteCloser { return writeNopCloser{w} } // CheckClose calls Close on the given io.Closer. 
If the given *error points to // nil, it will be assigned the error returned by Close. Otherwise, any error // returned by Close will be ignored. CheckClose is usually called with defer. func CheckClose(c io.Closer, err *error) { if cerr := c.Close(); cerr != nil && *err == nil { *err = cerr } } // NewContextWriter wraps a writer to make it respect given Context. // If there is a blocking write, the returned Writer will return whenever the // context is cancelled (the return values are n=0 and err=ctx.Err()). func NewContextWriter(ctx context.Context, w io.Writer) io.Writer { return ctxio.NewWriter(ctx, w) } // NewContextReader wraps a reader to make it respect given Context. // If there is a blocking read, the returned Reader will return whenever the // context is cancelled (the return values are n=0 and err=ctx.Err()). func NewContextReader(ctx context.Context, r io.Reader) io.Reader { return ctxio.NewReader(ctx, r) } // NewContextWriteCloser as NewContextWriter but with io.Closer interface. func NewContextWriteCloser(ctx context.Context, w io.WriteCloser) io.WriteCloser { ctxw := ctxio.NewWriter(ctx, w) return NewWriteCloser(ctxw, w) } // NewContextReadCloser as NewContextReader but with io.Closer interface. func NewContextReadCloser(ctx context.Context, r io.ReadCloser) io.ReadCloser { ctxr := ctxio.NewReader(ctx, r) return NewReadCloser(ctxr, r) } type readerOnError struct { io.Reader notify func(error) } // NewReaderOnError returns a io.Reader that call the notify function when an // unexpected (!io.EOF) error happens, after call Read function. func NewReaderOnError(r io.Reader, notify func(error)) io.Reader { return &readerOnError{r, notify} } // NewReadCloserOnError returns a io.ReadCloser that call the notify function // when an unexpected (!io.EOF) error happens, after call Read function. 
func NewReadCloserOnError(r io.ReadCloser, notify func(error)) io.ReadCloser { return NewReadCloser(NewReaderOnError(r, notify), r) } func (r *readerOnError) Read(buf []byte) (n int, err error) { n, err = r.Reader.Read(buf) if err != nil && err != io.EOF { r.notify(err) } return } type writerOnError struct { io.Writer notify func(error) } // NewWriterOnError returns a io.Writer that call the notify function when an // unexpected (!io.EOF) error happens, after call Write function. func NewWriterOnError(w io.Writer, notify func(error)) io.Writer { return &writerOnError{w, notify} } // NewWriteCloserOnError returns a io.WriteCloser that call the notify function //when an unexpected (!io.EOF) error happens, after call Write function. func NewWriteCloserOnError(w io.WriteCloser, notify func(error)) io.WriteCloser { return NewWriteCloser(NewWriterOnError(w, notify), w) } func (r *writerOnError) Write(p []byte) (n int, err error) { n, err = r.Writer.Write(p) if err != nil && err != io.EOF { r.notify(err) } return } golang-gopkg-src-d-go-git.v4-4.11.0/utils/ioutil/common_test.go000066400000000000000000000071121345605224300242230ustar00rootroot00000000000000package ioutil import ( "bytes" "context" "io/ioutil" "strings" "testing" . 
"gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type CommonSuite struct{} var _ = Suite(&CommonSuite{}) type closer struct { called int } func (c *closer) Close() error { c.called++ return nil } func (s *CommonSuite) TestNonEmptyReader_Empty(c *C) { var buf bytes.Buffer r, err := NonEmptyReader(&buf) c.Assert(err, Equals, ErrEmptyReader) c.Assert(r, IsNil) } func (s *CommonSuite) TestNonEmptyReader_NonEmpty(c *C) { buf := bytes.NewBuffer([]byte("1")) r, err := NonEmptyReader(buf) c.Assert(err, IsNil) c.Assert(r, NotNil) read, err := ioutil.ReadAll(r) c.Assert(err, IsNil) c.Assert(string(read), Equals, "1") } func (s *CommonSuite) TestNewReadCloser(c *C) { buf := bytes.NewBuffer([]byte("1")) closer := &closer{} r := NewReadCloser(buf, closer) read, err := ioutil.ReadAll(r) c.Assert(err, IsNil) c.Assert(string(read), Equals, "1") c.Assert(r.Close(), IsNil) c.Assert(closer.called, Equals, 1) } func (s *CommonSuite) TestNewContextReader(c *C) { buf := bytes.NewBuffer([]byte("12")) ctx, close := context.WithCancel(context.Background()) r := NewContextReader(ctx, buf) b := make([]byte, 1) n, err := r.Read(b) c.Assert(n, Equals, 1) c.Assert(err, IsNil) close() n, err = r.Read(b) c.Assert(n, Equals, 0) c.Assert(err, NotNil) } func (s *CommonSuite) TestNewContextReadCloser(c *C) { buf := NewReadCloser(bytes.NewBuffer([]byte("12")), &closer{}) ctx, close := context.WithCancel(context.Background()) r := NewContextReadCloser(ctx, buf) b := make([]byte, 1) n, err := r.Read(b) c.Assert(n, Equals, 1) c.Assert(err, IsNil) close() n, err = r.Read(b) c.Assert(n, Equals, 0) c.Assert(err, NotNil) c.Assert(r.Close(), IsNil) } func (s *CommonSuite) TestNewContextWriter(c *C) { buf := bytes.NewBuffer(nil) ctx, close := context.WithCancel(context.Background()) r := NewContextWriter(ctx, buf) n, err := r.Write([]byte("1")) c.Assert(n, Equals, 1) c.Assert(err, IsNil) close() n, err = r.Write([]byte("1")) c.Assert(n, Equals, 0) c.Assert(err, NotNil) } func (s *CommonSuite) 
TestNewContextWriteCloser(c *C) { buf := NewWriteCloser(bytes.NewBuffer(nil), &closer{}) ctx, close := context.WithCancel(context.Background()) w := NewContextWriteCloser(ctx, buf) n, err := w.Write([]byte("1")) c.Assert(n, Equals, 1) c.Assert(err, IsNil) close() n, err = w.Write([]byte("1")) c.Assert(n, Equals, 0) c.Assert(err, NotNil) c.Assert(w.Close(), IsNil) } func (s *CommonSuite) TestNewWriteCloserOnError(c *C) { buf := NewWriteCloser(bytes.NewBuffer(nil), &closer{}) ctx, close := context.WithCancel(context.Background()) var called error w := NewWriteCloserOnError(NewContextWriteCloser(ctx, buf), func(err error) { called = err }) close() w.Write(nil) c.Assert(called, NotNil) } func (s *CommonSuite) TestNewReadCloserOnError(c *C) { buf := NewReadCloser(bytes.NewBuffer(nil), &closer{}) ctx, close := context.WithCancel(context.Background()) var called error w := NewReadCloserOnError(NewContextReadCloser(ctx, buf), func(err error) { called = err }) close() w.Read(nil) c.Assert(called, NotNil) } func ExampleCheckClose() { // CheckClose is commonly used with named return values f := func() (err error) { // Get a io.ReadCloser r := ioutil.NopCloser(strings.NewReader("foo")) // defer CheckClose call with an io.Closer and pointer to error defer CheckClose(r, &err) // ... work with r ... // if err is not nil, CheckClose will assign any close errors to it return err } err := f() if err != nil { panic(err) } } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/000077500000000000000000000000001345605224300222025ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/change.go000066400000000000000000000064261345605224300237660ustar00rootroot00000000000000package merkletrie import ( "fmt" "io" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) // Action values represent the kind of things a Change can represent: // insertion, deletions or modifications of files. type Action int // The set of possible actions in a change. 
const ( _ Action = iota Insert Delete Modify ) // String returns the action as a human readable text. func (a Action) String() string { switch a { case Insert: return "Insert" case Delete: return "Delete" case Modify: return "Modify" default: panic(fmt.Sprintf("unsupported action: %d", a)) } } // A Change value represent how a noder has change between to merkletries. type Change struct { // The noder before the change or nil if it was inserted. From noder.Path // The noder after the change or nil if it was deleted. To noder.Path } // Action is convenience method that returns what Action c represents. func (c *Change) Action() (Action, error) { if c.From == nil && c.To == nil { return Action(0), fmt.Errorf("malformed change: nil from and to") } if c.From == nil { return Insert, nil } if c.To == nil { return Delete, nil } return Modify, nil } // NewInsert returns a new Change representing the insertion of n. func NewInsert(n noder.Path) Change { return Change{To: n} } // NewDelete returns a new Change representing the deletion of n. func NewDelete(n noder.Path) Change { return Change{From: n} } // NewModify returns a new Change representing that a has been modified and // it is now b. func NewModify(a, b noder.Path) Change { return Change{ From: a, To: b, } } // String returns a single change in human readable form, using the // format: '<' + action + space + path + '>'. The contents of the file // before or after the change are not included in this format. // // Example: inserting a file at the path a/b/c.txt will return "". func (c Change) String() string { action, err := c.Action() if err != nil { panic(err) } var path string if action == Delete { path = c.From.String() } else { path = c.To.String() } return fmt.Sprintf("<%s %s>", action, path) } // Changes is a list of changes between to merkletries. type Changes []Change // NewChanges returns an empty list of changes. func NewChanges() Changes { return Changes{} } // Add adds the change c to the list of changes. 
func (l *Changes) Add(c Change) { *l = append(*l, c) } // AddRecursiveInsert adds the required changes to insert all the // file-like noders found in root, recursively. func (l *Changes) AddRecursiveInsert(root noder.Path) error { return l.addRecursive(root, NewInsert) } // AddRecursiveDelete adds the required changes to delete all the // file-like noders found in root, recursively. func (l *Changes) AddRecursiveDelete(root noder.Path) error { return l.addRecursive(root, NewDelete) } type noderToChangeFn func(noder.Path) Change // NewInsert or NewDelete func (l *Changes) addRecursive(root noder.Path, ctor noderToChangeFn) error { if !root.IsDir() { l.Add(ctor(root)) return nil } i, err := NewIterFromPath(root) if err != nil { return err } var current noder.Path for { if current, err = i.Step(); err != nil { if err == io.EOF { break } return err } if current.IsDir() { continue } l.Add(ctor(current)) } return nil } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/change_test.go000066400000000000000000000041221345605224300250140ustar00rootroot00000000000000package merkletrie_test import ( "gopkg.in/src-d/go-git.v4/utils/merkletrie" "gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/fsnoder" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" . 
"gopkg.in/check.v1" ) type ChangeSuite struct{} var _ = Suite(&ChangeSuite{}) func (s *ChangeSuite) TestActionString(c *C) { action := merkletrie.Insert c.Assert(action.String(), Equals, "Insert") action = merkletrie.Delete c.Assert(action.String(), Equals, "Delete") action = merkletrie.Modify c.Assert(action.String(), Equals, "Modify") } func (s *ChangeSuite) TestUnsupportedAction(c *C) { a := merkletrie.Action(42) c.Assert(a.String, PanicMatches, "unsupported action.*") } func (s ChangeSuite) TestNewInsert(c *C) { tree, err := fsnoder.New("(a(b(z<>)))") c.Assert(err, IsNil) path := find(c, tree, "z") change := merkletrie.NewInsert(path) c.Assert(change.String(), Equals, "") shortPath := noder.Path([]noder.Noder{path.Last()}) change = merkletrie.NewInsert(shortPath) c.Assert(change.String(), Equals, "") } func (s ChangeSuite) TestNewDelete(c *C) { tree, err := fsnoder.New("(a(b(z<>)))") c.Assert(err, IsNil) path := find(c, tree, "z") change := merkletrie.NewDelete(path) c.Assert(change.String(), Equals, "") shortPath := noder.Path([]noder.Noder{path.Last()}) change = merkletrie.NewDelete(shortPath) c.Assert(change.String(), Equals, "") } func (s ChangeSuite) TestNewModify(c *C) { tree1, err := fsnoder.New("(a(b(z<>)))") c.Assert(err, IsNil) path1 := find(c, tree1, "z") tree2, err := fsnoder.New("(a(b(z<1>)))") c.Assert(err, IsNil) path2 := find(c, tree2, "z") change := merkletrie.NewModify(path1, path2) c.Assert(change.String(), Equals, "") shortPath1 := noder.Path([]noder.Noder{path1.Last()}) shortPath2 := noder.Path([]noder.Noder{path2.Last()}) change = merkletrie.NewModify(shortPath1, shortPath2) c.Assert(change.String(), Equals, "") } func (s ChangeSuite) TestMalformedChange(c *C) { change := merkletrie.Change{} c.Assert(change.String, PanicMatches, "malformed change.*") } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/difftree.go000066400000000000000000000342021345605224300243220ustar00rootroot00000000000000package merkletrie // The focus of this 
difftree implementation is to save time by // skipping whole directories if their hash is the same in both // trees. // // The diff algorithm implemented here is based on the doubleiter // type defined in this same package; we will iterate over both // trees at the same time, while comparing the current noders in // each iterator. Depending on how they differ we will output the // corresponding chages and move the iterators further over both // trees. // // The table bellow show all the possible comparison results, along // with what changes should we produce and how to advance the // iterators. // // The table is implemented by the switches in this function, // diffTwoNodes, diffTwoNodesSameName and diffTwoDirs. // // Many Bothans died to bring us this information, make sure you // understand the table before modifying this code. // # Cases // // When comparing noders in both trees you will found yourself in // one of 169 possible cases, but if we ignore moves, we can // simplify a lot the search space into the following table: // // - "-": nothing, no file or directory // - a<>: an empty file named "a". // - a<1>: a file named "a", with "1" as its contents. // - a<2>: a file named "a", with "2" as its contents. // - a(): an empty dir named "a". // - a(...): a dir named "a", with some files and/or dirs inside (possibly // empty). // - a(;;;): a dir named "a", with some other files and/or dirs inside // (possibly empty), which different from the ones in "a(...)". // // \ to - a<> a<1> a<2> a() a(...) a(;;;) // from \ // - 00 01 02 03 04 05 06 // a<> 10 11 12 13 14 15 16 // a<1> 20 21 22 23 24 25 26 // a<2> 30 31 32 33 34 35 36 // a() 40 41 42 43 44 45 46 // a(...) 50 51 52 53 54 55 56 // a(;;;) 60 61 62 63 64 65 66 // // Every (from, to) combination in the table is a special case, but // some of them can be merged into some more general cases, for // instance 11 and 22 can be merged into the general case: both // noders are equal. 
// // Here is a full list of all the cases that are similar and how to // merge them together into more general cases. Each general case // is labeled with an uppercase letter for further reference, and it // is followed by the pseudocode of the checks you have to perfrom // on both noders to see if you are in such a case, the actions to // perform (i.e. what changes to output) and how to advance the // iterators of each tree to continue the comparison process. // // ## A. Impossible: 00 // // ## B. Same thing on both sides: 11, 22, 33, 44, 55, 66 // - check: `SameName() && SameHash()` // - action: do nothing. // - advance: `FromNext(); ToNext()` // // ### C. To was created: 01, 02, 03, 04, 05, 06 // - check: `DifferentName() && ToBeforeFrom()` // - action: inserRecursively(to) // - advance: `ToNext()` // // ### D. From was deleted: 10, 20, 30, 40, 50, 60 // - check: `DifferentName() && FromBeforeTo()` // - action: `DeleteRecursively(from)` // - advance: `FromNext()` // // ### E. Empty file to file with contents: 12, 13 // - check: `SameName() && DifferentHash() && FromIsFile() && // ToIsFile() && FromIsEmpty()` // - action: `modifyFile(from, to)` // - advance: `FromNext()` or `FromStep()` // // ### E'. file with contents to empty file: 21, 31 // - check: `SameName() && DifferentHash() && FromIsFile() && // ToIsFile() && ToIsEmpty()` // - action: `modifyFile(from, to)` // - advance: `FromNext()` or `FromStep()` // // ### F. empty file to empty dir with the same name: 14 // - check: `SameName() && FromIsFile() && FromIsEmpty() && // ToIsDir() && ToIsEmpty()` // - action: `DeleteFile(from); InsertEmptyDir(to)` // - advance: `FromNext(); ToNext()` // // ### F'. empty dir to empty file of the same name: 41 // - check: `SameName() && FromIsDir() && FromIsEmpty && // ToIsFile() && ToIsEmpty()` // - action: `DeleteEmptyDir(from); InsertFile(to)` // - advance: `FromNext(); ToNext()` or step for any of them. // // ### G. 
empty file to non-empty dir of the same name: 15, 16 // - check: `SameName() && FromIsFile() && ToIsDir() && // FromIsEmpty() && ToIsNotEmpty()` // - action: `DeleteFile(from); InsertDirRecursively(to)` // - advance: `FromNext(); ToNext()` // // ### G'. non-empty dir to empty file of the same name: 51, 61 // - check: `SameName() && FromIsDir() && FromIsNotEmpty() && // ToIsFile() && FromIsEmpty()` // - action: `DeleteDirRecursively(from); InsertFile(to)` // - advance: `FromNext(); ToNext()` // // ### H. modify file contents: 23, 32 // - check: `SameName() && FromIsFile() && ToIsFile() && // FromIsNotEmpty() && ToIsNotEmpty()` // - action: `ModifyFile(from, to)` // - advance: `FromNext(); ToNext()` // // ### I. file with contents to empty dir: 24, 34 // - check: `SameName() && DifferentHash() && FromIsFile() && // FromIsNotEmpty() && ToIsDir() && ToIsEmpty()` // - action: `DeleteFile(from); InsertEmptyDir(to)` // - advance: `FromNext(); ToNext()` // // ### I'. empty dir to file with contents: 42, 43 // - check: `SameName() && DifferentHash() && FromIsDir() && // FromIsEmpty() && ToIsFile() && ToIsEmpty()` // - action: `DeleteDir(from); InsertFile(to)` // - advance: `FromNext(); ToNext()` // // ### J. file with contents to dir with contetns: 25, 26, 35, 36 // - check: `SameName() && DifferentHash() && FromIsFile() && // FromIsNotEmpty() && ToIsDir() && ToIsNotEmpty()` // - action: `DeleteFile(from); InsertDirRecursively(to)` // - advance: `FromNext(); ToNext()` // // ### J'. dir with contetns to file with contents: 52, 62, 53, 63 // - check: `SameName() && DifferentHash() && FromIsDir() && // FromIsNotEmpty() && ToIsFile() && ToIsNotEmpty()` // - action: `DeleteDirRecursively(from); InsertFile(to)` // - advance: `FromNext(); ToNext()` // // ### K. 
empty dir to dir with contents: 45, 46 // - check: `SameName() && DifferentHash() && FromIsDir() && // FromIsEmpty() && ToIsDir() && ToIsNotEmpty()` // - action: `InsertChildrenRecursively(to)` // - advance: `FromNext(); ToNext()` // // ### K'. dir with contents to empty dir: 54, 64 // - check: `SameName() && DifferentHash() && FromIsDir() && // FromIsEmpty() && ToIsDir() && ToIsNotEmpty()` // - action: `DeleteChildrenRecursively(from)` // - advance: `FromNext(); ToNext()` // // ### L. dir with contents to dir with different contents: 56, 65 // - check: `SameName() && DifferentHash() && FromIsDir() && // FromIsNotEmpty() && ToIsDir() && ToIsNotEmpty()` // - action: nothing // - advance: `FromStep(); ToStep()` // // // All these cases can be further simplified by a truth table // reduction process, in which we gather similar checks together to // make the final code easier to read and understand. // // The first 6 columns are the outputs of the checks to perform on // both noders. I have labeled them 1 to 6, this is what they mean: // // 1: SameName() // 2: SameHash() // 3: FromIsDir() // 4: ToIsDir() // 5: FromIsEmpty() // 6: ToIsEmpty() // // The from and to columns are a fsnoder example of the elements // that you will find on each tree under the specified comparison // results (columns 1 to 6). // // The type column identifies the case we are into, from the list above. // // The type' column identifies the new set of reduced cases, using // lowercase letters, and they are explained after the table. // // The last column is the set of actions and advances for each case. // // "---" means impossible except in case of hash collision. // // advance meaning: // - NN: from.Next(); to.Next() // - SS: from.Step(); to.Step() // // 1 2 3 4 5 6 | from | to |type|type'|action ; advance // ------------+--------+--------+----+------------------------------------ // 0 0 0 0 0 0 | | | | | if !SameName() { // . | | | | | if FromBeforeTo() { // . 
| | | D | d | delete(from); from.Next() // . | | | | | } else { // . | | | C | c | insert(to); to.Next() // . | | | | | } // 0 1 1 1 1 1 | | | | | } // 1 0 0 0 0 0 | a<1> | a<2> | H | e | modify(from, to); NN // 1 0 0 0 0 1 | a<1> | a<> | E' | e | modify(from, to); NN // 1 0 0 0 1 0 | a<> | a<1> | E | e | modify(from, to); NN // 1 0 0 0 1 1 | ---- | ---- | | e | // 1 0 0 1 0 0 | a<1> | a(...) | J | f | delete(from); insert(to); NN // 1 0 0 1 0 1 | a<1> | a() | I | f | delete(from); insert(to); NN // 1 0 0 1 1 0 | a<> | a(...) | G | f | delete(from); insert(to); NN // 1 0 0 1 1 1 | a<> | a() | F | f | delete(from); insert(to); NN // 1 0 1 0 0 0 | a(...) | a<1> | J' | f | delete(from); insert(to); NN // 1 0 1 0 0 1 | a(...) | a<> | G' | f | delete(from); insert(to); NN // 1 0 1 0 1 0 | a() | a<1> | I' | f | delete(from); insert(to); NN // 1 0 1 0 1 1 | a() | a<> | F' | f | delete(from); insert(to); NN // 1 0 1 1 0 0 | a(...) | a(;;;) | L | g | nothing; SS // 1 0 1 1 0 1 | a(...) | a() | K' | h | deleteChidren(from); NN // 1 0 1 1 1 0 | a() | a(...) | K | i | insertChildren(to); NN // 1 0 1 1 1 1 | ---- | ---- | | | // 1 1 0 0 0 0 | a<1> | a<1> | B | b | nothing; NN // 1 1 0 0 0 1 | ---- | ---- | | b | // 1 1 0 0 1 0 | ---- | ---- | | b | // 1 1 0 0 1 1 | a<> | a<> | B | b | nothing; NN // 1 1 0 1 0 0 | ---- | ---- | | b | // 1 1 0 1 0 1 | ---- | ---- | | b | // 1 1 0 1 1 0 | ---- | ---- | | b | // 1 1 0 1 1 1 | ---- | ---- | | b | // 1 1 1 0 0 0 | ---- | ---- | | b | // 1 1 1 0 0 1 | ---- | ---- | | b | // 1 1 1 0 1 0 | ---- | ---- | | b | // 1 1 1 0 1 1 | ---- | ---- | | b | // 1 1 1 1 0 0 | a(...) | a(...) 
| B | b | nothing; NN // 1 1 1 1 0 1 | ---- | ---- | | b | // 1 1 1 1 1 0 | ---- | ---- | | b | // 1 1 1 1 1 1 | a() | a() | B | b | nothing; NN // // c and d: // if !SameName() // d if FromBeforeTo() // c else // b: SameName) && sameHash() // e: SameName() && !sameHash() && BothAreFiles() // f: SameName() && !sameHash() && FileAndDir() // g: SameName() && !sameHash() && BothAreDirs() && NoneIsEmpty // i: SameName() && !sameHash() && BothAreDirs() && FromIsEmpty // h: else of i import ( "context" "errors" "fmt" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) var ( ErrCanceled = errors.New("operation canceled") ) // DiffTree calculates the list of changes between two merkletries. It // uses the provided hashEqual callback to compare noders. func DiffTree(fromTree, toTree noder.Noder, hashEqual noder.Equal) (Changes, error) { return DiffTreeContext(context.Background(), fromTree, toTree, hashEqual) } // DiffTree calculates the list of changes between two merkletries. It // uses the provided hashEqual callback to compare noders. 
// Error will be returned if context expires // Provided context must be non nil func DiffTreeContext(ctx context.Context, fromTree, toTree noder.Noder, hashEqual noder.Equal) (Changes, error) { ret := NewChanges() ii, err := newDoubleIter(fromTree, toTree, hashEqual) if err != nil { return nil, err } for { select { case <-ctx.Done(): return nil, ErrCanceled default: } from := ii.from.current to := ii.to.current switch r := ii.remaining(); r { case noMoreNoders: return ret, nil case onlyFromRemains: if err = ret.AddRecursiveDelete(from); err != nil { return nil, err } if err = ii.nextFrom(); err != nil { return nil, err } case onlyToRemains: if err = ret.AddRecursiveInsert(to); err != nil { return nil, err } if err = ii.nextTo(); err != nil { return nil, err } case bothHaveNodes: if err = diffNodes(&ret, ii); err != nil { return nil, err } default: panic(fmt.Sprintf("unknown remaining value: %d", r)) } } } func diffNodes(changes *Changes, ii *doubleIter) error { from := ii.from.current to := ii.to.current var err error // compare their full paths as strings switch from.Compare(to) { case -1: if err = changes.AddRecursiveDelete(from); err != nil { return err } if err = ii.nextFrom(); err != nil { return err } case 1: if err = changes.AddRecursiveInsert(to); err != nil { return err } if err = ii.nextTo(); err != nil { return err } default: if err := diffNodesSameName(changes, ii); err != nil { return err } } return nil } func diffNodesSameName(changes *Changes, ii *doubleIter) error { from := ii.from.current to := ii.to.current status, err := ii.compare() if err != nil { return err } switch { case status.sameHash: // do nothing if err = ii.nextBoth(); err != nil { return err } case status.bothAreFiles: changes.Add(NewModify(from, to)) if err = ii.nextBoth(); err != nil { return err } case status.fileAndDir: if err = changes.AddRecursiveDelete(from); err != nil { return err } if err = changes.AddRecursiveInsert(to); err != nil { return err } if err = ii.nextBoth(); 
err != nil { return err } case status.bothAreDirs: if err = diffDirs(changes, ii); err != nil { return err } default: return fmt.Errorf("bad status from double iterator") } return nil } func diffDirs(changes *Changes, ii *doubleIter) error { from := ii.from.current to := ii.to.current status, err := ii.compare() if err != nil { return err } switch { case status.fromIsEmptyDir: if err = changes.AddRecursiveInsert(to); err != nil { return err } if err = ii.nextBoth(); err != nil { return err } case status.toIsEmptyDir: if err = changes.AddRecursiveDelete(from); err != nil { return err } if err = ii.nextBoth(); err != nil { return err } case !status.fromIsEmptyDir && !status.toIsEmptyDir: // do nothing if err = ii.stepBoth(); err != nil { return err } default: return fmt.Errorf("both dirs are empty but has different hash") } return nil } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/difftree_test.go000066400000000000000000000277701345605224300253750ustar00rootroot00000000000000package merkletrie_test import ( "bytes" ctx "context" "fmt" "reflect" "sort" "strings" "testing" "unicode" "gopkg.in/src-d/go-git.v4/utils/merkletrie" "gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/fsnoder" . 
"gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type DiffTreeSuite struct{} var _ = Suite(&DiffTreeSuite{}) type diffTreeTest struct { from string to string expected string } func (t diffTreeTest) innerRun(c *C, context string, reverse bool) { comment := Commentf("\n%s", context) if reverse { comment = Commentf("%s [REVERSED]", comment.CheckCommentString()) } a, err := fsnoder.New(t.from) c.Assert(err, IsNil, comment) comment = Commentf("%s\n\t from = %s", comment.CheckCommentString(), a) b, err := fsnoder.New(t.to) c.Assert(err, IsNil, comment) comment = Commentf("%s\n\t to = %s", comment.CheckCommentString(), b) expected, err := newChangesFromString(t.expected) c.Assert(err, IsNil, comment) if reverse { a, b = b, a expected = expected.reverse() } comment = Commentf("%s\n\texpected = %s", comment.CheckCommentString(), expected) results, err := merkletrie.DiffTree(a, b, fsnoder.HashEqual) c.Assert(err, IsNil, comment) obtained, err := newChanges(results) c.Assert(err, IsNil, comment) comment = Commentf("%s\n\tobtained = %s", comment.CheckCommentString(), obtained) c.Assert(obtained, changesEquals, expected, comment) } func (t diffTreeTest) innerRunCtx(c *C, context string, reverse bool) { comment := Commentf("\n%s", context) if reverse { comment = Commentf("%s [REVERSED]", comment.CheckCommentString()) } a, err := fsnoder.New(t.from) c.Assert(err, IsNil, comment) comment = Commentf("%s\n\t from = %s", comment.CheckCommentString(), a) b, err := fsnoder.New(t.to) c.Assert(err, IsNil, comment) comment = Commentf("%s\n\t to = %s", comment.CheckCommentString(), b) expected, err := newChangesFromString(t.expected) c.Assert(err, IsNil, comment) if reverse { a, b = b, a expected = expected.reverse() } comment = Commentf("%s\n\texpected = %s", comment.CheckCommentString(), expected) results, err := merkletrie.DiffTreeContext(ctx.Background(), a, b, fsnoder.HashEqual) c.Assert(err, IsNil, comment) obtained, err := newChanges(results) c.Assert(err, IsNil, 
comment) comment = Commentf("%s\n\tobtained = %s", comment.CheckCommentString(), obtained) c.Assert(obtained, changesEquals, expected, comment) } func (t diffTreeTest) run(c *C, context string) { t.innerRun(c, context, false) t.innerRun(c, context, true) t.innerRunCtx(c, context, false) t.innerRunCtx(c, context, true) } type change struct { merkletrie.Action path string } func (c change) String() string { return fmt.Sprintf("<%s %s>", c.Action, c.path) } func (c change) reverse() change { ret := change{ path: c.path, } switch c.Action { case merkletrie.Insert: ret.Action = merkletrie.Delete case merkletrie.Delete: ret.Action = merkletrie.Insert case merkletrie.Modify: ret.Action = merkletrie.Modify default: panic(fmt.Sprintf("unknown action type: %d", c.Action)) } return ret } type changes []change func newChanges(original merkletrie.Changes) (changes, error) { ret := make(changes, len(original)) for i, c := range original { action, err := c.Action() if err != nil { return nil, err } switch action { case merkletrie.Insert: ret[i] = change{ Action: merkletrie.Insert, path: c.To.String(), } case merkletrie.Delete: ret[i] = change{ Action: merkletrie.Delete, path: c.From.String(), } case merkletrie.Modify: ret[i] = change{ Action: merkletrie.Modify, path: c.From.String(), } default: panic(fmt.Sprintf("unsupported action %d", action)) } } return ret, nil } func newChangesFromString(s string) (changes, error) { ret := make([]change, 0) s = strings.TrimSpace(s) s = removeDuplicatedSpace(s) s = turnSpaceIntoLiteralSpace(s) if s == "" { return ret, nil } for _, chunk := range strings.Split(s, " ") { change := change{ path: string(chunk[1:]), } switch chunk[0] { case '+': change.Action = merkletrie.Insert case '-': change.Action = merkletrie.Delete case '*': change.Action = merkletrie.Modify default: panic(fmt.Sprintf("unsupported action descriptor %q", chunk[0])) } ret = append(ret, change) } return ret, nil } func removeDuplicatedSpace(s string) string { var buf 
bytes.Buffer var lastWasSpace, currentIsSpace bool for _, r := range s { currentIsSpace = unicode.IsSpace(r) if lastWasSpace && currentIsSpace { continue } lastWasSpace = currentIsSpace buf.WriteRune(r) } return buf.String() } func turnSpaceIntoLiteralSpace(s string) string { return strings.Map( func(r rune) rune { if unicode.IsSpace(r) { return ' ' } return r }, s) } func (cc changes) Len() int { return len(cc) } func (cc changes) Swap(i, j int) { cc[i], cc[j] = cc[j], cc[i] } func (cc changes) Less(i, j int) bool { return strings.Compare(cc[i].String(), cc[j].String()) < 0 } func (cc changes) equals(other changes) bool { sort.Sort(cc) sort.Sort(other) return reflect.DeepEqual(cc, other) } func (cc changes) String() string { var buf bytes.Buffer fmt.Fprintf(&buf, "len(%d) [", len(cc)) sep := "" for _, c := range cc { fmt.Fprintf(&buf, "%s%s", sep, c) sep = ", " } buf.WriteByte(']') return buf.String() } func (cc changes) reverse() changes { ret := make(changes, len(cc)) for i, c := range cc { ret[i] = c.reverse() } return ret } type changesEqualsChecker struct { *CheckerInfo } var changesEquals Checker = &changesEqualsChecker{ &CheckerInfo{Name: "changesEquals", Params: []string{"obtained", "expected"}}, } func (checker *changesEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) { a, ok := params[0].(changes) if !ok { return false, "first parameter must be a changes" } b, ok := params[1].(changes) if !ok { return false, "second parameter must be a changes" } return a.equals(b), "" } func do(c *C, list []diffTreeTest) { for i, t := range list { t.run(c, fmt.Sprintf("test #%d:", i)) } } func (s *DiffTreeSuite) TestEmptyVsEmpty(c *C) { do(c, []diffTreeTest{ {"()", "()", ""}, {"A()", "A()", ""}, {"A()", "()", ""}, {"A()", "B()", ""}, }) } func (s *DiffTreeSuite) TestBasicCases(c *C) { do(c, []diffTreeTest{ {"()", "()", ""}, {"()", "(a<>)", "+a"}, {"()", "(a<1>)", "+a"}, {"()", "(a())", ""}, {"()", "(a(b()))", ""}, {"()", "(a(b<>))", 
"+a/b"}, {"()", "(a(b<1>))", "+a/b"}, {"(a<>)", "(a<>)", ""}, {"(a<>)", "(a<1>)", "*a"}, {"(a<>)", "(a())", "-a"}, {"(a<>)", "(a(b()))", "-a"}, {"(a<>)", "(a(b<>))", "-a +a/b"}, {"(a<>)", "(a(b<1>))", "-a +a/b"}, {"(a<>)", "(c())", "-a"}, {"(a<>)", "(c(b()))", "-a"}, {"(a<>)", "(c(b<>))", "-a +c/b"}, {"(a<>)", "(c(b<1>))", "-a +c/b"}, {"(a<>)", "(c(a()))", "-a"}, {"(a<>)", "(c(a<>))", "-a +c/a"}, {"(a<>)", "(c(a<1>))", "-a +c/a"}, {"(a<1>)", "(a<1>)", ""}, {"(a<1>)", "(a<2>)", "*a"}, {"(a<1>)", "(b<1>)", "-a +b"}, {"(a<1>)", "(b<2>)", "-a +b"}, {"(a<1>)", "(a())", "-a"}, {"(a<1>)", "(a(b()))", "-a"}, {"(a<1>)", "(a(b<>))", "-a +a/b"}, {"(a<1>)", "(a(b<1>))", "-a +a/b"}, {"(a<1>)", "(a(b<2>))", "-a +a/b"}, {"(a<1>)", "(c())", "-a"}, {"(a<1>)", "(c(b()))", "-a"}, {"(a<1>)", "(c(b<>))", "-a +c/b"}, {"(a<1>)", "(c(b<1>))", "-a +c/b"}, {"(a<1>)", "(c(b<2>))", "-a +c/b"}, {"(a<1>)", "(c())", "-a"}, {"(a<1>)", "(c(a()))", "-a"}, {"(a<1>)", "(c(a<>))", "-a +c/a"}, {"(a<1>)", "(c(a<1>))", "-a +c/a"}, {"(a<1>)", "(c(a<2>))", "-a +c/a"}, {"(a())", "(a())", ""}, {"(a())", "(b())", ""}, {"(a())", "(a(b()))", ""}, {"(a())", "(b(a()))", ""}, {"(a())", "(a(b<>))", "+a/b"}, {"(a())", "(a(b<1>))", "+a/b"}, {"(a())", "(b(a<>))", "+b/a"}, {"(a())", "(b(a<1>))", "+b/a"}, }) } func (s *DiffTreeSuite) TestHorizontals(c *C) { do(c, []diffTreeTest{ {"()", "(a<> b<>)", "+a +b"}, {"()", "(a<> b<1>)", "+a +b"}, {"()", "(a<> b())", "+a"}, {"()", "(a() b<>)", "+b"}, {"()", "(a<1> b<>)", "+a +b"}, {"()", "(a<1> b<1>)", "+a +b"}, {"()", "(a<1> b<2>)", "+a +b"}, {"()", "(a<1> b())", "+a"}, {"()", "(a() b<1>)", "+b"}, {"()", "(a() b())", ""}, {"()", "(a<> b<> c<> d<>)", "+a +b +c +d"}, {"()", "(a<> b<1> c() d<> e<2> f())", "+a +b +d +e"}, }) } func (s *DiffTreeSuite) TestVerticals(c *C) { do(c, []diffTreeTest{ {"()", "(z<>)", "+z"}, {"()", "(a(z<>))", "+a/z"}, {"()", "(a(b(z<>)))", "+a/b/z"}, {"()", "(a(b(c(z<>))))", "+a/b/c/z"}, {"()", "(a(b(c(d(z<>)))))", "+a/b/c/d/z"}, {"()", 
"(a(b(c(d(z<1>)))))", "+a/b/c/d/z"}, }) } func (s *DiffTreeSuite) TestSingleInserts(c *C) { do(c, []diffTreeTest{ {"()", "(z<>)", "+z"}, {"(a())", "(a(z<>))", "+a/z"}, {"(a())", "(a(b(z<>)))", "+a/b/z"}, {"(a(b(c())))", "(a(b(c(z<>))))", "+a/b/c/z"}, {"(a<> b<> c<>)", "(a<> b<> c<> z<>)", "+z"}, {"(a(b<> c<> d<>))", "(a(b<> c<> d<> z<>))", "+a/z"}, {"(a(b(c<> d<> e<>)))", "(a(b(c<> d<> e<> z<>)))", "+a/b/z"}, {"(a(b<>) f<>)", "(a(b<>) f<> z<>)", "+z"}, {"(a(b<>) f<>)", "(a(b<> z<>) f<>)", "+a/z"}, }) } func (s *DiffTreeSuite) TestDebug(c *C) { do(c, []diffTreeTest{ {"(a(b<>) f<>)", "(a(b<> z<>) f<>)", "+a/z"}, }) } // root // / | \ // / | ---- // f d h -------- // /\ / \ | // e a j b/ g // | / \ | // l n k icm // | // o // | // p/ func (s *DiffTreeSuite) TestCrazy(c *C) { crazy := "(f(e(l<1>) a(n(o(p())) k<1>)) d<1> h(j(i<1> c<2> m<>) b() g<>))" do(c, []diffTreeTest{ { crazy, "()", "-d -f/e/l -f/a/k -h/j/i -h/j/c -h/j/m -h/g", }, { crazy, crazy, "", }, { crazy, "(d<1>)", "-f/e/l -f/a/k -h/j/i -h/j/c -h/j/m -h/g", }, { crazy, "(d<1> h(b() g<>))", "-f/e/l -f/a/k -h/j/i -h/j/c -h/j/m", }, { crazy, "(d<1> f(e(l()) a()) h(b() g<>))", "-f/e/l -f/a/k -h/j/i -h/j/c -h/j/m", }, { crazy, "(d<1> f(e(l<1>) a()) h(b() g<>))", "-f/a/k -h/j/i -h/j/c -h/j/m", }, { crazy, "(d<2> f(e(l<2>) a(s(t<1>))) h(b() g<> r<> j(i<> c<3> m<>)))", "+f/a/s/t +h/r -f/a/k *d *f/e/l *h/j/c *h/j/i", }, { crazy, "(f(e(l<2>) a(n(o(p<1>)) k<>)) h(j(i<1> c<2> m<>) b() g<>))", "*f/e/l +f/a/n/o/p *f/a/k -d", }, { crazy, "(f(e(l<1>) a(n(o(p(r<1>))) k<1>)) d<1> h(j(i<1> c<2> b() m<>) g<1>))", "+f/a/n/o/p/r *h/g", }, }) } func (s *DiffTreeSuite) TestSameNames(c *C) { do(c, []diffTreeTest{ { "(a(a(a<>)))", "(a(a(a<1>)))", "*a/a/a", }, { "(a(b(a<>)))", "(a(b(a<>)) b(a<>))", "+b/a", }, { "(a(b(a<>)))", "(a(b()) b(a<>))", "-a/b/a +b/a", }, }) } func (s *DiffTreeSuite) TestIssue275(c *C) { do(c, []diffTreeTest{ { "(a(b(c.go<1>) b.go<2>))", "(a(b(c.go<1> d.go<3>) b.go<2>))", "+a/b/d.go", }, }) } func (s 
*DiffTreeSuite) TestIssue1057(c *C) { p1 := "TestAppWithUnicodéPath" p2 := "TestAppWithUnicodéPath" c.Assert(p1 == p2, Equals, false) do(c, []diffTreeTest{ { fmt.Sprintf("(%s(x.go<1>))", p1), fmt.Sprintf("(%s(x.go<1>) %s(x.go<1>))", p1, p2), fmt.Sprintf("+%s/x.go", p2), }, }) // swap p1 with p2 do(c, []diffTreeTest{ { fmt.Sprintf("(%s(x.go<1>))", p2), fmt.Sprintf("(%s(x.go<1>) %s(x.go<1>))", p1, p2), fmt.Sprintf("+%s/x.go", p1), }, }) } func (s *DiffTreeSuite) TestCancel(c *C) { t := diffTreeTest{"()", "(a<> b<1> c() d<> e<2> f())", "+a +b +d +e"} comment := Commentf("\n%s", "test cancel:") a, err := fsnoder.New(t.from) c.Assert(err, IsNil, comment) comment = Commentf("%s\n\t from = %s", comment.CheckCommentString(), a) b, err := fsnoder.New(t.to) c.Assert(err, IsNil, comment) comment = Commentf("%s\n\t to = %s", comment.CheckCommentString(), b) expected, err := newChangesFromString(t.expected) c.Assert(err, IsNil, comment) comment = Commentf("%s\n\texpected = %s", comment.CheckCommentString(), expected) context, cancel := ctx.WithCancel(ctx.Background()) cancel() results, err := merkletrie.DiffTreeContext(context, a, b, fsnoder.HashEqual) c.Assert(results, IsNil, comment) c.Assert(err, ErrorMatches, "operation canceled") } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/doc.go000066400000000000000000000026061345605224300233020ustar00rootroot00000000000000/* Package merkletrie provides support for n-ary trees that are at the same time Merkle trees and Radix trees (tries). Git trees are Radix n-ary trees in virtue of the names of their tree entries. At the same time, git trees are Merkle trees thanks to their hashes. This package defines Merkle tries as nodes that should have: - a hash: the Merkle part of the Merkle trie - a key: the Radix part of the Merkle trie The Merkle hash condition is not enforced by this package though. This means that the hash of a node doesn't have to take into account the hashes of their children, which is good for testing purposes. 
Nodes in the Merkle trie are abstracted by the Noder interface. The intended use is that git trees implements this interface, either directly or using a simple wrapper. This package provides an iterator for merkletries that can skip whole directory-like noders and an efficient merkletrie comparison algorithm. When comparing git trees, the simple approach of alphabetically sorting their elements and comparing the resulting lists is too slow as it depends linearly on the number of files in the trees: When a directory has lots of files but none of them has been modified, this approach is very expensive. We can do better by prunning whole directories that have not change, just by looking at their hashes. This package provides the tools to do exactly that. */ package merkletrie golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/doubleiter.go000066400000000000000000000115441345605224300246740ustar00rootroot00000000000000package merkletrie import ( "fmt" "io" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) // A doubleIter is a convenience type to keep track of the current // noders in two merkletries that are going to be iterated in parallel. // It has methods for: // // - iterating over the merkletries, both at the same time or // individually: nextFrom, nextTo, nextBoth, stepBoth // // - checking if there are noders left in one or both of them with the // remaining method and its associated returned type. // // - comparing the current noders of both merkletries in several ways, // with the compare method and its associated returned type. type doubleIter struct { from struct { iter *Iter current noder.Path // nil if no more nodes } to struct { iter *Iter current noder.Path // nil if no more nodes } hashEqual noder.Equal } // NewdoubleIter returns a new doubleIter for the merkletries "from" and // "to". The hashEqual callback function will be used by the doubleIter // to compare the hash of the noders in the merkletries. 
The doubleIter // will be initialized to the first elements in each merkletrie if any. func newDoubleIter(from, to noder.Noder, hashEqual noder.Equal) ( *doubleIter, error) { var ii doubleIter var err error if ii.from.iter, err = NewIter(from); err != nil { return nil, fmt.Errorf("from: %s", err) } if ii.from.current, err = ii.from.iter.Next(); turnEOFIntoNil(err) != nil { return nil, fmt.Errorf("from: %s", err) } if ii.to.iter, err = NewIter(to); err != nil { return nil, fmt.Errorf("to: %s", err) } if ii.to.current, err = ii.to.iter.Next(); turnEOFIntoNil(err) != nil { return nil, fmt.Errorf("to: %s", err) } ii.hashEqual = hashEqual return &ii, nil } func turnEOFIntoNil(e error) error { if e != nil && e != io.EOF { return e } return nil } // NextBoth makes d advance to the next noder in both merkletries. If // any of them is a directory, it skips its contents. func (d *doubleIter) nextBoth() error { if err := d.nextFrom(); err != nil { return err } if err := d.nextTo(); err != nil { return err } return nil } // NextFrom makes d advance to the next noder in the "from" merkletrie, // skipping its contents if it is a directory. func (d *doubleIter) nextFrom() (err error) { d.from.current, err = d.from.iter.Next() return turnEOFIntoNil(err) } // NextTo makes d advance to the next noder in the "to" merkletrie, // skipping its contents if it is a directory. func (d *doubleIter) nextTo() (err error) { d.to.current, err = d.to.iter.Next() return turnEOFIntoNil(err) } // StepBoth makes d advance to the next noder in both merkletries, // getting deeper into directories if that is the case. func (d *doubleIter) stepBoth() (err error) { if d.from.current, err = d.from.iter.Step(); turnEOFIntoNil(err) != nil { return err } if d.to.current, err = d.to.iter.Step(); turnEOFIntoNil(err) != nil { return err } return nil } // Remaining returns if there are no more noders in the tree, if both // have noders or if one of them doesn't. 
func (d *doubleIter) remaining() remaining { if d.from.current == nil && d.to.current == nil { return noMoreNoders } if d.from.current == nil && d.to.current != nil { return onlyToRemains } if d.from.current != nil && d.to.current == nil { return onlyFromRemains } return bothHaveNodes } // Remaining values tells you whether both trees still have noders, or // only one of them or none of them. type remaining int const ( noMoreNoders remaining = iota onlyToRemains onlyFromRemains bothHaveNodes ) // Compare returns the comparison between the current elements in the // merkletries. func (d *doubleIter) compare() (s comparison, err error) { s.sameHash = d.hashEqual(d.from.current, d.to.current) fromIsDir := d.from.current.IsDir() toIsDir := d.to.current.IsDir() s.bothAreDirs = fromIsDir && toIsDir s.bothAreFiles = !fromIsDir && !toIsDir s.fileAndDir = !s.bothAreDirs && !s.bothAreFiles fromNumChildren, err := d.from.current.NumChildren() if err != nil { return comparison{}, fmt.Errorf("from: %s", err) } toNumChildren, err := d.to.current.NumChildren() if err != nil { return comparison{}, fmt.Errorf("to: %s", err) } s.fromIsEmptyDir = fromIsDir && fromNumChildren == 0 s.toIsEmptyDir = toIsDir && toNumChildren == 0 return } // Answers to a lot of questions you can ask about how to noders are // equal or different. type comparison struct { // the following are only valid if both nodes have the same name // (i.e. nameComparison == 0) // Do both nodes have the same hash? sameHash bool // Are both nodes files? bothAreFiles bool // the following are only valid if any of the noders are dirs, // this is, if !bothAreFiles // Is one a file and the other a dir? fileAndDir bool // Are both nodes dirs? bothAreDirs bool // Is the from node an empty dir? fromIsEmptyDir bool // Is the to Node an empty dir? 
toIsEmptyDir bool } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/filesystem/000077500000000000000000000000001345605224300243665ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/filesystem/node.go000066400000000000000000000077761345605224300256630ustar00rootroot00000000000000package filesystem import ( "io" "os" "path" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" "gopkg.in/src-d/go-billy.v4" ) var ignore = map[string]bool{ ".git": true, } // The node represents a file or a directory in a billy.Filesystem. It // implements the interface noder.Noder of merkletrie package. // // This implementation implements a "standard" hash method being able to be // compared with any other noder.Noder implementation inside of go-git. type node struct { fs billy.Filesystem submodules map[string]plumbing.Hash path string hash []byte children []noder.Noder isDir bool } // NewRootNode returns the root node based on a given billy.Filesystem. // // In order to provide the submodule hash status, a map[string]plumbing.Hash // should be provided where the key is the path of the submodule and the commit // of the submodule HEAD func NewRootNode( fs billy.Filesystem, submodules map[string]plumbing.Hash, ) noder.Noder { return &node{fs: fs, submodules: submodules, isDir: true} } // Hash the hash of a filesystem is the result of concatenating the computed // plumbing.Hash of the file as a Blob and its plumbing.FileMode; that way the // difftree algorithm will detect changes in the contents of files and also in // their mode. 
// // The hash of a directory is always a 24-bytes slice of zero values func (n *node) Hash() []byte { return n.hash } func (n *node) Name() string { return path.Base(n.path) } func (n *node) IsDir() bool { return n.isDir } func (n *node) Children() ([]noder.Noder, error) { if err := n.calculateChildren(); err != nil { return nil, err } return n.children, nil } func (n *node) NumChildren() (int, error) { if err := n.calculateChildren(); err != nil { return -1, err } return len(n.children), nil } func (n *node) calculateChildren() error { if !n.IsDir() { return nil } if len(n.children) != 0 { return nil } files, err := n.fs.ReadDir(n.path) if err != nil { if os.IsNotExist(err) { return nil } return nil } for _, file := range files { if _, ok := ignore[file.Name()]; ok { continue } c, err := n.newChildNode(file) if err != nil { return err } n.children = append(n.children, c) } return nil } func (n *node) newChildNode(file os.FileInfo) (*node, error) { path := path.Join(n.path, file.Name()) hash, err := n.calculateHash(path, file) if err != nil { return nil, err } node := &node{ fs: n.fs, submodules: n.submodules, path: path, hash: hash, isDir: file.IsDir(), } if hash, isSubmodule := n.submodules[path]; isSubmodule { node.hash = append(hash[:], filemode.Submodule.Bytes()...) 
node.isDir = false } return node, nil } func (n *node) calculateHash(path string, file os.FileInfo) ([]byte, error) { if file.IsDir() { return make([]byte, 24), nil } var hash plumbing.Hash var err error if file.Mode()&os.ModeSymlink != 0 { hash, err = n.doCalculateHashForSymlink(path, file) } else { hash, err = n.doCalculateHashForRegular(path, file) } if err != nil { return nil, err } mode, err := filemode.NewFromOSFileMode(file.Mode()) if err != nil { return nil, err } return append(hash[:], mode.Bytes()...), nil } func (n *node) doCalculateHashForRegular(path string, file os.FileInfo) (plumbing.Hash, error) { f, err := n.fs.Open(path) if err != nil { return plumbing.ZeroHash, err } defer f.Close() h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) if _, err := io.Copy(h, f); err != nil { return plumbing.ZeroHash, err } return h.Sum(), nil } func (n *node) doCalculateHashForSymlink(path string, file os.FileInfo) (plumbing.Hash, error) { target, err := n.fs.Readlink(path) if err != nil { return plumbing.ZeroHash, err } h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) if _, err := h.Write([]byte(target)); err != nil { return plumbing.ZeroHash, err } return h.Sum(), nil } func (n *node) String() string { return n.path } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/filesystem/node_test.go000066400000000000000000000110511345605224300266770ustar00rootroot00000000000000package filesystem import ( "bytes" "io" "os" "path" "testing" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4" "gopkg.in/src-d/go-billy.v4/memfs" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/utils/merkletrie" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) func Test(t *testing.T) { TestingT(t) } type NoderSuite struct{} var _ = Suite(&NoderSuite{}) func (s *NoderSuite) TestDiff(c *C) { fsA := memfs.New() WriteFile(fsA, "foo", []byte("foo"), 0644) WriteFile(fsA, "qux/bar", []byte("foo"), 0644) WriteFile(fsA, "qux/qux", []byte("foo"), 0644) fsA.Symlink("foo", "bar") fsB := memfs.New() WriteFile(fsB, "foo", []byte("foo"), 0644) WriteFile(fsB, "qux/bar", []byte("foo"), 0644) WriteFile(fsB, "qux/qux", []byte("foo"), 0644) fsB.Symlink("foo", "bar") ch, err := merkletrie.DiffTree( NewRootNode(fsA, nil), NewRootNode(fsB, nil), IsEquals, ) c.Assert(err, IsNil) c.Assert(ch, HasLen, 0) } func (s *NoderSuite) TestDiffChangeLink(c *C) { fsA := memfs.New() fsA.Symlink("qux", "foo") fsB := memfs.New() fsB.Symlink("bar", "foo") ch, err := merkletrie.DiffTree( NewRootNode(fsA, nil), NewRootNode(fsB, nil), IsEquals, ) c.Assert(err, IsNil) c.Assert(ch, HasLen, 1) } func (s *NoderSuite) TestDiffChangeContent(c *C) { fsA := memfs.New() WriteFile(fsA, "foo", []byte("foo"), 0644) WriteFile(fsA, "qux/bar", []byte("foo"), 0644) WriteFile(fsA, "qux/qux", []byte("foo"), 0644) fsB := memfs.New() WriteFile(fsB, "foo", []byte("foo"), 0644) WriteFile(fsB, "qux/bar", []byte("bar"), 0644) WriteFile(fsB, "qux/qux", []byte("foo"), 0644) ch, err := merkletrie.DiffTree( NewRootNode(fsA, nil), NewRootNode(fsB, nil), IsEquals, ) c.Assert(err, IsNil) c.Assert(ch, HasLen, 1) } func (s *NoderSuite) TestDiffSymlinkDirOnA(c *C) { fsA := memfs.New() WriteFile(fsA, "qux/qux", []byte("foo"), 0644) fsB := memfs.New() fsB.Symlink("qux", "foo") WriteFile(fsB, "qux/qux", []byte("foo"), 0644) ch, err := merkletrie.DiffTree( NewRootNode(fsA, nil), NewRootNode(fsB, nil), IsEquals, ) c.Assert(err, IsNil) c.Assert(ch, HasLen, 1) } func (s *NoderSuite) 
TestDiffSymlinkDirOnB(c *C) { fsA := memfs.New() fsA.Symlink("qux", "foo") WriteFile(fsA, "qux/qux", []byte("foo"), 0644) fsB := memfs.New() WriteFile(fsB, "qux/qux", []byte("foo"), 0644) ch, err := merkletrie.DiffTree( NewRootNode(fsA, nil), NewRootNode(fsB, nil), IsEquals, ) c.Assert(err, IsNil) c.Assert(ch, HasLen, 1) } func (s *NoderSuite) TestDiffChangeMissing(c *C) { fsA := memfs.New() WriteFile(fsA, "foo", []byte("foo"), 0644) fsB := memfs.New() WriteFile(fsB, "bar", []byte("bar"), 0644) ch, err := merkletrie.DiffTree( NewRootNode(fsA, nil), NewRootNode(fsB, nil), IsEquals, ) c.Assert(err, IsNil) c.Assert(ch, HasLen, 2) } func (s *NoderSuite) TestDiffChangeMode(c *C) { fsA := memfs.New() WriteFile(fsA, "foo", []byte("foo"), 0644) fsB := memfs.New() WriteFile(fsB, "foo", []byte("foo"), 0755) ch, err := merkletrie.DiffTree( NewRootNode(fsA, nil), NewRootNode(fsB, nil), IsEquals, ) c.Assert(err, IsNil) c.Assert(ch, HasLen, 1) } func (s *NoderSuite) TestDiffChangeModeNotRelevant(c *C) { fsA := memfs.New() WriteFile(fsA, "foo", []byte("foo"), 0644) fsB := memfs.New() WriteFile(fsB, "foo", []byte("foo"), 0655) ch, err := merkletrie.DiffTree( NewRootNode(fsA, nil), NewRootNode(fsB, nil), IsEquals, ) c.Assert(err, IsNil) c.Assert(ch, HasLen, 0) } func (s *NoderSuite) TestDiffDirectory(c *C) { dir := path.Join("qux", "bar") fsA := memfs.New() fsA.MkdirAll(dir, 0644) fsB := memfs.New() fsB.MkdirAll(dir, 0644) ch, err := merkletrie.DiffTree( NewRootNode(fsA, map[string]plumbing.Hash{ dir: plumbing.NewHash("aa102815663d23f8b75a47e7a01965dcdc96468c"), }), NewRootNode(fsB, map[string]plumbing.Hash{ dir: plumbing.NewHash("19102815663d23f8b75a47e7a01965dcdc96468c"), }), IsEquals, ) c.Assert(err, IsNil) c.Assert(ch, HasLen, 1) a, err := ch[0].Action() c.Assert(err, IsNil) c.Assert(a, Equals, merkletrie.Modify) } func WriteFile(fs billy.Filesystem, filename string, data []byte, perm os.FileMode) error { f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) 
if err != nil { return err } n, err := f.Write(data) if err == nil && n < len(data) { err = io.ErrShortWrite } if err1 := f.Close(); err == nil { err = err1 } return err } var empty = make([]byte, 24) func IsEquals(a, b noder.Hasher) bool { if bytes.Equal(a.Hash(), empty) || bytes.Equal(b.Hash(), empty) { return false } return bytes.Equal(a.Hash(), b.Hash()) } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/index/000077500000000000000000000000001345605224300233115ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/index/node.go000066400000000000000000000040141345605224300245640ustar00rootroot00000000000000package index import ( "path" "strings" "gopkg.in/src-d/go-git.v4/plumbing/format/index" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) // The node represents a index.Entry or a directory inferred from the path // of all entries. It implements the interface noder.Noder of merkletrie // package. // // This implementation implements a "standard" hash method being able to be // compared with any other noder.Noder implementation inside of go-git type node struct { path string entry *index.Entry children []noder.Noder isDir bool } // NewRootNode returns the root node of a computed tree from a index.Index, func NewRootNode(idx *index.Index) noder.Noder { const rootNode = "" m := map[string]*node{rootNode: {isDir: true}} for _, e := range idx.Entries { parts := strings.Split(e.Name, string("/")) var fullpath string for _, part := range parts { parent := fullpath fullpath = path.Join(fullpath, part) if _, ok := m[fullpath]; ok { continue } n := &node{path: fullpath} if fullpath == e.Name { n.entry = e } else { n.isDir = true } m[n.path] = n m[parent].children = append(m[parent].children, n) } } return m[rootNode] } func (n *node) String() string { return n.path } // Hash the hash of a filesystem is a 24-byte slice, is the result of // concatenating the computed plumbing.Hash of the file as a Blob and its // plumbing.FileMode; that 
way the difftree algorithm will detect changes in the // contents of files and also in their mode. // // If the node is computed and not based on a index.Entry the hash is equals // to a 24-bytes slices of zero values. func (n *node) Hash() []byte { if n.entry == nil { return make([]byte, 24) } return append(n.entry.Hash[:], n.entry.Mode.Bytes()...) } func (n *node) Name() string { return path.Base(n.path) } func (n *node) IsDir() bool { return n.isDir } func (n *node) Children() ([]noder.Noder, error) { return n.children, nil } func (n *node) NumChildren() (int, error) { return len(n.children), nil } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/index/node_test.go000066400000000000000000000063111345605224300256250ustar00rootroot00000000000000package index import ( "bytes" "path/filepath" "testing" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/format/index" "gopkg.in/src-d/go-git.v4/utils/merkletrie" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) func Test(t *testing.T) { TestingT(t) } type NoderSuite struct{} var _ = Suite(&NoderSuite{}) func (s *NoderSuite) TestDiff(c *C) { indexA := &index.Index{ Entries: []*index.Entry{ {Name: "foo", Hash: plumbing.NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d")}, {Name: "bar/foo", Hash: plumbing.NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d")}, {Name: "bar/qux", Hash: plumbing.NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d")}, {Name: "bar/baz/foo", Hash: plumbing.NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d")}, }, } indexB := &index.Index{ Entries: []*index.Entry{ {Name: "foo", Hash: plumbing.NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d")}, {Name: "bar/foo", Hash: plumbing.NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d")}, {Name: "bar/qux", Hash: plumbing.NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d")}, {Name: "bar/baz/foo", Hash: plumbing.NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d")}, }, } ch, err := 
merkletrie.DiffTree(NewRootNode(indexA), NewRootNode(indexB), isEquals) c.Assert(err, IsNil) c.Assert(ch, HasLen, 0) } func (s *NoderSuite) TestDiffChange(c *C) { indexA := &index.Index{ Entries: []*index.Entry{{ Name: filepath.Join("bar", "baz", "bar"), Hash: plumbing.NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d"), }}, } indexB := &index.Index{ Entries: []*index.Entry{{ Name: filepath.Join("bar", "baz", "foo"), Hash: plumbing.NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d"), }}, } ch, err := merkletrie.DiffTree(NewRootNode(indexA), NewRootNode(indexB), isEquals) c.Assert(err, IsNil) c.Assert(ch, HasLen, 2) } func (s *NoderSuite) TestDiffDir(c *C) { indexA := &index.Index{ Entries: []*index.Entry{{ Name: "foo", Hash: plumbing.NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d"), }}, } indexB := &index.Index{ Entries: []*index.Entry{{ Name: filepath.Join("foo", "bar"), Hash: plumbing.NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d"), }}, } ch, err := merkletrie.DiffTree(NewRootNode(indexA), NewRootNode(indexB), isEquals) c.Assert(err, IsNil) c.Assert(ch, HasLen, 2) } func (s *NoderSuite) TestDiffSameRoot(c *C) { indexA := &index.Index{ Entries: []*index.Entry{ {Name: "foo.go", Hash: plumbing.NewHash("aab686eafeb1f44702738c8b0f24f2567c36da6d")}, {Name: "foo/bar", Hash: plumbing.NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d")}, }, } indexB := &index.Index{ Entries: []*index.Entry{ {Name: "foo/bar", Hash: plumbing.NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d")}, {Name: "foo.go", Hash: plumbing.NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d")}, }, } ch, err := merkletrie.DiffTree(NewRootNode(indexA), NewRootNode(indexB), isEquals) c.Assert(err, IsNil) c.Assert(ch, HasLen, 1) } var empty = make([]byte, 24) func isEquals(a, b noder.Hasher) bool { if bytes.Equal(a.Hash(), empty) || bytes.Equal(b.Hash(), empty) { return false } return bytes.Equal(a.Hash(), b.Hash()) } 
golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/internal/000077500000000000000000000000001345605224300240165ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/internal/frame/000077500000000000000000000000001345605224300251105ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/internal/frame/frame.go000066400000000000000000000036341345605224300265370ustar00rootroot00000000000000package frame import ( "bytes" "fmt" "sort" "strings" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) // A Frame is a collection of siblings in a trie, sorted alphabetically // by name. type Frame struct { // siblings, sorted in reverse alphabetical order by name stack []noder.Noder } type byName []noder.Noder func (a byName) Len() int { return len(a) } func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byName) Less(i, j int) bool { return strings.Compare(a[i].Name(), a[j].Name()) < 0 } // New returns a frame with the children of the provided node. func New(n noder.Noder) (*Frame, error) { children, err := n.Children() if err != nil { return nil, err } sort.Sort(sort.Reverse(byName(children))) return &Frame{ stack: children, }, nil } // String returns the quoted names of the noders in the frame sorted in // alphabeticall order by name, surrounded by square brackets and // separated by comas. // // Examples: // [] // ["a", "b"] func (f *Frame) String() string { var buf bytes.Buffer _ = buf.WriteByte('[') sep := "" for i := f.Len() - 1; i >= 0; i-- { _, _ = buf.WriteString(sep) sep = ", " _, _ = buf.WriteString(fmt.Sprintf("%q", f.stack[i].Name())) } _ = buf.WriteByte(']') return buf.String() } // First returns, but dont extract, the noder with the alphabetically // smaller name in the frame and true if the frame was not empy. // Otherwise it returns nil and false. 
func (f *Frame) First() (noder.Noder, bool) { if f.Len() == 0 { return nil, false } top := f.Len() - 1 return f.stack[top], true } // Drop extracts the noder with the alphabetically smaller name in the // frame or does nothing if the frame was empty. func (f *Frame) Drop() { if f.Len() == 0 { return } top := f.Len() - 1 f.stack[top] = nil f.stack = f.stack[:top] } // Len returns the number of noders in the frame. func (f *Frame) Len() int { return len(f.stack) } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/internal/frame/frame_test.go000066400000000000000000000042001345605224300275640ustar00rootroot00000000000000package frame import ( "fmt" "testing" "gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/fsnoder" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" . "gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type FrameSuite struct{} var _ = Suite(&FrameSuite{}) func (s *FrameSuite) TestNewFrameFromEmptyDir(c *C) { A, err := fsnoder.New("A()") c.Assert(err, IsNil) frame, err := New(A) c.Assert(err, IsNil) expectedString := `[]` c.Assert(frame.String(), Equals, expectedString) first, ok := frame.First() c.Assert(first, IsNil) c.Assert(ok, Equals, false) first, ok = frame.First() c.Assert(first, IsNil) c.Assert(ok, Equals, false) l := frame.Len() c.Assert(l, Equals, 0) } func (s *FrameSuite) TestNewFrameFromNonEmpty(c *C) { // _______A/________ // | / \ | // x y B/ C/ // | // z root, err := fsnoder.New("A(x<> y<> B() C(z<>))") c.Assert(err, IsNil) frame, err := New(root) c.Assert(err, IsNil) expectedString := `["B", "C", "x", "y"]` c.Assert(frame.String(), Equals, expectedString) l := frame.Len() c.Assert(l, Equals, 4) checkFirstAndDrop(c, frame, "B", true) l = frame.Len() c.Assert(l, Equals, 3) checkFirstAndDrop(c, frame, "C", true) l = frame.Len() c.Assert(l, Equals, 2) checkFirstAndDrop(c, frame, "x", true) l = frame.Len() c.Assert(l, Equals, 1) checkFirstAndDrop(c, frame, "y", true) l = frame.Len() c.Assert(l, Equals, 0) checkFirstAndDrop(c, 
frame, "", false) l = frame.Len() c.Assert(l, Equals, 0) checkFirstAndDrop(c, frame, "", false) } func checkFirstAndDrop(c *C, f *Frame, expectedNodeName string, expectedOK bool) { first, ok := f.First() c.Assert(ok, Equals, expectedOK) if expectedOK { c.Assert(first.Name(), Equals, expectedNodeName) } f.Drop() } // a mock noder that returns error when Children() is called type errorNoder struct{ noder.Noder } func (e *errorNoder) Children() ([]noder.Noder, error) { return nil, fmt.Errorf("mock error") } func (s *FrameSuite) TestNewFrameErrors(c *C) { _, err := New(&errorNoder{}) c.Assert(err, ErrorMatches, "mock error") } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/internal/fsnoder/000077500000000000000000000000001345605224300254565ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/internal/fsnoder/dir.go000066400000000000000000000055101345605224300265640ustar00rootroot00000000000000package fsnoder import ( "bytes" "fmt" "hash/fnv" "sort" "strings" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) // Dir values implement directory-like noders. type dir struct { name string // relative children []noder.Noder // sorted by name hash []byte // memoized } type byName []noder.Noder func (a byName) Len() int { return len(a) } func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byName) Less(i, j int) bool { return strings.Compare(a[i].Name(), a[j].Name()) < 0 } // copies the children slice, so nobody can modify the order of its // elements from the outside. 
func newDir(name string, children []noder.Noder) (*dir, error) { cloned := make([]noder.Noder, len(children)) _ = copy(cloned, children) sort.Sort(byName(cloned)) if hasChildrenWithNoName(cloned) { return nil, fmt.Errorf("non-root inner nodes cannot have empty names") } if hasDuplicatedNames(cloned) { return nil, fmt.Errorf("children cannot have duplicated names") } return &dir{ name: name, children: cloned, }, nil } func hasChildrenWithNoName(children []noder.Noder) bool { for _, c := range children { if c.Name() == "" { return true } } return false } func hasDuplicatedNames(children []noder.Noder) bool { if len(children) < 2 { return false } for i := 1; i < len(children); i++ { if children[i].Name() == children[i-1].Name() { return true } } return false } func (d *dir) Hash() []byte { if d.hash == nil { d.calculateHash() } return d.hash } // hash is calculated as the hash of "dir " plus the concatenation, for // each child, of its name, a space and its hash. Children are sorted // alphabetically before calculating the hash, so the result is unique. func (d *dir) calculateHash() { h := fnv.New64a() h.Write([]byte("dir ")) for _, c := range d.children { h.Write([]byte(c.Name())) h.Write([]byte(" ")) h.Write(c.Hash()) } d.hash = h.Sum([]byte{}) } func (d *dir) Name() string { return d.name } func (d *dir) IsDir() bool { return true } // returns a copy so nobody can alter the order of its elements from the // outside. func (d *dir) Children() ([]noder.Noder, error) { clon := make([]noder.Noder, len(d.children)) _ = copy(clon, d.children) return clon, nil } func (d *dir) NumChildren() (int, error) { return len(d.children), nil } const ( dirStartMark = '(' dirEndMark = ')' dirElementSep = ' ' ) // The string generated by this method is unique for each tree, as the // children of each node are sorted alphabetically by name when // generating the string. 
func (d *dir) String() string { var buf bytes.Buffer buf.WriteString(d.name) buf.WriteRune(dirStartMark) for i, c := range d.children { if i != 0 { buf.WriteRune(dirElementSep) } buf.WriteString(c.String()) } buf.WriteRune(dirEndMark) return buf.String() } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/internal/fsnoder/dir_test.go000066400000000000000000000217501345605224300276270ustar00rootroot00000000000000package fsnoder import ( "reflect" "sort" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" . "gopkg.in/check.v1" ) type DirSuite struct{} var _ = Suite(&DirSuite{}) func (s *DirSuite) TestIsDir(c *C) { noName, err := newDir("", nil) c.Assert(err, IsNil) c.Assert(noName.IsDir(), Equals, true) empty, err := newDir("empty", nil) c.Assert(err, IsNil) c.Assert(empty.IsDir(), Equals, true) root, err := newDir("foo", []noder.Noder{empty}) c.Assert(err, IsNil) c.Assert(root.IsDir(), Equals, true) } func assertChildren(c *C, n noder.Noder, expected []noder.Noder) { numChildren, err := n.NumChildren() c.Assert(err, IsNil) c.Assert(numChildren, Equals, len(expected)) children, err := n.Children() c.Assert(err, IsNil) c.Assert(children, sortedSliceEquals, expected) } type sortedSliceEqualsChecker struct { *CheckerInfo } var sortedSliceEquals Checker = &sortedSliceEqualsChecker{ &CheckerInfo{ Name: "sortedSliceEquals", Params: []string{"obtained", "expected"}, }, } func (checker *sortedSliceEqualsChecker) Check( params []interface{}, names []string) (result bool, error string) { a, ok := params[0].([]noder.Noder) if !ok { return false, "first parameter must be a []noder.Noder" } b, ok := params[1].([]noder.Noder) if !ok { return false, "second parameter must be a []noder.Noder" } sort.Sort(byName(a)) sort.Sort(byName(b)) return reflect.DeepEqual(a, b), "" } func (s *DirSuite) TestNewDirectoryNoNameAndEmpty(c *C) { root, err := newDir("", nil) c.Assert(err, IsNil) c.Assert(root.Hash(), DeepEquals, []byte{0xca, 0x40, 0xf8, 0x67, 0x57, 0x8c, 0x32, 0x1c}) 
c.Assert(root.Name(), Equals, "") assertChildren(c, root, noder.NoChildren) c.Assert(root.String(), Equals, "()") } func (s *DirSuite) TestNewDirectoryEmpty(c *C) { root, err := newDir("root", nil) c.Assert(err, IsNil) c.Assert(root.Hash(), DeepEquals, []byte{0xca, 0x40, 0xf8, 0x67, 0x57, 0x8c, 0x32, 0x1c}) c.Assert(root.Name(), Equals, "root") assertChildren(c, root, noder.NoChildren) c.Assert(root.String(), Equals, "root()") } func (s *DirSuite) TestEmptyDirsHaveSameHash(c *C) { d1, err := newDir("foo", nil) c.Assert(err, IsNil) d2, err := newDir("bar", nil) c.Assert(err, IsNil) c.Assert(d1.Hash(), DeepEquals, d2.Hash()) } func (s *DirSuite) TestNewDirWithEmptyDir(c *C) { empty, err := newDir("empty", nil) c.Assert(err, IsNil) root, err := newDir("", []noder.Noder{empty}) c.Assert(err, IsNil) c.Assert(root.Hash(), DeepEquals, []byte{0x39, 0x25, 0xa8, 0x99, 0x16, 0x47, 0x6a, 0x75}) c.Assert(root.Name(), Equals, "") assertChildren(c, root, []noder.Noder{empty}) c.Assert(root.String(), Equals, "(empty())") } func (s *DirSuite) TestNewDirWithOneEmptyFile(c *C) { empty, err := newFile("name", "") c.Assert(err, IsNil) root, err := newDir("", []noder.Noder{empty}) c.Assert(err, IsNil) c.Assert(root.Hash(), DeepEquals, []byte{0xd, 0x4e, 0x23, 0x1d, 0xf5, 0x2e, 0xfa, 0xc2}) c.Assert(root.Name(), Equals, "") assertChildren(c, root, []noder.Noder{empty}) c.Assert(root.String(), Equals, "(name<>)") } func (s *DirSuite) TestNewDirWithOneFile(c *C) { a, err := newFile("a", "1") c.Assert(err, IsNil) root, err := newDir("", []noder.Noder{a}) c.Assert(err, IsNil) c.Assert(root.Hash(), DeepEquals, []byte{0x96, 0xab, 0x29, 0x54, 0x2, 0x9e, 0x89, 0x28}) c.Assert(root.Name(), Equals, "") assertChildren(c, root, []noder.Noder{a}) c.Assert(root.String(), Equals, "(a<1>)") } func (s *DirSuite) TestDirsWithSameFileHaveSameHash(c *C) { f1, err := newFile("a", "1") c.Assert(err, IsNil) r1, err := newDir("", []noder.Noder{f1}) c.Assert(err, IsNil) f2, err := newFile("a", "1") c.Assert(err, 
IsNil) r2, err := newDir("", []noder.Noder{f2}) c.Assert(err, IsNil) c.Assert(r1.Hash(), DeepEquals, r2.Hash()) } func (s *DirSuite) TestDirsWithDifferentFileContentHaveDifferentHash(c *C) { f1, err := newFile("a", "1") c.Assert(err, IsNil) r1, err := newDir("", []noder.Noder{f1}) c.Assert(err, IsNil) f2, err := newFile("a", "2") c.Assert(err, IsNil) r2, err := newDir("", []noder.Noder{f2}) c.Assert(err, IsNil) c.Assert(r1.Hash(), Not(DeepEquals), r2.Hash()) } func (s *DirSuite) TestDirsWithDifferentFileNameHaveDifferentHash(c *C) { f1, err := newFile("a", "1") c.Assert(err, IsNil) r1, err := newDir("", []noder.Noder{f1}) c.Assert(err, IsNil) f2, err := newFile("b", "1") c.Assert(err, IsNil) r2, err := newDir("", []noder.Noder{f2}) c.Assert(err, IsNil) c.Assert(r1.Hash(), Not(DeepEquals), r2.Hash()) } func (s *DirSuite) TestDirsWithDifferentFileHaveDifferentHash(c *C) { f1, err := newFile("a", "1") c.Assert(err, IsNil) r1, err := newDir("", []noder.Noder{f1}) c.Assert(err, IsNil) f2, err := newFile("b", "2") c.Assert(err, IsNil) r2, err := newDir("", []noder.Noder{f2}) c.Assert(err, IsNil) c.Assert(r1.Hash(), Not(DeepEquals), r2.Hash()) } func (s *DirSuite) TestDirWithEmptyDirHasDifferentHashThanEmptyDir(c *C) { f, err := newFile("a", "") c.Assert(err, IsNil) r1, err := newDir("", []noder.Noder{f}) c.Assert(err, IsNil) d, err := newDir("a", nil) c.Assert(err, IsNil) r2, err := newDir("", []noder.Noder{d}) c.Assert(err, IsNil) c.Assert(r1.Hash(), Not(DeepEquals), r2.Hash()) } func (s *DirSuite) TestNewDirWithTwoFilesSameContent(c *C) { a1, err := newFile("a", "1") c.Assert(err, IsNil) b1, err := newFile("b", "1") c.Assert(err, IsNil) root, err := newDir("", []noder.Noder{a1, b1}) c.Assert(err, IsNil) c.Assert(root.Hash(), DeepEquals, []byte{0xc7, 0xc4, 0xbf, 0x70, 0x33, 0xb9, 0x57, 0xdb}) c.Assert(root.Name(), Equals, "") assertChildren(c, root, []noder.Noder{b1, a1}) c.Assert(root.String(), Equals, "(a<1> b<1>)") } func (s *DirSuite) 
TestNewDirWithTwoFilesDifferentContent(c *C) { a1, err := newFile("a", "1") c.Assert(err, IsNil) b2, err := newFile("b", "2") c.Assert(err, IsNil) root, err := newDir("", []noder.Noder{a1, b2}) c.Assert(err, IsNil) c.Assert(root.Hash(), DeepEquals, []byte{0x94, 0x8a, 0x9d, 0x8f, 0x6d, 0x98, 0x34, 0x55}) c.Assert(root.Name(), Equals, "") assertChildren(c, root, []noder.Noder{b2, a1}) } func (s *DirSuite) TestCrazy(c *C) { // "" // | // ------------------------- // | | | | | // a1 B c1 d2 E // | | // ------------- E // | | | | | // A B X c1 E // | | // a1 e1 e1, err := newFile("e", "1") c.Assert(err, IsNil) E, err := newDir("e", []noder.Noder{e1}) c.Assert(err, IsNil) E, err = newDir("e", []noder.Noder{E}) c.Assert(err, IsNil) E, err = newDir("e", []noder.Noder{E}) c.Assert(err, IsNil) A, err := newDir("a", nil) c.Assert(err, IsNil) B, err := newDir("b", nil) c.Assert(err, IsNil) a1, err := newFile("a", "1") c.Assert(err, IsNil) X, err := newDir("x", []noder.Noder{a1}) c.Assert(err, IsNil) c1, err := newFile("c", "1") c.Assert(err, IsNil) B, err = newDir("b", []noder.Noder{c1, B, X, A}) c.Assert(err, IsNil) a1, err = newFile("a", "1") c.Assert(err, IsNil) c1, err = newFile("c", "1") c.Assert(err, IsNil) d2, err := newFile("d", "2") c.Assert(err, IsNil) root, err := newDir("", []noder.Noder{a1, d2, E, B, c1}) c.Assert(err, IsNil) c.Assert(root.Hash(), DeepEquals, []byte{0xc3, 0x72, 0x9d, 0xf1, 0xcc, 0xec, 0x6d, 0xbb}) c.Assert(root.Name(), Equals, "") assertChildren(c, root, []noder.Noder{E, c1, B, a1, d2}) c.Assert(root.String(), Equals, "(a<1> b(a() b() c<1> x(a<1>)) c<1> d<2> e(e(e(e<1>))))") } func (s *DirSuite) TestDirCannotHaveDirWithNoName(c *C) { noName, err := newDir("", nil) c.Assert(err, IsNil) _, err = newDir("", []noder.Noder{noName}) c.Assert(err, Not(IsNil)) } func (s *DirSuite) TestDirCannotHaveDuplicatedFiles(c *C) { f1, err := newFile("a", "1") c.Assert(err, IsNil) f2, err := newFile("a", "1") c.Assert(err, IsNil) _, err = newDir("", 
[]noder.Noder{f1, f2}) c.Assert(err, Not(IsNil)) } func (s *DirSuite) TestDirCannotHaveDuplicatedFileNames(c *C) { a1, err := newFile("a", "1") c.Assert(err, IsNil) a2, err := newFile("a", "2") c.Assert(err, IsNil) _, err = newDir("", []noder.Noder{a1, a2}) c.Assert(err, Not(IsNil)) } func (s *DirSuite) TestDirCannotHaveDuplicatedDirNames(c *C) { d1, err := newDir("a", nil) c.Assert(err, IsNil) d2, err := newDir("a", nil) c.Assert(err, IsNil) _, err = newDir("", []noder.Noder{d1, d2}) c.Assert(err, Not(IsNil)) } func (s *DirSuite) TestDirCannotHaveDirAndFileWithSameName(c *C) { f, err := newFile("a", "") c.Assert(err, IsNil) d, err := newDir("a", nil) c.Assert(err, IsNil) _, err = newDir("", []noder.Noder{f, d}) c.Assert(err, Not(IsNil)) } func (s *DirSuite) TestUnsortedString(c *C) { b, err := newDir("b", nil) c.Assert(err, IsNil) z, err := newDir("z", nil) c.Assert(err, IsNil) a1, err := newFile("a", "1") c.Assert(err, IsNil) c2, err := newFile("c", "2") c.Assert(err, IsNil) d3, err := newFile("d", "3") c.Assert(err, IsNil) d, err := newDir("d", []noder.Noder{c2, z, d3, a1, b}) c.Assert(err, IsNil) c.Assert(d.String(), Equals, "d(a<1> b() c<2> d<3> z())") } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/internal/fsnoder/doc.go000066400000000000000000000031531345605224300265540ustar00rootroot00000000000000/* Package fsnoder allows to create merkletrie noders that resemble file systems, from human readable string descriptions. Its intended use is generating noders in tests in a readable way. For example: root, _ = New("(a<1> b<2>, B(c<3> d()))") will create a noder as follows: root - "root" is an unnamed dir containing "a", "b" and "B". / | \ - "a" is a file containing the string "1". / | \ - "b" is a file containing the string "2". a b B - "B" is a directory containing "c" and "d". / \ - "c" is a file containing the string "3". c d - "D" is an empty directory. 
Files are expressed as: - one or more letters and dots for the name of the file - a single number, between angle brackets, for the contents of the file. - examples: a<1>, foo.go<2>. Directories are expressed as: - one or more letters for the name of the directory. - its elements between parents, separated with spaces, in any order. - (optionally) the root directory can be unnamed, by skiping its name. Examples: - D(a<1> b<2>) : two files, "a" and "b", having "1" and "2" as their respective contents, inside a directory called "D". - A() : An empty directory called "A". - A(b<>) : An directory called "A", with an empty file inside called "b": - (b(c<1> d(e<2>)) f<>) : an unamed directory containing: ├── b --> directory │   ├── c --> file containing "1" │   └── d --> directory │   └── e --> file containing "2" └── f --> empty file */ package fsnoder golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/internal/fsnoder/file.go000066400000000000000000000025171345605224300267310ustar00rootroot00000000000000package fsnoder import ( "bytes" "fmt" "hash/fnv" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) // file values represent file-like noders in a merkle trie. type file struct { name string // relative contents string hash []byte // memoized } // newFile returns a noder representing a file with the given contents. func newFile(name, contents string) (*file, error) { if name == "" { return nil, fmt.Errorf("files cannot have empty names") } return &file{ name: name, contents: contents, }, nil } // The hash of a file is just its contents. // Empty files will have the fnv64 basis offset as its hash. func (f *file) Hash() []byte { if f.hash == nil { h := fnv.New64a() h.Write([]byte(f.contents)) // it nevers returns an error. 
f.hash = h.Sum(nil) } return f.hash } func (f *file) Name() string { return f.name } func (f *file) IsDir() bool { return false } func (f *file) Children() ([]noder.Noder, error) { return noder.NoChildren, nil } func (f *file) NumChildren() (int, error) { return 0, nil } const ( fileStartMark = '<' fileEndMark = '>' ) // String returns a string formated as: name. func (f *file) String() string { var buf bytes.Buffer buf.WriteString(f.name) buf.WriteRune(fileStartMark) buf.WriteString(f.contents) buf.WriteRune(fileEndMark) return buf.String() } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/internal/fsnoder/file_test.go000066400000000000000000000031361345605224300277660ustar00rootroot00000000000000package fsnoder import ( "testing" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" . "gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type FileSuite struct{} var _ = Suite(&FileSuite{}) var ( HashOfEmptyFile = []byte{0xcb, 0xf2, 0x9c, 0xe4, 0x84, 0x22, 0x23, 0x25} // fnv64 basis offset HashOfContents = []byte{0xee, 0x7e, 0xf3, 0xd0, 0xc2, 0xb5, 0xef, 0x83} // hash of "contents" ) func (s *FileSuite) TestNewFileEmpty(c *C) { f, err := newFile("name", "") c.Assert(err, IsNil) c.Assert(f.Hash(), DeepEquals, HashOfEmptyFile) c.Assert(f.Name(), Equals, "name") c.Assert(f.IsDir(), Equals, false) assertChildren(c, f, noder.NoChildren) c.Assert(f.String(), Equals, "name<>") } func (s *FileSuite) TestNewFileWithContents(c *C) { f, err := newFile("name", "contents") c.Assert(err, IsNil) c.Assert(f.Hash(), DeepEquals, HashOfContents) c.Assert(f.Name(), Equals, "name") c.Assert(f.IsDir(), Equals, false) assertChildren(c, f, noder.NoChildren) c.Assert(f.String(), Equals, "name") } func (s *FileSuite) TestNewfileErrorEmptyName(c *C) { _, err := newFile("", "contents") c.Assert(err, Not(IsNil)) } func (s *FileSuite) TestDifferentContentsHaveDifferentHash(c *C) { f1, err := newFile("name", "contents") c.Assert(err, IsNil) f2, err := newFile("name", "foo") 
c.Assert(err, IsNil) c.Assert(f1.Hash(), Not(DeepEquals), f2.Hash()) } func (s *FileSuite) TestSameContentsHaveSameHash(c *C) { f1, err := newFile("name1", "contents") c.Assert(err, IsNil) f2, err := newFile("name2", "contents") c.Assert(err, IsNil) c.Assert(f1.Hash(), DeepEquals, f2.Hash()) } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/internal/fsnoder/new.go000066400000000000000000000107651345605224300266070ustar00rootroot00000000000000package fsnoder import ( "bytes" "fmt" "io" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) // New function creates a full merkle trie from the string description of // a filesystem tree. See examples of the string format in the package // description. func New(s string) (noder.Noder, error) { return decodeDir([]byte(s), root) } const ( root = true nonRoot = false ) // Expected data: a fsnoder description, for example: A(foo bar qux ...). // When isRoot is true, unnamed dirs are supported, for example: (foo // bar qux ...) func decodeDir(data []byte, isRoot bool) (*dir, error) { data = bytes.TrimSpace(data) if len(data) == 0 { return nil, io.EOF } // get the name of the dir and remove it from the data. In case the // there is no name and isRoot is true, just use "" as the name. 
var name string switch end := bytes.IndexRune(data, dirStartMark); end { case -1: return nil, fmt.Errorf("%c not found", dirStartMark) case 0: if isRoot { name = "" } else { return nil, fmt.Errorf("inner unnamed dirs not allowed: %s", data) } default: name = string(data[0:end]) data = data[end:] } // check data ends with the dirEndMark if data[len(data)-1] != dirEndMark { return nil, fmt.Errorf("malformed data: last %q not found", dirEndMark) } data = data[1 : len(data)-1] // remove initial '(' and last ')' children, err := decodeChildren(data) if err != nil { return nil, err } return newDir(name, children) } func isNumber(b rune) bool { return '0' <= b && b <= '9' } func isLetter(b rune) bool { return ('a' <= b && b <= 'z') || ('A' <= b && b <= 'Z') } func decodeChildren(data []byte) ([]noder.Noder, error) { data = bytes.TrimSpace(data) if len(data) == 0 { return nil, nil } chunks := split(data) ret := make([]noder.Noder, len(chunks)) var err error for i, c := range chunks { ret[i], err = decodeChild(c) if err != nil { return nil, fmt.Errorf("malformed element %d (%s): %s", i, c, err) } } return ret, nil } // returns the description of the elements of a dir. It is just looking // for spaces if they are not part of inner dirs. func split(data []byte) [][]byte { chunks := [][]byte{} start := 0 dirDepth := 0 for i, b := range data { switch b { case dirStartMark: dirDepth++ case dirEndMark: dirDepth-- case dirElementSep: if dirDepth == 0 { chunks = append(chunks, data[start:i+1]) start = i + 1 } } } chunks = append(chunks, data[start:]) return chunks } // A child can be a file or a dir. 
func decodeChild(data []byte) (noder.Noder, error) { clean := bytes.TrimSpace(data) if len(data) < 3 { return nil, fmt.Errorf("element too short: %s", clean) } fileNameEnd := bytes.IndexRune(data, fileStartMark) dirNameEnd := bytes.IndexRune(data, dirStartMark) switch { case fileNameEnd == -1 && dirNameEnd == -1: return nil, fmt.Errorf( "malformed child, no file or dir start mark found") case fileNameEnd == -1: return decodeDir(clean, nonRoot) case dirNameEnd == -1: return decodeFile(clean) case dirNameEnd < fileNameEnd: return decodeDir(clean, nonRoot) case dirNameEnd > fileNameEnd: return decodeFile(clean) } return nil, fmt.Errorf("unreachable") } func decodeFile(data []byte) (noder.Noder, error) { nameEnd := bytes.IndexRune(data, fileStartMark) if nameEnd == -1 { return nil, fmt.Errorf("malformed file, no %c found", fileStartMark) } contentStart := nameEnd + 1 contentEnd := bytes.IndexRune(data, fileEndMark) if contentEnd == -1 { return nil, fmt.Errorf("malformed file, no %c found", fileEndMark) } switch { case nameEnd > contentEnd: return nil, fmt.Errorf("malformed file, found %c before %c", fileEndMark, fileStartMark) case contentStart == contentEnd: name := string(data[:nameEnd]) if !validFileName(name) { return nil, fmt.Errorf("invalid file name") } return newFile(name, "") default: name := string(data[:nameEnd]) if !validFileName(name) { return nil, fmt.Errorf("invalid file name") } contents := string(data[contentStart:contentEnd]) if !validFileContents(contents) { return nil, fmt.Errorf("invalid file contents") } return newFile(name, contents) } } func validFileName(s string) bool { for _, c := range s { if !isLetter(c) && c != '.' { return false } } return true } func validFileContents(s string) bool { for _, c := range s { if !isNumber(c) { return false } } return true } // HashEqual returns if a and b have the same hash. 
func HashEqual(a, b noder.Hasher) bool { return bytes.Equal(a.Hash(), b.Hash()) } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/internal/fsnoder/new_test.go000066400000000000000000000177161345605224300276510ustar00rootroot00000000000000package fsnoder import ( "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" . "gopkg.in/check.v1" ) type FSNoderSuite struct{} var _ = Suite(&FSNoderSuite{}) func check(c *C, input string, expected *dir) { obtained, err := New(input) c.Assert(err, IsNil, Commentf("input = %s", input)) comment := Commentf("\n input = %s\n"+ "expected = %s\nobtained = %s", input, expected, obtained) c.Assert(obtained.Hash(), DeepEquals, expected.Hash(), comment) } func (s *FSNoderSuite) TestNoDataFails(c *C) { _, err := New("") c.Assert(err, Not(IsNil)) _, err = New(" ") // SPC + TAB c.Assert(err, Not(IsNil)) } func (s *FSNoderSuite) TestUnnamedRootFailsIfNotRoot(c *C) { _, err := decodeDir([]byte("()"), false) c.Assert(err, Not(IsNil)) } func (s *FSNoderSuite) TestUnnamedInnerFails(c *C) { _, err := New("(())") c.Assert(err, Not(IsNil)) _, err = New("((a<>))") c.Assert(err, Not(IsNil)) } func (s *FSNoderSuite) TestMalformedFile(c *C) { _, err := New("(4<>)") c.Assert(err, Not(IsNil)) _, err = New("(4<1>)") c.Assert(err, Not(IsNil)) _, err = New("(4?1>)") c.Assert(err, Not(IsNil)) _, err = New("(4)") c.Assert(err, Not(IsNil)) _, err = New("(4")) c.Assert(err, Not(IsNil)) _, err = decodeFile([]byte("a")) c.Assert(err, Not(IsNil)) _, err = decodeFile([]byte("a<1?")) c.Assert(err, Not(IsNil)) _, err = decodeFile([]byte("a?>")) c.Assert(err, Not(IsNil)) _, err = decodeFile([]byte("1<>")) c.Assert(err, Not(IsNil)) _, err = decodeFile([]byte("a") c.Assert(err, Not(IsNil)) _, err = New("a<>") c.Assert(err, Not(IsNil)) } func (s *FSNoderSuite) TestUnnamedEmptyRoot(c *C) { input := "()" expected, err := newDir("", nil) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestNamedEmptyRoot(c *C) { input := "a()" expected, err := 
newDir("a", nil) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestEmptyFile(c *C) { input := "(a<>)" a1, err := newFile("a", "") c.Assert(err, IsNil) expected, err := newDir("", []noder.Noder{a1}) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestNonEmptyFile(c *C) { input := "(a<1>)" a1, err := newFile("a", "1") c.Assert(err, IsNil) expected, err := newDir("", []noder.Noder{a1}) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestTwoFilesSameContents(c *C) { input := "(b<1> a<1>)" a1, err := newFile("a", "1") c.Assert(err, IsNil) b1, err := newFile("b", "1") c.Assert(err, IsNil) expected, err := newDir("", []noder.Noder{a1, b1}) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestTwoFilesDifferentContents(c *C) { input := "(b<2> a<1>)" a1, err := newFile("a", "1") c.Assert(err, IsNil) b2, err := newFile("b", "2") c.Assert(err, IsNil) expected, err := newDir("", []noder.Noder{a1, b2}) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestManyFiles(c *C) { input := "(e<1> b<2> a<1> c<1> d<3> f<4>)" a1, err := newFile("a", "1") c.Assert(err, IsNil) b2, err := newFile("b", "2") c.Assert(err, IsNil) c1, err := newFile("c", "1") c.Assert(err, IsNil) d3, err := newFile("d", "3") c.Assert(err, IsNil) e1, err := newFile("e", "1") c.Assert(err, IsNil) f4, err := newFile("f", "4") c.Assert(err, IsNil) expected, err := newDir("", []noder.Noder{e1, b2, a1, c1, d3, f4}) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestEmptyDir(c *C) { input := "(A())" A, err := newDir("A", nil) c.Assert(err, IsNil) expected, err := newDir("", []noder.Noder{A}) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestDirWithEmtpyFile(c *C) { input := "(A(a<>))" a, err := newFile("a", "") c.Assert(err, IsNil) A, err := newDir("A", []noder.Noder{a}) c.Assert(err, IsNil) expected, err := newDir("", []noder.Noder{A}) c.Assert(err, 
IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestDirWithEmtpyFileSameName(c *C) { input := "(A(A<>))" f, err := newFile("A", "") c.Assert(err, IsNil) A, err := newDir("A", []noder.Noder{f}) c.Assert(err, IsNil) expected, err := newDir("", []noder.Noder{A}) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestDirWithFileLongContents(c *C) { input := "(A(a<12>))" a1, err := newFile("a", "12") c.Assert(err, IsNil) A, err := newDir("A", []noder.Noder{a1}) c.Assert(err, IsNil) expected, err := newDir("", []noder.Noder{A}) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestDirWithFileLongName(c *C) { input := "(A(abc<12>))" a1, err := newFile("abc", "12") c.Assert(err, IsNil) A, err := newDir("A", []noder.Noder{a1}) c.Assert(err, IsNil) expected, err := newDir("", []noder.Noder{A}) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestDirWithFile(c *C) { input := "(A(a<1>))" a1, err := newFile("a", "1") c.Assert(err, IsNil) A, err := newDir("A", []noder.Noder{a1}) c.Assert(err, IsNil) expected, err := newDir("", []noder.Noder{A}) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestDirWithEmptyDirSameName(c *C) { input := "(A(A()))" A2, err := newDir("A", nil) c.Assert(err, IsNil) A1, err := newDir("A", []noder.Noder{A2}) c.Assert(err, IsNil) expected, err := newDir("", []noder.Noder{A1}) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestDirWithEmptyDir(c *C) { input := "(A(B()))" B, err := newDir("B", nil) c.Assert(err, IsNil) A, err := newDir("A", []noder.Noder{B}) c.Assert(err, IsNil) expected, err := newDir("", []noder.Noder{A}) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestDirWithTwoFiles(c *C) { input := "(A(a<1> b<2>))" a1, err := newFile("a", "1") c.Assert(err, IsNil) b2, err := newFile("b", "2") c.Assert(err, IsNil) A, err := newDir("A", []noder.Noder{b2, a1}) c.Assert(err, IsNil) expected, err := 
newDir("", []noder.Noder{A}) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestCrazy(c *C) { // "" // | // ------------------------- // | | | | | // a1 B c1 d2 E // | | // ------------- E // | | | | | // A B X c1 E // | | // a1 e1 input := "(d<2> b(c<1> b() a() x(a<1>)) a<1> c<1> e(e(e(e<1>))))" e1, err := newFile("e", "1") c.Assert(err, IsNil) E, err := newDir("e", []noder.Noder{e1}) c.Assert(err, IsNil) E, err = newDir("e", []noder.Noder{E}) c.Assert(err, IsNil) E, err = newDir("e", []noder.Noder{E}) c.Assert(err, IsNil) A, err := newDir("a", nil) c.Assert(err, IsNil) B, err := newDir("b", nil) c.Assert(err, IsNil) a1, err := newFile("a", "1") c.Assert(err, IsNil) X, err := newDir("x", []noder.Noder{a1}) c.Assert(err, IsNil) c1, err := newFile("c", "1") c.Assert(err, IsNil) B, err = newDir("b", []noder.Noder{c1, B, X, A}) c.Assert(err, IsNil) a1, err = newFile("a", "1") c.Assert(err, IsNil) c1, err = newFile("c", "1") c.Assert(err, IsNil) d2, err := newFile("d", "2") c.Assert(err, IsNil) expected, err := newDir("", []noder.Noder{a1, d2, E, B, c1}) c.Assert(err, IsNil) check(c, input, expected) } func (s *FSNoderSuite) TestHashEqual(c *C) { input1 := "(A(a<1> b<2>))" input2 := "(A(a<1> b<2>))" input3 := "(A(a<> b<2>))" t1, err := New(input1) c.Assert(err, IsNil) t2, err := New(input2) c.Assert(err, IsNil) t3, err := New(input3) c.Assert(err, IsNil) c.Assert(HashEqual(t1, t2), Equals, true) c.Assert(HashEqual(t2, t1), Equals, true) c.Assert(HashEqual(t2, t3), Equals, false) c.Assert(HashEqual(t3, t2), Equals, false) c.Assert(HashEqual(t3, t1), Equals, false) c.Assert(HashEqual(t1, t3), Equals, false) } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/iter.go000066400000000000000000000141071345605224300234770ustar00rootroot00000000000000package merkletrie import ( "fmt" "io" "gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/frame" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) // Iter is an iterator for merkletries (only the trie 
part of the // merkletrie is relevant here, it does not use the Hasher interface). // // The iteration is performed in depth-first pre-order. Entries at each // depth are traversed in (case-sensitive) alphabetical order. // // This is the kind of traversal you will expect when listing ordinary // files and directories recursively, for example: // // Trie Traversal order // ---- --------------- // . // / | \ c // / | \ d/ // d c z ===> d/a // / \ d/b // b a z // // // This iterator is somewhat especial as you can chose to skip whole // "directories" when iterating: // // - The Step method will iterate normally. // // - the Next method will not descend deeper into the tree. // // For example, if the iterator is at `d/`, the Step method will return // `d/a` while the Next would have returned `z` instead (skipping `d/` // and its descendants). The name of the these two methods are based on // the well known "next" and "step" operations, quite common in // debuggers, like gdb. // // The paths returned by the iterator will be relative, if the iterator // was created from a single node, or absolute, if the iterator was // created from the path to the node (the path will be prefixed to all // returned paths). type Iter struct { // Tells if the iteration has started. hasStarted bool // The top of this stack has the current node and its siblings. The // rest of the stack keeps the ancestors of the current node and // their corresponding siblings. The current element is always the // top element of the top frame. // // When "step"ping into a node, its children are pushed as a new // frame. // // When "next"ing pass a node, the current element is dropped by // popping the top frame. frameStack []*frame.Frame // The base path used to turn the relative paths used internally by // the iterator into absolute paths used by external applications. // For relative iterator this will be nil. 
base noder.Path } // NewIter returns a new relative iterator using the provider noder as // its unnamed root. When iterating, all returned paths will be // relative to node. func NewIter(n noder.Noder) (*Iter, error) { return newIter(n, nil) } // NewIterFromPath returns a new absolute iterator from the noder at the // end of the path p. When iterating, all returned paths will be // absolute, using the root of the path p as their root. func NewIterFromPath(p noder.Path) (*Iter, error) { return newIter(p, p) // Path implements Noder } func newIter(root noder.Noder, base noder.Path) (*Iter, error) { ret := &Iter{ base: base, } if root == nil { return ret, nil } frame, err := frame.New(root) if err != nil { return nil, err } ret.push(frame) return ret, nil } func (iter *Iter) top() (*frame.Frame, bool) { if len(iter.frameStack) == 0 { return nil, false } top := len(iter.frameStack) - 1 return iter.frameStack[top], true } func (iter *Iter) push(f *frame.Frame) { iter.frameStack = append(iter.frameStack, f) } const ( doDescend = true dontDescend = false ) // Next returns the path of the next node without descending deeper into // the trie and nil. If there are no more entries in the trie it // returns nil and io.EOF. In case of error, it will return nil and the // error. func (iter *Iter) Next() (noder.Path, error) { return iter.advance(dontDescend) } // Step returns the path to the next node in the trie, descending deeper // into it if needed, and nil. If there are no more nodes in the trie, // it returns nil and io.EOF. In case of error, it will return nil and // the error. func (iter *Iter) Step() (noder.Path, error) { return iter.advance(doDescend) } // Advances the iterator in the desired direction: descend or // dontDescend. // // Returns the new current element and a nil error on success. If there // are no more elements in the trie below the base, it returns nil, and // io.EOF. Returns nil and an error in case of errors. 
func (iter *Iter) advance(wantDescend bool) (noder.Path, error) { current, err := iter.current() if err != nil { return nil, err } // The first time we just return the current node. if !iter.hasStarted { iter.hasStarted = true return current, nil } // Advances means getting a next current node, either its first child or // its next sibling, depending if we must descend or not. numChildren, err := current.NumChildren() if err != nil { return nil, err } mustDescend := numChildren != 0 && wantDescend if mustDescend { // descend: add a new frame with the current's children. frame, err := frame.New(current) if err != nil { return nil, err } iter.push(frame) } else { // don't descend: just drop the current node iter.drop() } return iter.current() } // Returns the path to the current node, adding the base if there was // one, and a nil error. If there were no noders left, it returns nil // and io.EOF. If an error occurred, it returns nil and the error. func (iter *Iter) current() (noder.Path, error) { if topFrame, ok := iter.top(); !ok { return nil, io.EOF } else if _, ok := topFrame.First(); !ok { return nil, io.EOF } ret := make(noder.Path, 0, len(iter.base)+len(iter.frameStack)) // concat the base... ret = append(ret, iter.base...) // ... and the current node and all its ancestors for i, f := range iter.frameStack { t, ok := f.First() if !ok { panic(fmt.Sprintf("frame %d is empty", i)) } ret = append(ret, t) } return ret, nil } // removes the current node if any, and all the frames that become empty as a // consequence of this action. 
func (iter *Iter) drop() { frame, ok := iter.top() if !ok { return } frame.Drop() // if the frame is empty, remove it and its parent, recursively if frame.Len() == 0 { top := len(iter.frameStack) - 1 iter.frameStack[top] = nil iter.frameStack = iter.frameStack[:top] iter.drop() } } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/iter_test.go000066400000000000000000000273041345605224300245410ustar00rootroot00000000000000package merkletrie_test import ( "fmt" "io" "strings" "gopkg.in/src-d/go-git.v4/utils/merkletrie" "gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/fsnoder" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" . "gopkg.in/check.v1" ) type IterSuite struct{} var _ = Suite(&IterSuite{}) // A test is a list of operations we want to perform on an iterator and // their expected results. // // The operations are expressed as a sequence of `n` and `s`, // representing the amount of next and step operations we want to call // on the iterator and their order. For example, an operations value of // "nns" means: call a `n`ext, then another `n`ext and finish with a // `s`tep. // // The expected is the full path of the noders returned by the // operations, separated by spaces. // // For instance: // // t := test{ // operations: "ns", // expected: "a a/b" // } // // means: // // - the first iterator operation has to be Next, and it must return a // node called "a" with no ancestors. // // - the second operation has to be Step, and it must return a node // called "b" with a single ancestor called "a". type test struct { operations string expected string } // Runs a test on the provided iterator, checking that the names of the // returned values are correct. If not, the treeDescription value is // printed along with information about mismatch. 
func (t test) run(c *C, iter *merkletrie.Iter,
	treeDescription string, testNumber int) {

	expectedChunks := strings.Split(t.expected, " ")
	// strings.Split on "" yields [""], not an empty slice; normalize it.
	if t.expected == "" {
		expectedChunks = []string{}
	}

	// There must be at least one operation per expected result.
	if len(t.operations) < len(expectedChunks) {
		c.Fatalf("malformed test %d: not enough operations", testNumber)
		return
	}

	var obtained noder.Path
	var err error
	for i, b := range t.operations {
		comment := Commentf("\ntree: %q\ntest #%d (%q)\noperation #%d (%q)",
			treeDescription, testNumber, t.operations, i, t.operations[i])
		switch t.operations[i] {
		case 'n':
			obtained, err = iter.Next()
			if err != io.EOF {
				c.Assert(err, IsNil)
			}
		case 's':
			obtained, err = iter.Step()
			if err != io.EOF {
				c.Assert(err, IsNil)
			}
		default:
			c.Fatalf("unknown operation at test %d, operation %d (%c)\n",
				testNumber, i, b)
		}
		// Operations beyond the expected results must all hit io.EOF.
		if i >= len(expectedChunks) {
			c.Assert(err, Equals, io.EOF, comment)
			continue
		}
		c.Assert(err, IsNil, comment)
		c.Assert(obtained.String(), Equals, expectedChunks[i], comment)
	}
}

// A testsCollection value represents a tree and a collection of tests
// we want to perform on iterators of that tree.
//
// Example:
//
//          .
//          |
//      ---------
//      |   |   |
//      a   b   c
//          |
//          z
//
//     var foo testCollection = {
//         tree: "(a<> b(z<>) c<>)"
//         tests: []test{
//             {operations: "nns", expected: "a b b/z"},
//             {operations: "nnn", expected: "a b c"},
//         },
//     }
//
// A new iterator will be build for each test.
type testsCollection struct {
	tree  string // a fsnoder description of a tree.
	tests []test // the collection of tests we want to run
}

// Executes all the tests in a testsCollection.
func (tc testsCollection) run(c *C) {
	root, err := fsnoder.New(tc.tree)
	c.Assert(err, IsNil)

	// A fresh iterator is built for every test in the collection.
	for i, t := range tc.tests {
		iter, err := merkletrie.NewIter(root)
		c.Assert(err, IsNil)
		t.run(c, iter, root.String(), i)
	}
}

func (s *IterSuite) TestEmptyNamedDir(c *C) {
	tc := testsCollection{
		tree: "A()",
		tests: []test{
			{operations: "n", expected: ""},
			{operations: "nn", expected: ""},
			{operations: "nnn", expected: ""},
			{operations: "nnns", expected: ""},
			{operations: "nnnssnsnns", expected: ""},
			{operations: "s", expected: ""},
			{operations: "ss", expected: ""},
			{operations: "sss", expected: ""},
			{operations: "sssn", expected: ""},
			{operations: "sssnnsnssn", expected: ""},
		},
	}
	tc.run(c)
}

func (s *IterSuite) TestEmptyUnnamedDir(c *C) {
	tc := testsCollection{
		tree: "()",
		tests: []test{
			{operations: "n", expected: ""},
			{operations: "nn", expected: ""},
			{operations: "nnn", expected: ""},
			{operations: "nnns", expected: ""},
			{operations: "nnnssnsnns", expected: ""},
			{operations: "s", expected: ""},
			{operations: "ss", expected: ""},
			{operations: "sss", expected: ""},
			{operations: "sssn", expected: ""},
			{operations: "sssnnsnssn", expected: ""},
		},
	}
	tc.run(c)
}

func (s *IterSuite) TestOneFile(c *C) {
	tc := testsCollection{
		tree: "(a<>)",
		tests: []test{
			{operations: "n", expected: "a"},
			{operations: "nn", expected: "a"},
			{operations: "nnn", expected: "a"},
			{operations: "nnns", expected: "a"},
			{operations: "nnnssnsnns", expected: "a"},
			{operations: "s", expected: "a"},
			{operations: "ss", expected: "a"},
			{operations: "sss", expected: "a"},
			{operations: "sssn", expected: "a"},
			{operations: "sssnnsnssn", expected: "a"},
		},
	}
	tc.run(c)
}

// root
//  / \
// a   b
func (s *IterSuite) TestTwoFiles(c *C) {
	tc := testsCollection{
		tree: "(a<> b<>)",
		tests: []test{
			{operations: "nnn", expected: "a b"},
			{operations: "nns", expected: "a b"},
			{operations: "nsn", expected: "a b"},
			{operations: "nss", expected: "a b"},
			{operations: "snn", expected: "a b"},
			{operations: "sns", expected: "a b"},
			{operations: "ssn", expected: "a b"},
			{operations: "sss", expected: "a b"},
		},
	}
	tc.run(c)
}

// root
//  |
//  a
//  |
//  b
func (s *IterSuite) TestDirWithFile(c *C) {
	tc := testsCollection{
		tree: "(a(b<>))",
		tests: []test{
			{operations: "nnn", expected: "a"},
			{operations: "nns", expected: "a"},
			{operations: "nsn", expected: "a a/b"},
			{operations: "nss", expected: "a a/b"},
			{operations: "snn", expected: "a"},
			{operations: "sns", expected: "a"},
			{operations: "ssn", expected: "a a/b"},
			{operations: "sss", expected: "a a/b"},
		},
	}
	tc.run(c)
}

// root
//  /|\
// c a b
func (s *IterSuite) TestThreeSiblings(c *C) {
	tc := testsCollection{
		tree: "(c<> a<> b<>)",
		tests: []test{
			{operations: "nnnn", expected: "a b c"},
			{operations: "nnns", expected: "a b c"},
			{operations: "nnsn", expected: "a b c"},
			{operations: "nnss", expected: "a b c"},
			{operations: "nsnn", expected: "a b c"},
			{operations: "nsns", expected: "a b c"},
			{operations: "nssn", expected: "a b c"},
			{operations: "nsss", expected: "a b c"},
			{operations: "snnn", expected: "a b c"},
			{operations: "snns", expected: "a b c"},
			{operations: "snsn", expected: "a b c"},
			{operations: "snss", expected: "a b c"},
			{operations: "ssnn", expected: "a b c"},
			{operations: "ssns", expected: "a b c"},
			{operations: "sssn", expected: "a b c"},
			{operations: "ssss", expected: "a b c"},
		},
	}
	tc.run(c)
}

// root
//  |
//  b
//  |
//  c
//  |
//  a
func (s *IterSuite) TestThreeVertical(c *C) {
	tc := testsCollection{
		tree: "(b(c(a())))",
		tests: []test{
			{operations: "nnnn", expected: "b"},
			{operations: "nnns", expected: "b"},
			{operations: "nnsn", expected: "b"},
			{operations: "nnss", expected: "b"},
			{operations: "nsnn", expected: "b b/c"},
			{operations: "nsns", expected: "b b/c"},
			{operations: "nssn", expected: "b b/c b/c/a"},
			{operations: "nsss", expected: "b b/c b/c/a"},
			{operations: "snnn", expected: "b"},
			{operations: "snns", expected: "b"},
			{operations: "snsn", expected: "b"},
			{operations: "snss", expected: "b"},
			{operations: "ssnn", expected: "b b/c"},
			{operations: "ssns", expected: "b b/c"},
			{operations: "sssn", expected: "b b/c b/c/a"},
			{operations: "ssss", expected: "b b/c b/c/a"},
		},
	}
	tc.run(c)
}

// root
//  / \
// c   a
// |
// b
func (s *IterSuite) TestThreeMix1(c *C) {
	tc := testsCollection{
		tree: "(c(b<>) a<>)",
		tests: []test{
			{operations: "nnnn", expected: "a c"},
			{operations: "nnns", expected: "a c"},
			{operations: "nnsn", expected: "a c c/b"},
			{operations: "nnss", expected: "a c c/b"},
			{operations: "nsnn", expected: "a c"},
			{operations: "nsns", expected: "a c"},
			{operations: "nssn", expected: "a c c/b"},
			{operations: "nsss", expected: "a c c/b"},
			{operations: "snnn", expected: "a c"},
			{operations: "snns", expected: "a c"},
			{operations: "snsn", expected: "a c c/b"},
			{operations: "snss", expected: "a c c/b"},
			{operations: "ssnn", expected: "a c"},
			{operations: "ssns", expected: "a c"},
			{operations: "sssn", expected: "a c c/b"},
			{operations: "ssss", expected: "a c c/b"},
		},
	}
	tc.run(c)
}

// root
//  / \
// b   a
//     |
//     c
func (s *IterSuite) TestThreeMix2(c *C) {
	tc := testsCollection{
		tree: "(b() a(c<>))",
		tests: []test{
			{operations: "nnnn", expected: "a b"},
			{operations: "nnns", expected: "a b"},
			{operations: "nnsn", expected: "a b"},
			{operations: "nnss", expected: "a b"},
			{operations: "nsnn", expected: "a a/c b"},
			{operations: "nsns", expected: "a a/c b"},
			{operations: "nssn", expected: "a a/c b"},
			{operations: "nsss", expected: "a a/c b"},
			{operations: "snnn", expected: "a b"},
			{operations: "snns", expected: "a b"},
			{operations: "snsn", expected: "a b"},
			{operations: "snss", expected: "a b"},
			{operations: "ssnn", expected: "a a/c b"},
			{operations: "ssns", expected: "a a/c b"},
			{operations: "sssn", expected: "a a/c b"},
			{operations: "ssss", expected: "a a/c b"},
		},
	}
	tc.run(c)
}

//     root
//     / | \
//    /  |  ----
//   f   d   h --------
//  / \        / \    |
// e   a      j   b/  g
// |  / \     |
// l n   k   icm
//   |
//   o
//   |
//   p/
func (s *IterSuite) TestCrazy(c *C) {
	tc := testsCollection{
		tree: "(f(e(l<>) a(n(o(p())) k<>)) d<> h(j(i<> c<> m<>) b() g<>))",
		tests: []test{
			{operations: "nnnnn", expected: "d f h"},
			{operations: "nnnns", expected: "d f h"},
			{operations: "nnnsn", expected: "d f h h/b h/g"},
			{operations: "nnnss", expected: "d f h h/b h/g"},
			{operations: "nnsnn", expected: "d f f/a f/e h"},
			{operations: "nnsns", expected: "d f f/a f/e f/e/l"},
			{operations: "nnssn", expected: "d f f/a f/a/k f/a/n"},
			{operations: "nnsss", expected: "d f f/a f/a/k f/a/n"},
			{operations: "nsnnn", expected: "d f h"},
			{operations: "nsnns", expected: "d f h"},
			{operations: "nsnsn", expected: "d f h h/b h/g"},
			{operations: "nsnss", expected: "d f h h/b h/g"},
			{operations: "nssnn", expected: "d f f/a f/e h"},
		},
	}
	tc.run(c)
}

//      .
//      |
//      a
//      |
//      b
//     / \
//    z   h
//   / \
//  d   e
//      |
//      f
func (s *IterSuite) TestNewIterFromPath(c *C) {
	tree, err := fsnoder.New("(a(b(z(d<> e(f<>)) h<>)))")
	c.Assert(err, IsNil)

	z := find(c, tree, "z")

	iter, err := merkletrie.NewIterFromPath(z)
	c.Assert(err, IsNil)

	n, err := iter.Next()
	c.Assert(err, IsNil)
	c.Assert(n.String(), Equals, "a/b/z/d")

	n, err = iter.Next()
	c.Assert(err, IsNil)
	c.Assert(n.String(), Equals, "a/b/z/e")

	n, err = iter.Step()
	c.Assert(err, IsNil)
	c.Assert(n.String(), Equals, "a/b/z/e/f")

	_, err = iter.Step()
	c.Assert(err, Equals, io.EOF)
}

// find walks tree depth-first and returns the path of the first node
// with the given name; it fails the test if the name is not present.
func find(c *C, tree noder.Noder, name string) noder.Path {
	iter, err := merkletrie.NewIter(tree)
	c.Assert(err, IsNil)
	for {
		current, err := iter.Step()
		if err != io.EOF {
			c.Assert(err, IsNil)
		} else {
			c.Fatalf("node %s not found in tree %s", name, tree)
		}

		if current.Name() == name {
			return current
		}
	}
}

// errorNoder is a Noder whose Children method always fails; used to
// test error propagation from the iterator constructor.
type errorNoder struct{ noder.Noder }

func (e *errorNoder) Children() ([]noder.Noder, error) {
	return nil, fmt.Errorf("mock error")
}

func (s *IterSuite) TestNewIterNil(c *C) {
	i, err := merkletrie.NewIter(nil)
	c.Assert(err, IsNil)
	_, err = i.Next()
	c.Assert(err, Equals, io.EOF)
}

func (s *IterSuite) TestNewIterFailsOnChildrenErrors(c *C) {
	_, err := merkletrie.NewIter(&errorNoder{})
	c.Assert(err, ErrorMatches, "mock error")
}
golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/noder/000077500000000000000000000000001345605224300233115ustar00rootroot00000000000000golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/noder/noder.go000066400000000000000000000041241345605224300247500ustar00rootroot00000000000000// Package noder provides an interface for defining nodes in a
// merkletrie, their hashes and their paths (a noders and its
// ancestors).
//
// The hasher interface is easy to implement naively by elements that
// already have a hash, like git blobs and trees.  More sophisticated
// implementations can implement the Equal function in exotic ways
// though: for instance, comparing the modification time of directories
// in a filesystem.
package noder

import "fmt"

// Hasher interface is implemented by types that can tell you
// their hash.
type Hasher interface {
	Hash() []byte
}

// Equal functions take two hashers and return if they are equal.
//
// These functions are expected to be faster than reflect.Equal or
// reflect.DeepEqual because they can compare just the hash of the
// objects, instead of their contents, so they are expected to be O(1).
type Equal func(a, b Hasher) bool

// The Noder interface is implemented by the elements of a Merkle Trie.
//
// There are two types of elements in a Merkle Trie:
//
// - file-like nodes: they cannot have children.
//
// - directory-like nodes: they can have 0 or more children and their
// hash is calculated by combining their children hashes.
type Noder interface {
	Hasher
	fmt.Stringer // for testing purposes

	// Name returns the name of an element (relative, not its full
	// path).
	Name() string

	// IsDir returns true if the element is a directory-like node or
	// false if it is a file-like node.
	IsDir() bool

	// Children returns the children of the element.  Note that empty
	// directory-like noders and file-like noders will both return
	// NoChildren.
	Children() ([]Noder, error)

	// NumChildren returns the number of children this element has.
	//
	// This method is an optimization: the number of children is easily
	// calculated as the length of the value returned by the Children
	// method (above); yet, some implementations will be able to
	// implement NumChildren in O(1) while Children is usually more
	// complex.
	NumChildren() (int, error)
}

// NoChildren represents the children of a noder without children.
var NoChildren = []Noder{}
golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/noder/noder_test.go000066400000000000000000000044411345605224300260110ustar00rootroot00000000000000package noder

import (
	"testing"

	. "gopkg.in/check.v1"
)

func Test(t *testing.T) { TestingT(t) }

type NoderSuite struct{}

var _ = Suite(&NoderSuite{})

// noderMock is a minimal in-memory Noder implementation used as a test
// fixture.
type noderMock struct {
	name     string
	hash     []byte
	isDir    bool
	children []Noder
}

func (n noderMock) String() string             { return n.Name() }
func (n noderMock) Hash() []byte               { return n.hash }
func (n noderMock) Name() string               { return n.name }
func (n noderMock) IsDir() bool                { return n.isDir }
func (n noderMock) Children() ([]Noder, error) { return n.children, nil }
func (n noderMock) NumChildren() (int, error)  { return len(n.children), nil }

// Returns a sequence with the noders 3, 2, and 1 from the
// following diagram:
//
//      3
//      |
//      2
//      |
//      1
//     / \
//    c1  c2
//
// This is also the path of "1".
func nodersFixture() []Noder {
	n1 := &noderMock{
		name:     "1",
		hash:     []byte{0x00, 0x01, 0x02},
		isDir:    true,
		children: childrenFixture(),
	}
	n2 := &noderMock{name: "2"}
	n3 := &noderMock{name: "3"}
	return []Noder{n3, n2, n1}
}

// Returns a collection of 2 noders: c1, c2.
func childrenFixture() []Noder {
	c1 := &noderMock{name: "c1"}
	c2 := &noderMock{name: "c2"}
	return []Noder{c1, c2}
}

// Returns the same as nodersFixture but sorted by name, this is: "1",
// "2" and then "3".
func sortedNodersFixture() []Noder {
	n1 := &noderMock{
		name:     "1",
		hash:     []byte{0x00, 0x01, 0x02},
		isDir:    true,
		children: childrenFixture(),
	}
	n2 := &noderMock{name: "2"}
	n3 := &noderMock{name: "3"}
	return []Noder{n1, n2, n3} // the same as nodersFixture but sorted by name
}

// returns nodersFixture as the path of "1".
func pathFixture() Path {
	return Path(nodersFixture())
}

func (s *NoderSuite) TestString(c *C) {
	c.Assert(pathFixture().String(), Equals, "3/2/1")
}

func (s *NoderSuite) TestLast(c *C) {
	c.Assert(pathFixture().Last().Name(), Equals, "1")
}

// A Path must behave as the Noder of its final element.
func (s *NoderSuite) TestPathImplementsNoder(c *C) {
	p := pathFixture()
	c.Assert(p.Name(), Equals, "1")
	c.Assert(p.Hash(), DeepEquals, []byte{0x00, 0x01, 0x02})
	c.Assert(p.IsDir(), Equals, true)

	children, err := p.Children()
	c.Assert(err, IsNil)
	c.Assert(children, DeepEquals, childrenFixture())

	numChildren, err := p.NumChildren()
	c.Assert(err, IsNil)
	c.Assert(numChildren, Equals, 2)
}
golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/noder/path.go000066400000000000000000000040001345605224300245700ustar00rootroot00000000000000package noder

import (
	"bytes"
	"strings"
)

// Path values represent a noder and its ancestors.  The root goes first
// and the actual final noder the path is referring to will be the last.
//
// A path implements the Noder interface, redirecting all the interface
// calls to its final noder.
//
// Paths build from an empty Noder slice are not valid paths and should
// not be used.
type Path []Noder

// String returns the full path of the final noder as a string, using
// "/" as the separator.
func (p Path) String() string {
	var buf bytes.Buffer
	sep := ""
	for _, e := range p {
		_, _ = buf.WriteString(sep)
		sep = "/"
		_, _ = buf.WriteString(e.Name())
	}

	return buf.String()
}

// Last returns the final noder in the path.
func (p Path) Last() Noder {
	return p[len(p)-1]
}

// Hash returns the hash of the final noder of the path.
func (p Path) Hash() []byte { return p.Last().Hash() } // Name returns the name of the final noder of the path. func (p Path) Name() string { return p.Last().Name() } // IsDir returns if the final noder of the path is a directory-like // noder. func (p Path) IsDir() bool { return p.Last().IsDir() } // Children returns the children of the final noder in the path. func (p Path) Children() ([]Noder, error) { return p.Last().Children() } // NumChildren returns the number of children the final noder of the // path has. func (p Path) NumChildren() (int, error) { return p.Last().NumChildren() } // Compare returns -1, 0 or 1 if the path p is smaller, equal or bigger // than other, in "directory order"; for example: // // "a" < "b" // "a/b/c/d/z" < "b" // "a/b/a" > "a/b" func (p Path) Compare(other Path) int { i := 0 for { switch { case len(other) == len(p) && i == len(p): return 0 case i == len(other): return 1 case i == len(p): return -1 default: // We do *not* normalize Unicode here. CGit doesn't. // https://github.com/src-d/go-git/issues/1057 cmp := strings.Compare(p[i].Name(), other[i].Name()) if cmp != 0 { return cmp } } i++ } } golang-gopkg-src-d-go-git.v4-4.11.0/utils/merkletrie/noder/path_test.go000066400000000000000000000072441345605224300256420ustar00rootroot00000000000000package noder import ( "golang.org/x/text/unicode/norm" . 
"gopkg.in/check.v1" ) type PathSuite struct{} var _ = Suite(&PathSuite{}) func (s *PathSuite) TestShortFile(c *C) { f := &noderMock{ name: "1", isDir: false, } p := Path([]Noder{f}) c.Assert(p.String(), Equals, "1") } func (s *PathSuite) TestShortDir(c *C) { d := &noderMock{ name: "1", isDir: true, children: NoChildren, } p := Path([]Noder{d}) c.Assert(p.String(), Equals, "1") } func (s *PathSuite) TestLongFile(c *C) { n3 := &noderMock{ name: "3", isDir: false, } n2 := &noderMock{ name: "2", isDir: true, children: []Noder{n3}, } n1 := &noderMock{ name: "1", isDir: true, children: []Noder{n2}, } p := Path([]Noder{n1, n2, n3}) c.Assert(p.String(), Equals, "1/2/3") } func (s *PathSuite) TestLongDir(c *C) { n3 := &noderMock{ name: "3", isDir: true, children: NoChildren, } n2 := &noderMock{ name: "2", isDir: true, children: []Noder{n3}, } n1 := &noderMock{ name: "1", isDir: true, children: []Noder{n2}, } p := Path([]Noder{n1, n2, n3}) c.Assert(p.String(), Equals, "1/2/3") } func (s *PathSuite) TestCompareDepth1(c *C) { p1 := Path([]Noder{&noderMock{name: "a"}}) p2 := Path([]Noder{&noderMock{name: "b"}}) c.Assert(p1.Compare(p2), Equals, -1) c.Assert(p2.Compare(p1), Equals, 1) p1 = Path([]Noder{&noderMock{name: "a"}}) p2 = Path([]Noder{&noderMock{name: "a"}}) c.Assert(p1.Compare(p2), Equals, 0) c.Assert(p2.Compare(p1), Equals, 0) p1 = Path([]Noder{&noderMock{name: "a.go"}}) p2 = Path([]Noder{&noderMock{name: "a"}}) c.Assert(p1.Compare(p2), Equals, 1) c.Assert(p2.Compare(p1), Equals, -1) } func (s *PathSuite) TestCompareDepth2(c *C) { p1 := Path([]Noder{ &noderMock{name: "a"}, &noderMock{name: "b"}, }) p2 := Path([]Noder{ &noderMock{name: "b"}, &noderMock{name: "a"}, }) c.Assert(p1.Compare(p2), Equals, -1) c.Assert(p2.Compare(p1), Equals, 1) p1 = Path([]Noder{ &noderMock{name: "a"}, &noderMock{name: "b"}, }) p2 = Path([]Noder{ &noderMock{name: "a"}, &noderMock{name: "b"}, }) c.Assert(p1.Compare(p2), Equals, 0) c.Assert(p2.Compare(p1), Equals, 0) p1 = Path([]Noder{ 
&noderMock{name: "a"}, &noderMock{name: "b"}, }) p2 = Path([]Noder{ &noderMock{name: "a"}, &noderMock{name: "a"}, }) c.Assert(p1.Compare(p2), Equals, 1) c.Assert(p2.Compare(p1), Equals, -1) } func (s *PathSuite) TestCompareMixedDepths(c *C) { p1 := Path([]Noder{ &noderMock{name: "a"}, &noderMock{name: "b"}, }) p2 := Path([]Noder{&noderMock{name: "b"}}) c.Assert(p1.Compare(p2), Equals, -1) c.Assert(p2.Compare(p1), Equals, 1) p1 = Path([]Noder{ &noderMock{name: "b"}, &noderMock{name: "b"}, }) p2 = Path([]Noder{&noderMock{name: "b"}}) c.Assert(p1.Compare(p2), Equals, 1) c.Assert(p2.Compare(p1), Equals, -1) p1 = Path([]Noder{&noderMock{name: "a.go"}}) p2 = Path([]Noder{ &noderMock{name: "a"}, &noderMock{name: "a.go"}, }) c.Assert(p1.Compare(p2), Equals, 1) c.Assert(p2.Compare(p1), Equals, -1) p1 = Path([]Noder{&noderMock{name: "b.go"}}) p2 = Path([]Noder{ &noderMock{name: "a"}, &noderMock{name: "a.go"}, }) c.Assert(p1.Compare(p2), Equals, 1) c.Assert(p2.Compare(p1), Equals, -1) } func (s *PathSuite) TestCompareNormalization(c *C) { p1 := Path([]Noder{&noderMock{name: norm.Form(norm.NFKC).String("페")}}) p2 := Path([]Noder{&noderMock{name: norm.Form(norm.NFKD).String("페")}}) c.Assert(p1.Compare(p2), Equals, 1) c.Assert(p2.Compare(p1), Equals, -1) p1 = Path([]Noder{&noderMock{name: "TestAppWithUnicodéPath"}}) p2 = Path([]Noder{&noderMock{name: "TestAppWithUnicodéPath"}}) c.Assert(p1.Compare(p2), Equals, -1) c.Assert(p2.Compare(p1), Equals, 1) } golang-gopkg-src-d-go-git.v4-4.11.0/utils/revlist2humantest.bash000077500000000000000000000017111345605224300244040ustar00rootroot00000000000000#!/bin/bash # you can run this over a whole repo with: # # for file in `find . 
-type f | sed 's/^\.\///' | egrep -v '^\.git\/.*$'` ; do revlist2humantest.bash $file ; done > /tmp/output # # be careful with files with spaces, though set -e repo=`git remote show origin | grep Fetch | cut -d' ' -f5` branch=`git branch | egrep '^\* .*' | cut -d' ' -f2` if [ "$#" -eq 1 ] ; then commit=`git log | head -1 | cut -d' ' -f2` path=$1 elif [ "$#" -eq 2 ] ; then commit=$1 path=$2 else echo "bad number of parameters" > /dev/stderr echo > /dev/stderr echo " try with: [commit] path" > /dev/stderr exit fi hashes=`git rev-list --remove-empty --reverse $commit -- $path` # some remotes have the .git, other don't, # repoDot makes sure all have repoDot="${repo%.git}.git" echo -e "\t&humanTest{\"${repoDot}\", \"${branch}\", \"${commit}\", \"${path}\", []string{" for i in $hashes ; do echo -e "\t\t\"${i}\"," done echo -e "\t}}," golang-gopkg-src-d-go-git.v4-4.11.0/worktree.go000066400000000000000000000464271345605224300211050ustar00rootroot00000000000000package git import ( "context" "errors" "fmt" "io" stdioutil "io/ioutil" "os" "path/filepath" "strings" "gopkg.in/src-d/go-git.v4/config" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/plumbing/format/gitignore" "gopkg.in/src-d/go-git.v4/plumbing/format/index" "gopkg.in/src-d/go-git.v4/plumbing/object" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/utils/ioutil" "gopkg.in/src-d/go-git.v4/utils/merkletrie" "gopkg.in/src-d/go-billy.v4" "gopkg.in/src-d/go-billy.v4/util" ) var ( ErrWorktreeNotClean = errors.New("worktree is not clean") ErrSubmoduleNotFound = errors.New("submodule not found") ErrUnstagedChanges = errors.New("worktree contains unstaged changes") ErrGitModulesSymlink = errors.New(gitmodulesFile + " is a symlink") ErrNonFastForwardUpdate = errors.New("non-fast-forward update") ) // Worktree represents a git worktree. type Worktree struct { // Filesystem underlying filesystem. 
Filesystem billy.Filesystem // External excludes not found in the repository .gitignore Excludes []gitignore.Pattern r *Repository } // Pull incorporates changes from a remote repository into the current branch. // Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are // no changes to be fetched, or an error. // // Pull only supports merges where the can be resolved as a fast-forward. func (w *Worktree) Pull(o *PullOptions) error { return w.PullContext(context.Background(), o) } // PullContext incorporates changes from a remote repository into the current // branch. Returns nil if the operation is successful, NoErrAlreadyUpToDate if // there are no changes to be fetched, or an error. // // Pull only supports merges where the can be resolved as a fast-forward. // // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects to the // transport operations. func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error { if err := o.Validate(); err != nil { return err } remote, err := w.r.Remote(o.RemoteName) if err != nil { return err } fetchHead, err := remote.fetch(ctx, &FetchOptions{ RemoteName: o.RemoteName, Depth: o.Depth, Auth: o.Auth, Progress: o.Progress, Force: o.Force, }) updated := true if err == NoErrAlreadyUpToDate { updated = false } else if err != nil { return err } ref, err := storer.ResolveReference(fetchHead, o.ReferenceName) if err != nil { return err } head, err := w.r.Head() if err == nil { if !updated && head.Hash() == ref.Hash() { return NoErrAlreadyUpToDate } ff, err := isFastForward(w.r.Storer, head.Hash(), ref.Hash()) if err != nil { return err } if !ff { return ErrNonFastForwardUpdate } } if err != nil && err != plumbing.ErrReferenceNotFound { return err } if err := w.updateHEAD(ref.Hash()); err != nil { return err } if err := w.Reset(&ResetOptions{ Mode: MergeReset, Commit: ref.Hash(), }); err != nil { return err } if 
o.RecurseSubmodules != NoRecurseSubmodules { return w.updateSubmodules(&SubmoduleUpdateOptions{ RecurseSubmodules: o.RecurseSubmodules, Auth: o.Auth, }) } return nil } func (w *Worktree) updateSubmodules(o *SubmoduleUpdateOptions) error { s, err := w.Submodules() if err != nil { return err } o.Init = true return s.Update(o) } // Checkout switch branches or restore working tree files. func (w *Worktree) Checkout(opts *CheckoutOptions) error { if err := opts.Validate(); err != nil { return err } if opts.Create { if err := w.createBranch(opts); err != nil { return err } } c, err := w.getCommitFromCheckoutOptions(opts) if err != nil { return err } ro := &ResetOptions{Commit: c, Mode: MergeReset} if opts.Force { ro.Mode = HardReset } if !opts.Hash.IsZero() && !opts.Create { err = w.setHEADToCommit(opts.Hash) } else { err = w.setHEADToBranch(opts.Branch, c) } if err != nil { return err } return w.Reset(ro) } func (w *Worktree) createBranch(opts *CheckoutOptions) error { _, err := w.r.Storer.Reference(opts.Branch) if err == nil { return fmt.Errorf("a branch named %q already exists", opts.Branch) } if err != plumbing.ErrReferenceNotFound { return err } if opts.Hash.IsZero() { ref, err := w.r.Head() if err != nil { return err } opts.Hash = ref.Hash() } return w.r.Storer.SetReference( plumbing.NewHashReference(opts.Branch, opts.Hash), ) } func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing.Hash, error) { if !opts.Hash.IsZero() { return opts.Hash, nil } b, err := w.r.Reference(opts.Branch, true) if err != nil { return plumbing.ZeroHash, err } if !b.Name().IsTag() { return b.Hash(), nil } o, err := w.r.Object(plumbing.AnyObject, b.Hash()) if err != nil { return plumbing.ZeroHash, err } switch o := o.(type) { case *object.Tag: if o.TargetType != plumbing.CommitObject { return plumbing.ZeroHash, fmt.Errorf("unsupported tag object target %q", o.TargetType) } return o.Target, nil case *object.Commit: return o.Hash, nil } return plumbing.ZeroHash, 
fmt.Errorf("unsupported tag target %q", o.Type()) } func (w *Worktree) setHEADToCommit(commit plumbing.Hash) error { head := plumbing.NewHashReference(plumbing.HEAD, commit) return w.r.Storer.SetReference(head) } func (w *Worktree) setHEADToBranch(branch plumbing.ReferenceName, commit plumbing.Hash) error { target, err := w.r.Storer.Reference(branch) if err != nil { return err } var head *plumbing.Reference if target.Name().IsBranch() { head = plumbing.NewSymbolicReference(plumbing.HEAD, target.Name()) } else { head = plumbing.NewHashReference(plumbing.HEAD, commit) } return w.r.Storer.SetReference(head) } // Reset the worktree to a specified state. func (w *Worktree) Reset(opts *ResetOptions) error { if err := opts.Validate(w.r); err != nil { return err } if opts.Mode == MergeReset { unstaged, err := w.containsUnstagedChanges() if err != nil { return err } if unstaged { return ErrUnstagedChanges } } if err := w.setHEADCommit(opts.Commit); err != nil { return err } if opts.Mode == SoftReset { return nil } t, err := w.getTreeFromCommitHash(opts.Commit) if err != nil { return err } if opts.Mode == MixedReset || opts.Mode == MergeReset || opts.Mode == HardReset { if err := w.resetIndex(t); err != nil { return err } } if opts.Mode == MergeReset || opts.Mode == HardReset { if err := w.resetWorktree(t); err != nil { return err } } return nil } func (w *Worktree) resetIndex(t *object.Tree) error { idx, err := w.r.Storer.Index() if err != nil { return err } changes, err := w.diffTreeWithStaging(t, true) if err != nil { return err } for _, ch := range changes { a, err := ch.Action() if err != nil { return err } var name string var e *object.TreeEntry switch a { case merkletrie.Modify, merkletrie.Insert: name = ch.To.String() e, err = t.FindEntry(name) if err != nil { return err } case merkletrie.Delete: name = ch.From.String() } _, _ = idx.Remove(name) if e == nil { continue } idx.Entries = append(idx.Entries, &index.Entry{ Name: name, Hash: e.Hash, Mode: e.Mode, }) } 
return w.r.Storer.SetIndex(idx) } func (w *Worktree) resetWorktree(t *object.Tree) error { changes, err := w.diffStagingWithWorktree(true) if err != nil { return err } idx, err := w.r.Storer.Index() if err != nil { return err } for _, ch := range changes { if err := w.checkoutChange(ch, t, idx); err != nil { return err } } return w.r.Storer.SetIndex(idx) } func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *index.Index) error { a, err := ch.Action() if err != nil { return err } var e *object.TreeEntry var name string var isSubmodule bool switch a { case merkletrie.Modify, merkletrie.Insert: name = ch.To.String() e, err = t.FindEntry(name) if err != nil { return err } isSubmodule = e.Mode == filemode.Submodule case merkletrie.Delete: return rmFileAndDirIfEmpty(w.Filesystem, ch.From.String()) } if isSubmodule { return w.checkoutChangeSubmodule(name, a, e, idx) } return w.checkoutChangeRegularFile(name, a, t, e, idx) } func (w *Worktree) containsUnstagedChanges() (bool, error) { ch, err := w.diffStagingWithWorktree(false) if err != nil { return false, err } for _, c := range ch { a, err := c.Action() if err != nil { return false, err } if a == merkletrie.Insert { continue } return true, nil } return false, nil } func (w *Worktree) setHEADCommit(commit plumbing.Hash) error { head, err := w.r.Reference(plumbing.HEAD, false) if err != nil { return err } if head.Type() == plumbing.HashReference { head = plumbing.NewHashReference(plumbing.HEAD, commit) return w.r.Storer.SetReference(head) } branch, err := w.r.Reference(head.Target(), false) if err != nil { return err } if !branch.Name().IsBranch() { return fmt.Errorf("invalid HEAD target should be a branch, found %s", branch.Type()) } branch = plumbing.NewHashReference(branch.Name(), commit) return w.r.Storer.SetReference(branch) } func (w *Worktree) checkoutChangeSubmodule(name string, a merkletrie.Action, e *object.TreeEntry, idx *index.Index, ) error { switch a { case merkletrie.Modify: sub, err 
:= w.Submodule(name) if err != nil { return err } if !sub.initialized { return nil } return w.addIndexFromTreeEntry(name, e, idx) case merkletrie.Insert: mode, err := e.Mode.ToOSFileMode() if err != nil { return err } if err := w.Filesystem.MkdirAll(name, mode); err != nil { return err } return w.addIndexFromTreeEntry(name, e, idx) } return nil } func (w *Worktree) checkoutChangeRegularFile(name string, a merkletrie.Action, t *object.Tree, e *object.TreeEntry, idx *index.Index, ) error { switch a { case merkletrie.Modify: _, _ = idx.Remove(name) // to apply perm changes the file is deleted, billy doesn't implement // chmod if err := w.Filesystem.Remove(name); err != nil { return err } fallthrough case merkletrie.Insert: f, err := t.File(name) if err != nil { return err } if err := w.checkoutFile(f); err != nil { return err } return w.addIndexFromFile(name, e.Hash, idx) } return nil } func (w *Worktree) checkoutFile(f *object.File) (err error) { mode, err := f.Mode.ToOSFileMode() if err != nil { return } if mode&os.ModeSymlink != 0 { return w.checkoutFileSymlink(f) } from, err := f.Reader() if err != nil { return } defer ioutil.CheckClose(from, &err) to, err := w.Filesystem.OpenFile(f.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm()) if err != nil { return } defer ioutil.CheckClose(to, &err) _, err = io.Copy(to, from) return } func (w *Worktree) checkoutFileSymlink(f *object.File) (err error) { from, err := f.Reader() if err != nil { return } defer ioutil.CheckClose(from, &err) bytes, err := stdioutil.ReadAll(from) if err != nil { return } err = w.Filesystem.Symlink(string(bytes), f.Name) // On windows, this might fail. // Follow Git on Windows behavior by writing the link as it is. 
if err != nil && isSymlinkWindowsNonAdmin(err) { mode, _ := f.Mode.ToOSFileMode() to, err := w.Filesystem.OpenFile(f.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm()) if err != nil { return err } defer ioutil.CheckClose(to, &err) _, err = to.Write(bytes) return err } return } func (w *Worktree) addIndexFromTreeEntry(name string, f *object.TreeEntry, idx *index.Index) error { _, _ = idx.Remove(name) idx.Entries = append(idx.Entries, &index.Entry{ Hash: f.Hash, Name: name, Mode: filemode.Submodule, }) return nil } func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, idx *index.Index) error { _, _ = idx.Remove(name) fi, err := w.Filesystem.Lstat(name) if err != nil { return err } mode, err := filemode.NewFromOSFileMode(fi.Mode()) if err != nil { return err } e := &index.Entry{ Hash: h, Name: name, Mode: mode, ModifiedAt: fi.ModTime(), Size: uint32(fi.Size()), } // if the FileInfo.Sys() comes from os the ctime, dev, inode, uid and gid // can be retrieved, otherwise this doesn't apply if fillSystemInfo != nil { fillSystemInfo(e, fi.Sys()) } idx.Entries = append(idx.Entries, e) return nil } func (w *Worktree) getTreeFromCommitHash(commit plumbing.Hash) (*object.Tree, error) { c, err := w.r.CommitObject(commit) if err != nil { return nil, err } return c.Tree() } var fillSystemInfo func(e *index.Entry, sys interface{}) const gitmodulesFile = ".gitmodules" // Submodule returns the submodule with the given name func (w *Worktree) Submodule(name string) (*Submodule, error) { l, err := w.Submodules() if err != nil { return nil, err } for _, m := range l { if m.Config().Name == name { return m, nil } } return nil, ErrSubmoduleNotFound } // Submodules returns all the available submodules func (w *Worktree) Submodules() (Submodules, error) { l := make(Submodules, 0) m, err := w.readGitmodulesFile() if err != nil || m == nil { return l, err } c, err := w.r.Config() if err != nil { return nil, err } for _, s := range m.Submodules { l = append(l, w.newSubmodule(s, 
c.Submodules[s.Name])) } return l, nil } func (w *Worktree) newSubmodule(fromModules, fromConfig *config.Submodule) *Submodule { m := &Submodule{w: w} m.initialized = fromConfig != nil if !m.initialized { m.c = fromModules return m } m.c = fromConfig m.c.Path = fromModules.Path return m } func (w *Worktree) isSymlink(path string) bool { if s, err := w.Filesystem.Lstat(path); err == nil { return s.Mode()&os.ModeSymlink != 0 } return false } func (w *Worktree) readGitmodulesFile() (*config.Modules, error) { if w.isSymlink(gitmodulesFile) { return nil, ErrGitModulesSymlink } f, err := w.Filesystem.Open(gitmodulesFile) if err != nil { if os.IsNotExist(err) { return nil, nil } return nil, err } defer f.Close() input, err := stdioutil.ReadAll(f) if err != nil { return nil, err } m := config.NewModules() return m, m.Unmarshal(input) } // Clean the worktree by removing untracked files. // An empty dir could be removed - this is what `git clean -f -d .` does. func (w *Worktree) Clean(opts *CleanOptions) error { s, err := w.Status() if err != nil { return err } root := "" files, err := w.Filesystem.ReadDir(root) if err != nil { return err } return w.doClean(s, opts, root, files) } func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files []os.FileInfo) error { for _, fi := range files { if fi.Name() == ".git" { continue } // relative path under the root path := filepath.Join(dir, fi.Name()) if fi.IsDir() { if !opts.Dir { continue } subfiles, err := w.Filesystem.ReadDir(path) if err != nil { return err } err = w.doClean(status, opts, path, subfiles) if err != nil { return err } } else { if status.IsUntracked(path) { if err := w.Filesystem.Remove(path); err != nil { return err } } } } if opts.Dir { return doCleanDirectories(w.Filesystem, dir) } return nil } // GrepResult is structure of a grep result. type GrepResult struct { // FileName is the name of file which contains match. 
FileName string // LineNumber is the line number of a file at which a match was found. LineNumber int // Content is the content of the file at the matching line. Content string // TreeName is the name of the tree (reference name/commit hash) at // which the match was performed. TreeName string } func (gr GrepResult) String() string { return fmt.Sprintf("%s:%s:%d:%s", gr.TreeName, gr.FileName, gr.LineNumber, gr.Content) } // Grep performs grep on a worktree. func (w *Worktree) Grep(opts *GrepOptions) ([]GrepResult, error) { if err := opts.Validate(w); err != nil { return nil, err } // Obtain commit hash from options (CommitHash or ReferenceName). var commitHash plumbing.Hash // treeName contains the value of TreeName in GrepResult. var treeName string if opts.ReferenceName != "" { ref, err := w.r.Reference(opts.ReferenceName, true) if err != nil { return nil, err } commitHash = ref.Hash() treeName = opts.ReferenceName.String() } else if !opts.CommitHash.IsZero() { commitHash = opts.CommitHash treeName = opts.CommitHash.String() } // Obtain a tree from the commit hash and get a tracked files iterator from // the tree. tree, err := w.getTreeFromCommitHash(commitHash) if err != nil { return nil, err } fileiter := tree.Files() return findMatchInFiles(fileiter, treeName, opts) } // findMatchInFiles takes a FileIter, worktree name and GrepOptions, and // returns a slice of GrepResult containing the result of regex pattern matching // in content of all the files. func findMatchInFiles(fileiter *object.FileIter, treeName string, opts *GrepOptions) ([]GrepResult, error) { var results []GrepResult err := fileiter.ForEach(func(file *object.File) error { var fileInPathSpec bool // When no pathspecs are provided, search all the files. if len(opts.PathSpecs) == 0 { fileInPathSpec = true } // Check if the file name matches with the pathspec. Break out of the // loop once a match is found. 
for _, pathSpec := range opts.PathSpecs { if pathSpec != nil && pathSpec.MatchString(file.Name) { fileInPathSpec = true break } } // If the file does not match with any of the pathspec, skip it. if !fileInPathSpec { return nil } grepResults, err := findMatchInFile(file, treeName, opts) if err != nil { return err } results = append(results, grepResults...) return nil }) return results, err } // findMatchInFile takes a single File, worktree name and GrepOptions, // and returns a slice of GrepResult containing the result of regex pattern // matching in the given file. func findMatchInFile(file *object.File, treeName string, opts *GrepOptions) ([]GrepResult, error) { var grepResults []GrepResult content, err := file.Contents() if err != nil { return grepResults, err } // Split the file content and parse line-by-line. contentByLine := strings.Split(content, "\n") for lineNum, cnt := range contentByLine { addToResult := false // Match the patterns and content. Break out of the loop once a // match is found. for _, pattern := range opts.Patterns { if pattern != nil && pattern.MatchString(cnt) { // Add to result only if invert match is not enabled. if !opts.InvertMatch { addToResult = true break } } else if opts.InvertMatch { // If matching fails, and invert match is enabled, add to // results. 
addToResult = true break } } if addToResult { grepResults = append(grepResults, GrepResult{ FileName: file.Name, LineNumber: lineNum + 1, Content: cnt, TreeName: treeName, }) } } return grepResults, nil } func rmFileAndDirIfEmpty(fs billy.Filesystem, name string) error { if err := util.RemoveAll(fs, name); err != nil { return err } dir := filepath.Dir(name) return doCleanDirectories(fs, dir) } // doCleanDirectories removes empty subdirs (without files) func doCleanDirectories(fs billy.Filesystem, dir string) error { files, err := fs.ReadDir(dir) if err != nil { return err } if len(files) == 0 { return fs.Remove(dir) } return nil } golang-gopkg-src-d-go-git.v4-4.11.0/worktree_bsd.go000066400000000000000000000007451345605224300217260ustar00rootroot00000000000000// +build darwin freebsd netbsd package git import ( "syscall" "time" "gopkg.in/src-d/go-git.v4/plumbing/format/index" ) func init() { fillSystemInfo = func(e *index.Entry, sys interface{}) { if os, ok := sys.(*syscall.Stat_t); ok { e.CreatedAt = time.Unix(int64(os.Atimespec.Sec), int64(os.Atimespec.Nsec)) e.Dev = uint32(os.Dev) e.Inode = uint32(os.Ino) e.GID = os.Gid e.UID = os.Uid } } } func isSymlinkWindowsNonAdmin(err error) bool { return false } golang-gopkg-src-d-go-git.v4-4.11.0/worktree_commit.go000066400000000000000000000123611345605224300224430ustar00rootroot00000000000000package git import ( "bytes" "path" "sort" "strings" "golang.org/x/crypto/openpgp" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/plumbing/format/index" "gopkg.in/src-d/go-git.v4/plumbing/object" "gopkg.in/src-d/go-git.v4/storage" "gopkg.in/src-d/go-billy.v4" ) // Commit stores the current contents of the index in a new commit along with // a log message from the user describing the changes. 
func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error) { if err := opts.Validate(w.r); err != nil { return plumbing.ZeroHash, err } if opts.All { if err := w.autoAddModifiedAndDeleted(); err != nil { return plumbing.ZeroHash, err } } idx, err := w.r.Storer.Index() if err != nil { return plumbing.ZeroHash, err } h := &buildTreeHelper{ fs: w.Filesystem, s: w.r.Storer, } tree, err := h.BuildTree(idx) if err != nil { return plumbing.ZeroHash, err } commit, err := w.buildCommitObject(msg, opts, tree) if err != nil { return plumbing.ZeroHash, err } return commit, w.updateHEAD(commit) } func (w *Worktree) autoAddModifiedAndDeleted() error { s, err := w.Status() if err != nil { return err } for path, fs := range s { if fs.Worktree != Modified && fs.Worktree != Deleted { continue } if _, err := w.Add(path); err != nil { return err } } return nil } func (w *Worktree) updateHEAD(commit plumbing.Hash) error { head, err := w.r.Storer.Reference(plumbing.HEAD) if err != nil { return err } name := plumbing.HEAD if head.Type() != plumbing.HashReference { name = head.Target() } ref := plumbing.NewHashReference(name, commit) return w.r.Storer.SetReference(ref) } func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumbing.Hash) (plumbing.Hash, error) { commit := &object.Commit{ Author: *opts.Author, Committer: *opts.Committer, Message: msg, TreeHash: tree, ParentHashes: opts.Parents, } if opts.SignKey != nil { sig, err := w.buildCommitSignature(commit, opts.SignKey) if err != nil { return plumbing.ZeroHash, err } commit.PGPSignature = sig } obj := w.r.Storer.NewEncodedObject() if err := commit.Encode(obj); err != nil { return plumbing.ZeroHash, err } return w.r.Storer.SetEncodedObject(obj) } func (w *Worktree) buildCommitSignature(commit *object.Commit, signKey *openpgp.Entity) (string, error) { encoded := &plumbing.MemoryObject{} if err := commit.Encode(encoded); err != nil { return "", err } r, err := encoded.Reader() if err != nil 
{ return "", err } var b bytes.Buffer if err := openpgp.ArmoredDetachSign(&b, signKey, r, nil); err != nil { return "", err } return b.String(), nil } // buildTreeHelper converts a given index.Index file into multiple git objects // reading the blobs from the given filesystem and creating the trees from the // index structure. The created objects are pushed to a given Storer. type buildTreeHelper struct { fs billy.Filesystem s storage.Storer trees map[string]*object.Tree entries map[string]*object.TreeEntry } // BuildTree builds the tree objects and push its to the storer, the hash // of the root tree is returned. func (h *buildTreeHelper) BuildTree(idx *index.Index) (plumbing.Hash, error) { const rootNode = "" h.trees = map[string]*object.Tree{rootNode: {}} h.entries = map[string]*object.TreeEntry{} for _, e := range idx.Entries { if err := h.commitIndexEntry(e); err != nil { return plumbing.ZeroHash, err } } return h.copyTreeToStorageRecursive(rootNode, h.trees[rootNode]) } func (h *buildTreeHelper) commitIndexEntry(e *index.Entry) error { parts := strings.Split(e.Name, "/") var fullpath string for _, part := range parts { parent := fullpath fullpath = path.Join(fullpath, part) h.doBuildTree(e, parent, fullpath) } return nil } func (h *buildTreeHelper) doBuildTree(e *index.Entry, parent, fullpath string) { if _, ok := h.trees[fullpath]; ok { return } if _, ok := h.entries[fullpath]; ok { return } te := object.TreeEntry{Name: path.Base(fullpath)} if fullpath == e.Name { te.Mode = e.Mode te.Hash = e.Hash } else { te.Mode = filemode.Dir h.trees[fullpath] = &object.Tree{} } h.trees[parent].Entries = append(h.trees[parent].Entries, te) } type sortableEntries []object.TreeEntry func (sortableEntries) sortName(te object.TreeEntry) string { if te.Mode == filemode.Dir { return te.Name + "/" } return te.Name } func (se sortableEntries) Len() int { return len(se) } func (se sortableEntries) Less(i int, j int) bool { return se.sortName(se[i]) < se.sortName(se[j]) } func (se 
sortableEntries) Swap(i int, j int) { se[i], se[j] = se[j], se[i] } func (h *buildTreeHelper) copyTreeToStorageRecursive(parent string, t *object.Tree) (plumbing.Hash, error) { sort.Sort(sortableEntries(t.Entries)) for i, e := range t.Entries { if e.Mode != filemode.Dir && !e.Hash.IsZero() { continue } path := path.Join(parent, e.Name) var err error e.Hash, err = h.copyTreeToStorageRecursive(path, h.trees[path]) if err != nil { return plumbing.ZeroHash, err } t.Entries[i] = e } o := h.s.NewEncodedObject() if err := t.Encode(o); err != nil { return plumbing.ZeroHash, err } return h.s.SetEncodedObject(o) } golang-gopkg-src-d-go-git.v4-4.11.0/worktree_commit_test.go000066400000000000000000000250421345605224300235020ustar00rootroot00000000000000package git import ( "bytes" "io/ioutil" "os" "os/exec" "strings" "time" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/object" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/filesystem" "gopkg.in/src-d/go-git.v4/storage/memory" "golang.org/x/crypto/openpgp" "golang.org/x/crypto/openpgp/armor" "golang.org/x/crypto/openpgp/errors" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4/memfs" "gopkg.in/src-d/go-billy.v4/osfs" "gopkg.in/src-d/go-billy.v4/util" ) func (s *WorktreeSuite) TestCommitInvalidOptions(c *C) { r, err := Init(memory.NewStorage(), memfs.New()) c.Assert(err, IsNil) w, err := r.Worktree() c.Assert(err, IsNil) hash, err := w.Commit("", &CommitOptions{}) c.Assert(err, Equals, ErrMissingAuthor) c.Assert(hash.IsZero(), Equals, true) } func (s *WorktreeSuite) TestCommitInitial(c *C) { expected := plumbing.NewHash("98c4ac7c29c913f7461eae06e024dc18e80d23a4") fs := memfs.New() storage := memory.NewStorage() r, err := Init(storage, fs) c.Assert(err, IsNil) w, err := r.Worktree() c.Assert(err, IsNil) util.WriteFile(fs, "foo", []byte("foo"), 0644) _, err = w.Add("foo") c.Assert(err, IsNil) hash, err := w.Commit("foo\n", &CommitOptions{Author: defaultSignature()}) c.Assert(hash, Equals, expected) c.Assert(err, IsNil) assertStorageStatus(c, r, 1, 1, 1, expected) } func (s *WorktreeSuite) TestCommitParent(c *C) { expected := plumbing.NewHash("ef3ca05477530b37f48564be33ddd48063fc7a22") fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) util.WriteFile(fs, "foo", []byte("foo"), 0644) _, err = w.Add("foo") c.Assert(err, IsNil) hash, err := w.Commit("foo\n", &CommitOptions{Author: defaultSignature()}) c.Assert(hash, Equals, expected) c.Assert(err, IsNil) assertStorageStatus(c, s.Repository, 13, 11, 10, expected) } func (s *WorktreeSuite) TestCommitAll(c *C) { expected := plumbing.NewHash("aede6f8c9c1c7ec9ca8d287c64b8ed151276fa28") fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) util.WriteFile(fs, "LICENSE", []byte("foo"), 0644) util.WriteFile(fs, "foo", []byte("foo"), 0644) hash, err := w.Commit("foo\n", &CommitOptions{ All: true, Author: defaultSignature(), }) c.Assert(hash, Equals, expected) c.Assert(err, IsNil) assertStorageStatus(c, 
s.Repository, 13, 11, 10, expected) } func (s *WorktreeSuite) TestRemoveAndCommitAll(c *C) { expected := plumbing.NewHash("907cd576c6ced2ecd3dab34a72bf9cf65944b9a9") fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) util.WriteFile(fs, "foo", []byte("foo"), 0644) _, err = w.Add("foo") c.Assert(err, IsNil) _, errFirst := w.Commit("Add in Repo\n", &CommitOptions{ Author: defaultSignature(), }) c.Assert(errFirst, IsNil) errRemove := fs.Remove("foo") c.Assert(errRemove, IsNil) hash, errSecond := w.Commit("Remove foo\n", &CommitOptions{ All: true, Author: defaultSignature(), }) c.Assert(errSecond, IsNil) c.Assert(hash, Equals, expected) c.Assert(err, IsNil) assertStorageStatus(c, s.Repository, 13, 11, 11, expected) } func (s *WorktreeSuite) TestCommitSign(c *C) { fs := memfs.New() storage := memory.NewStorage() r, err := Init(storage, fs) c.Assert(err, IsNil) w, err := r.Worktree() c.Assert(err, IsNil) util.WriteFile(fs, "foo", []byte("foo"), 0644) _, err = w.Add("foo") c.Assert(err, IsNil) key := commitSignKey(c, true) hash, err := w.Commit("foo\n", &CommitOptions{Author: defaultSignature(), SignKey: key}) c.Assert(err, IsNil) // Verify the commit. 
pks := new(bytes.Buffer) pkw, err := armor.Encode(pks, openpgp.PublicKeyType, nil) c.Assert(err, IsNil) err = key.Serialize(pkw) c.Assert(err, IsNil) err = pkw.Close() c.Assert(err, IsNil) expectedCommit, err := r.CommitObject(hash) c.Assert(err, IsNil) actual, err := expectedCommit.Verify(pks.String()) c.Assert(err, IsNil) c.Assert(actual.PrimaryKey, DeepEquals, key.PrimaryKey) } func (s *WorktreeSuite) TestCommitSignBadKey(c *C) { fs := memfs.New() storage := memory.NewStorage() r, err := Init(storage, fs) c.Assert(err, IsNil) w, err := r.Worktree() c.Assert(err, IsNil) util.WriteFile(fs, "foo", []byte("foo"), 0644) _, err = w.Add("foo") c.Assert(err, IsNil) key := commitSignKey(c, false) _, err = w.Commit("foo\n", &CommitOptions{Author: defaultSignature(), SignKey: key}) c.Assert(err, Equals, errors.InvalidArgumentError("signing key is encrypted")) } func (s *WorktreeSuite) TestCommitTreeSort(c *C) { path, err := ioutil.TempDir(os.TempDir(), "test-commit-tree-sort") c.Assert(err, IsNil) fs := osfs.New(path) st := filesystem.NewStorage(fs, cache.NewObjectLRUDefault()) r, err := Init(st, nil) c.Assert(err, IsNil) r, err = Clone(memory.NewStorage(), memfs.New(), &CloneOptions{ URL: path, }) w, err := r.Worktree() c.Assert(err, IsNil) mfs := w.Filesystem err = mfs.MkdirAll("delta", 0755) c.Assert(err, IsNil) for _, p := range []string{"delta_last", "Gamma", "delta/middle", "Beta", "delta-first", "alpha"} { util.WriteFile(mfs, p, []byte("foo"), 0644) _, err = w.Add(p) c.Assert(err, IsNil) } _, err = w.Commit("foo\n", &CommitOptions{ All: true, Author: defaultSignature(), }) c.Assert(err, IsNil) err = r.Push(&PushOptions{}) c.Assert(err, IsNil) cmd := exec.Command("git", "fsck") cmd.Dir = path cmd.Env = os.Environ() buf := &bytes.Buffer{} cmd.Stderr = buf cmd.Stdout = buf err = cmd.Run() c.Assert(err, IsNil, Commentf("%s", buf.Bytes())) } func assertStorageStatus( c *C, r *Repository, treesCount, blobCount, commitCount int, head plumbing.Hash, ) { trees, err := 
r.Storer.IterEncodedObjects(plumbing.TreeObject) c.Assert(err, IsNil) blobs, err := r.Storer.IterEncodedObjects(plumbing.BlobObject) c.Assert(err, IsNil) commits, err := r.Storer.IterEncodedObjects(plumbing.CommitObject) c.Assert(err, IsNil) c.Assert(lenIterEncodedObjects(trees), Equals, treesCount) c.Assert(lenIterEncodedObjects(blobs), Equals, blobCount) c.Assert(lenIterEncodedObjects(commits), Equals, commitCount) ref, err := r.Head() c.Assert(err, IsNil) c.Assert(ref.Hash(), Equals, head) } func lenIterEncodedObjects(iter storer.EncodedObjectIter) int { count := 0 iter.ForEach(func(plumbing.EncodedObject) error { count++ return nil }) return count } func defaultSignature() *object.Signature { when, _ := time.Parse(object.DateFormat, "Thu May 04 00:03:43 2017 +0200") return &object.Signature{ Name: "foo", Email: "foo@foo.foo", When: when, } } func commitSignKey(c *C, decrypt bool) *openpgp.Entity { s := strings.NewReader(armoredKeyRing) es, err := openpgp.ReadArmoredKeyRing(s) c.Assert(err, IsNil) c.Assert(es, HasLen, 1) c.Assert(es[0].Identities, HasLen, 1) _, ok := es[0].Identities["foo bar "] c.Assert(ok, Equals, true) key := es[0] if decrypt { err = key.PrivateKey.Decrypt([]byte(keyPassphrase)) c.Assert(err, IsNil) } return key } const armoredKeyRing = ` -----BEGIN PGP PRIVATE KEY BLOCK----- lQdGBFt89QIBEAC8du0Purt9yeFuLlBYHcexnZvcbaci2pY+Ejn1VnxM7caFxRX/ b2weZi9E6+I0F+K/hKIaidPdcbK92UCL0Vp6F3izjqategZ7o44vlK/HfWFME4wv sou6lnig9ovA73HRyzngi3CmqWxSdg8lL0kIJLNzlvCFEd4Z34BnEkagklQJRymo 0WnmLJjSnZFT5Nk7q5jrcR7ApbD98cakvgivDlUBPJCk2JFPWheCkouWPHMvLXQz bZXW5RFz4lJsMUWa/S3ofvIOnjG5Etnil3IA4uksS8fSDkGus998mBvUwzqX7xBh dK17ZEbxDdO4PuVJDkjvq618rMu8FVk5yVd59rUketSnGrehd/+vdh6qtgQC4tu1 RldbUVAuKZGg79H61nWnvrDZmbw4eoqCEuv1+aZsM9ElSC5Ps2J0rtpHRyBndKn+ 8Jlc/KTH04/O+FAhEv0IgMTFEm3iAq8udBhRBgu6Y4gJyn4tqy6+6ZjPUNos8GOG +ZJPdrgHHHfQged1ygeceN6W2AwQRet/B3/rieHf2V93uHJy/DjYUEuBhPm9nxqi R6ILUr97Sj2EsvLyfQO9pFpIctoNKEJmDx/C9tkFMNNlQhpsBitSdR2/wancw9ND 
iWV/J9roUdC0qns7eNSbiFe3Len8Xir7srnjAFgbGvOu9jDBUuiKGT5F3wARAQAB /gcDAl+0SktmjrUW8uwpvru6GeIeo5kc4rXuD7iIxH6nDl3nmjZMX7qWvp+pRTHH 0hEDH44899PDvzclBN3ouehfFUbJ+DBy8umBiLqF8Mu2PrKjdmyv3BvnbTkqPM3m 2Su7WmUDBhG00X07lfl8fTpZJG80onEGzGynryP/xVm4ymzoHyYGksntXLYr2HJ5 aV6L7sL2/STsaaOVHoa/oEmVBo1+NRsTxRRUcFVLs3g0OIi6ZCeSevBdavMwf9Iv b5Bs/e0+GLpP71XzFpdrGcL6oGjZH/dgdeypzbGA+FHtQJqynN3qEE9eCc9cfTGL 2zN2OtnMA28NtPVN4SnSxQIDvycWx68NZjfwLOK+gswfKpimp+6xMWSnNIRDyU9M w0hdNPMK9JAxm/MlnkR7x6ysX/8vrVVFl9gWOmxzJ5L4kvfMsHcV5ZFRP8OnVA6a NFBWIBGXF1uQC4qrXup/xKyWJOoH++cMo2cjPT3+3oifZgdBydVfHXjS9aQ/S3Sa A6henWyx/qeBGPVRuXWdXIOKDboOPK8JwQaGd6yazKkH9c5tDohmQHzZ6ho0gyAt dh+g9ZyiZVpjc6excfK/DP/RdUOYKw3Ur9652hKephvYZzHvPjTbqVkhS7JjZkVY rukQ64d5T0pE1B4y+If4hLFXMNQtfo0TIsATNA69jop+KFnJpLzAB+Ee33EA/HUl YC5EJCJaXt6kdtYFac0HvVWiz5ZuMhdtzpJfvOe+Olp/xR9nIPW3XZojQoHIZKwu gXeZeVMvfeoq+ymKAKNH5Np4WaUDF7Wh9VLl045jGyF5viyy61ivC0eyAzp5W1uy gJBZwafVma5MhmZUS2dFs0hBwBrKRzZZhN65VvfSYw6CnXp83ryUjReDvrLmqZDM FNpSMDKRk1+k9Wwi3m+fzLAvlxoHscJ5Any7ApsvBRbyehP8MAAG7UV3jImugTLi yN6FKVwziQXiC4/97oKbA1YYNjTT7Qw9gWTXvLRspn4f9997brcA9dm0M0seTjLa lc5hTJwJQdvPPI2klf+YgPvsD6nrP1moeWBb8irICqG1/BoE0JHPS+bqJ1J+m1iV kRV/+4pV2bLlXKqg1LEvqANW+1P1eM2nbbVB7EQn8ZOPIKMoCLoC1QWUPNfnemsW U5ynAbhsbm16PDJql0ApEgUCEDfsXTu1ui6SIO3bs/gWyD9HEmnfaYMYDKF+j+0r jXd4GnCxb+Yu3wV5WyewOHouzC+++h/3WcDLkOYZ9pcIbA86qT+v6b9MuTAU0D3c wlDv8r5J59zOcXl4HpMb2BY5F9dZn8hjgeVJRhJdij9x1TQ8qlVasSi4Eq8SiPmZ PZz33Pk6yn2caQ6wd47A79LXCbFQqJqA5aA6oS4DOpENGS5fh7WUZq/MTcmm9GsG w2gHxocASK9RCUYgZFWVYgLDuviMMWvc/2TJcTMxdF0Amu3erYAD90smFs0g/6fZ 4pRLnKFuifwAMGMOx7jbW5tmOaSPx6XkuYvkDJeLMHoN3z/8bZEG5VpayypwFGyV bk/YIUWg/KM/43juDPdTvab9tZzYIjxC6on7dtYIAGjZis97XZou3KYKTaMe1VY6 IhrnVzJ0JAHpd1prf9NUz96e1vjGdn3I61JgjNp5sWklIJEZzvaD28Eovf/LH1BO gYFFCvsWXaRoPHNQ5a9m7CROkLeHUFgRu5uriqHxxQHgogDznc8/3fnvDAHNpNb6 Jnk4zaeVR3tTyIjiNM+wxUFPDNFpJWmQbSDCcPVYTbpznzVRnhqrw7q0FWZvbyBi YXIgPGZvb0Bmb28uZm9vPokCVAQTAQgAPgIbAwULCQgHAgYVCAkKCwIEFgIDAQIe 
AQIXgBYhBJOhf/AeVDKFRgh8jgKTlUAu/M1TBQJbfPU4BQkSzAM2AAoJEAKTlUAu /M1TVTIQALA6ocNc2fXz1loLykMxlfnX/XxiyNDOUPDZkrZtscqqWPYaWvJK3OiD 32bdVEbftnAiFvJYkinrCXLEmwwf5wyOxKFmCHwwKhH0UYt60yF4WwlOVNstGSAy RkPMEEmVfMXS9K1nzKv/9A5YsqMQob7sN5CMN66Vrm0RKSvOF/NhhM9v8fC0QSU2 GZNO0tnRfaS4wMnFr5L4FuDST+14F5sJT7ZEJz7HfbxXKLvvWbvqLlCYHJOdz56s X/eKde8eT9/LSzcmgsd7rGS2np5901kubww5jllUl1CFnk3Mdg9FTJl5u9Epuhnn 823Jpdy1ZNbyLqZ266Z/q2HepDA7P/GqIXgWdHjwG2y1YAC4JIkA4RBbesQwqAXs 6cX5gqRFRl5iDGEP5zclS0y5mWi/J8bLYxMYfqxs9EZtHd9DumWISi87804TEzYa WDijMlW7PR8QRW0vdmtYOhJZOlTnomLQx2v27iqpVXRh12J1aYVBFC+IvG1vhCf9 FL3LzAHHEGlIoDaKJMd+Wg/Lm/f1PqqQx3lWIh9hhKh5Qx6hcuJH669JOWuEdxfo 1so50aItG+tdDKqXflmOi7grrUURchYYKteaW2fC2SQgzDClprALI7aj9s/lDrEN CgLH6twOqdSFWqB/4ASDMsNeLeKX3WOYKYYMlE01cj3T1m6dpRUO =gIM9 -----END PGP PRIVATE KEY BLOCK----- ` const keyPassphrase = "abcdef0123456789" golang-gopkg-src-d-go-git.v4-4.11.0/worktree_linux.go000066400000000000000000000007131345605224300223100ustar00rootroot00000000000000// +build linux package git import ( "syscall" "time" "gopkg.in/src-d/go-git.v4/plumbing/format/index" ) func init() { fillSystemInfo = func(e *index.Entry, sys interface{}) { if os, ok := sys.(*syscall.Stat_t); ok { e.CreatedAt = time.Unix(int64(os.Ctim.Sec), int64(os.Ctim.Nsec)) e.Dev = uint32(os.Dev) e.Inode = uint32(os.Ino) e.GID = os.Gid e.UID = os.Uid } } } func isSymlinkWindowsNonAdmin(err error) bool { return false } golang-gopkg-src-d-go-git.v4-4.11.0/worktree_status.go000066400000000000000000000336751345605224300225110ustar00rootroot00000000000000package git import ( "bytes" "errors" "io" "os" "path" "path/filepath" "gopkg.in/src-d/go-billy.v4/util" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/plumbing/format/gitignore" "gopkg.in/src-d/go-git.v4/plumbing/format/index" "gopkg.in/src-d/go-git.v4/plumbing/object" "gopkg.in/src-d/go-git.v4/utils/ioutil" "gopkg.in/src-d/go-git.v4/utils/merkletrie" 
"gopkg.in/src-d/go-git.v4/utils/merkletrie/filesystem" mindex "gopkg.in/src-d/go-git.v4/utils/merkletrie/index" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) var ( // ErrDestinationExists in an Move operation means that the target exists on // the worktree. ErrDestinationExists = errors.New("destination exists") // ErrGlobNoMatches in an AddGlob if the glob pattern does not match any // files in the worktree. ErrGlobNoMatches = errors.New("glob pattern did not match any files") ) // Status returns the working tree status. func (w *Worktree) Status() (Status, error) { var hash plumbing.Hash ref, err := w.r.Head() if err != nil && err != plumbing.ErrReferenceNotFound { return nil, err } if err == nil { hash = ref.Hash() } return w.status(hash) } func (w *Worktree) status(commit plumbing.Hash) (Status, error) { s := make(Status) left, err := w.diffCommitWithStaging(commit, false) if err != nil { return nil, err } for _, ch := range left { a, err := ch.Action() if err != nil { return nil, err } fs := s.File(nameFromAction(&ch)) fs.Worktree = Unmodified switch a { case merkletrie.Delete: s.File(ch.From.String()).Staging = Deleted case merkletrie.Insert: s.File(ch.To.String()).Staging = Added case merkletrie.Modify: s.File(ch.To.String()).Staging = Modified } } right, err := w.diffStagingWithWorktree(false) if err != nil { return nil, err } for _, ch := range right { a, err := ch.Action() if err != nil { return nil, err } fs := s.File(nameFromAction(&ch)) if fs.Staging == Untracked { fs.Staging = Unmodified } switch a { case merkletrie.Delete: fs.Worktree = Deleted case merkletrie.Insert: fs.Worktree = Untracked fs.Staging = Untracked case merkletrie.Modify: fs.Worktree = Modified } } return s, nil } func nameFromAction(ch *merkletrie.Change) string { name := ch.To.String() if name == "" { return ch.From.String() } return name } func (w *Worktree) diffStagingWithWorktree(reverse bool) (merkletrie.Changes, error) { idx, err := w.r.Storer.Index() if err != nil { 
return nil, err } from := mindex.NewRootNode(idx) submodules, err := w.getSubmodulesStatus() if err != nil { return nil, err } to := filesystem.NewRootNode(w.Filesystem, submodules) var c merkletrie.Changes if reverse { c, err = merkletrie.DiffTree(to, from, diffTreeIsEquals) } else { c, err = merkletrie.DiffTree(from, to, diffTreeIsEquals) } if err != nil { return nil, err } return w.excludeIgnoredChanges(c), nil } func (w *Worktree) excludeIgnoredChanges(changes merkletrie.Changes) merkletrie.Changes { patterns, err := gitignore.ReadPatterns(w.Filesystem, nil) if err != nil { return changes } patterns = append(patterns, w.Excludes...) if len(patterns) == 0 { return changes } m := gitignore.NewMatcher(patterns) var res merkletrie.Changes for _, ch := range changes { var path []string for _, n := range ch.To { path = append(path, n.Name()) } if len(path) == 0 { for _, n := range ch.From { path = append(path, n.Name()) } } if len(path) != 0 { isDir := (len(ch.To) > 0 && ch.To.IsDir()) || (len(ch.From) > 0 && ch.From.IsDir()) if m.Match(path, isDir) { continue } } res = append(res, ch) } return res } func (w *Worktree) getSubmodulesStatus() (map[string]plumbing.Hash, error) { o := map[string]plumbing.Hash{} sub, err := w.Submodules() if err != nil { return nil, err } status, err := sub.Status() if err != nil { return nil, err } for _, s := range status { if s.Current.IsZero() { o[s.Path] = s.Expected continue } o[s.Path] = s.Current } return o, nil } func (w *Worktree) diffCommitWithStaging(commit plumbing.Hash, reverse bool) (merkletrie.Changes, error) { var t *object.Tree if !commit.IsZero() { c, err := w.r.CommitObject(commit) if err != nil { return nil, err } t, err = c.Tree() if err != nil { return nil, err } } return w.diffTreeWithStaging(t, reverse) } func (w *Worktree) diffTreeWithStaging(t *object.Tree, reverse bool) (merkletrie.Changes, error) { var from noder.Noder if t != nil { from = object.NewTreeRootNode(t) } idx, err := w.r.Storer.Index() if err != 
nil { return nil, err } to := mindex.NewRootNode(idx) if reverse { return merkletrie.DiffTree(to, from, diffTreeIsEquals) } return merkletrie.DiffTree(from, to, diffTreeIsEquals) } var emptyNoderHash = make([]byte, 24) // diffTreeIsEquals is a implementation of noder.Equals, used to compare // noder.Noder, it compare the content and the length of the hashes. // // Since some of the noder.Noder implementations doesn't compute a hash for // some directories, if any of the hashes is a 24-byte slice of zero values // the comparison is not done and the hashes are take as different. func diffTreeIsEquals(a, b noder.Hasher) bool { hashA := a.Hash() hashB := b.Hash() if bytes.Equal(hashA, emptyNoderHash) || bytes.Equal(hashB, emptyNoderHash) { return false } return bytes.Equal(hashA, hashB) } // Add adds the file contents of a file in the worktree to the index. if the // file is already staged in the index no error is returned. If a file deleted // from the Workspace is given, the file is removed from the index. If a // directory given, adds the files and all his sub-directories recursively in // the worktree to the index. If any of the files is already staged in the index // no error is returned. When path is a file, the blob.Hash is returned. func (w *Worktree) Add(path string) (plumbing.Hash, error) { // TODO(mcuadros): remove plumbing.Hash from signature at v5. 
s, err := w.Status() if err != nil { return plumbing.ZeroHash, err } idx, err := w.r.Storer.Index() if err != nil { return plumbing.ZeroHash, err } var h plumbing.Hash var added bool fi, err := w.Filesystem.Lstat(path) if err != nil || !fi.IsDir() { added, h, err = w.doAddFile(idx, s, path) } else { added, err = w.doAddDirectory(idx, s, path) } if err != nil { return h, err } if !added { return h, nil } return h, w.r.Storer.SetIndex(idx) } func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string) (added bool, err error) { files, err := w.Filesystem.ReadDir(directory) if err != nil { return false, err } for _, file := range files { name := path.Join(directory, file.Name()) var a bool if file.IsDir() { if file.Name() == GitDirName { // ignore special git directory continue } a, err = w.doAddDirectory(idx, s, name) } else { a, _, err = w.doAddFile(idx, s, name) } if err != nil { return } if !added && a { added = true } } return } // AddGlob adds all paths, matching pattern, to the index. If pattern matches a // directory path, all directory contents are added to the index recursively. No // error is returned if all matching paths are already staged in index. func (w *Worktree) AddGlob(pattern string) error { files, err := util.Glob(w.Filesystem, pattern) if err != nil { return err } if len(files) == 0 { return ErrGlobNoMatches } s, err := w.Status() if err != nil { return err } idx, err := w.r.Storer.Index() if err != nil { return err } var saveIndex bool for _, file := range files { fi, err := w.Filesystem.Lstat(file) if err != nil { return err } var added bool if fi.IsDir() { added, err = w.doAddDirectory(idx, s, file) } else { added, _, err = w.doAddFile(idx, s, file) } if err != nil { return err } if !saveIndex && added { saveIndex = true } } if saveIndex { return w.r.Storer.SetIndex(idx) } return nil } // doAddFile create a new blob from path and update the index, added is true if // the file added is different from the index. 
func (w *Worktree) doAddFile(idx *index.Index, s Status, path string) (added bool, h plumbing.Hash, err error) { if s.File(path).Worktree == Unmodified { return false, h, nil } h, err = w.copyFileToStorage(path) if err != nil { if os.IsNotExist(err) { added = true h, err = w.deleteFromIndex(idx, path) } return } if err := w.addOrUpdateFileToIndex(idx, path, h); err != nil { return false, h, err } return true, h, err } func (w *Worktree) copyFileToStorage(path string) (hash plumbing.Hash, err error) { fi, err := w.Filesystem.Lstat(path) if err != nil { return plumbing.ZeroHash, err } obj := w.r.Storer.NewEncodedObject() obj.SetType(plumbing.BlobObject) obj.SetSize(fi.Size()) writer, err := obj.Writer() if err != nil { return plumbing.ZeroHash, err } defer ioutil.CheckClose(writer, &err) if fi.Mode()&os.ModeSymlink != 0 { err = w.fillEncodedObjectFromSymlink(writer, path, fi) } else { err = w.fillEncodedObjectFromFile(writer, path, fi) } if err != nil { return plumbing.ZeroHash, err } return w.r.Storer.SetEncodedObject(obj) } func (w *Worktree) fillEncodedObjectFromFile(dst io.Writer, path string, fi os.FileInfo) (err error) { src, err := w.Filesystem.Open(path) if err != nil { return err } defer ioutil.CheckClose(src, &err) if _, err := io.Copy(dst, src); err != nil { return err } return err } func (w *Worktree) fillEncodedObjectFromSymlink(dst io.Writer, path string, fi os.FileInfo) error { target, err := w.Filesystem.Readlink(path) if err != nil { return err } _, err = dst.Write([]byte(target)) return err } func (w *Worktree) addOrUpdateFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error { e, err := idx.Entry(filename) if err != nil && err != index.ErrEntryNotFound { return err } if err == index.ErrEntryNotFound { return w.doAddFileToIndex(idx, filename, h) } return w.doUpdateFileToIndex(e, filename, h) } func (w *Worktree) doAddFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error { return w.doUpdateFileToIndex(idx.Add(filename), 
filename, h) } func (w *Worktree) doUpdateFileToIndex(e *index.Entry, filename string, h plumbing.Hash) error { info, err := w.Filesystem.Lstat(filename) if err != nil { return err } e.Hash = h e.ModifiedAt = info.ModTime() e.Mode, err = filemode.NewFromOSFileMode(info.Mode()) if err != nil { return err } if e.Mode.IsRegular() { e.Size = uint32(info.Size()) } fillSystemInfo(e, info.Sys()) return nil } // Remove removes files from the working tree and from the index. func (w *Worktree) Remove(path string) (plumbing.Hash, error) { // TODO(mcuadros): remove plumbing.Hash from signature at v5. idx, err := w.r.Storer.Index() if err != nil { return plumbing.ZeroHash, err } var h plumbing.Hash fi, err := w.Filesystem.Lstat(path) if err != nil || !fi.IsDir() { h, err = w.doRemoveFile(idx, path) } else { _, err = w.doRemoveDirectory(idx, path) } if err != nil { return h, err } return h, w.r.Storer.SetIndex(idx) } func (w *Worktree) doRemoveDirectory(idx *index.Index, directory string) (removed bool, err error) { files, err := w.Filesystem.ReadDir(directory) if err != nil { return false, err } for _, file := range files { name := path.Join(directory, file.Name()) var r bool if file.IsDir() { r, err = w.doRemoveDirectory(idx, name) } else { _, err = w.doRemoveFile(idx, name) if err == index.ErrEntryNotFound { err = nil } } if err != nil { return } if !removed && r { removed = true } } err = w.removeEmptyDirectory(directory) return } func (w *Worktree) removeEmptyDirectory(path string) error { files, err := w.Filesystem.ReadDir(path) if err != nil { return err } if len(files) != 0 { return nil } return w.Filesystem.Remove(path) } func (w *Worktree) doRemoveFile(idx *index.Index, path string) (plumbing.Hash, error) { hash, err := w.deleteFromIndex(idx, path) if err != nil { return plumbing.ZeroHash, err } return hash, w.deleteFromFilesystem(path) } func (w *Worktree) deleteFromIndex(idx *index.Index, path string) (plumbing.Hash, error) { e, err := idx.Remove(path) if err != nil 
{ return plumbing.ZeroHash, err } return e.Hash, nil } func (w *Worktree) deleteFromFilesystem(path string) error { err := w.Filesystem.Remove(path) if os.IsNotExist(err) { return nil } return err } // RemoveGlob removes all paths, matching pattern, from the index. If pattern // matches a directory path, all directory contents are removed from the index // recursively. func (w *Worktree) RemoveGlob(pattern string) error { idx, err := w.r.Storer.Index() if err != nil { return err } entries, err := idx.Glob(pattern) if err != nil { return err } for _, e := range entries { file := filepath.FromSlash(e.Name) if _, err := w.Filesystem.Lstat(file); err != nil && !os.IsNotExist(err) { return err } if _, err := w.doRemoveFile(idx, file); err != nil { return err } dir, _ := filepath.Split(file) if err := w.removeEmptyDirectory(dir); err != nil { return err } } return w.r.Storer.SetIndex(idx) } // Move moves or rename a file in the worktree and the index, directories are // not supported. func (w *Worktree) Move(from, to string) (plumbing.Hash, error) { // TODO(mcuadros): support directories and/or implement support for glob if _, err := w.Filesystem.Lstat(from); err != nil { return plumbing.ZeroHash, err } if _, err := w.Filesystem.Lstat(to); err == nil { return plumbing.ZeroHash, ErrDestinationExists } idx, err := w.r.Storer.Index() if err != nil { return plumbing.ZeroHash, err } hash, err := w.deleteFromIndex(idx, from) if err != nil { return plumbing.ZeroHash, err } if err := w.Filesystem.Rename(from, to); err != nil { return hash, err } if err := w.addOrUpdateFileToIndex(idx, to, hash); err != nil { return hash, err } return hash, w.r.Storer.SetIndex(idx) } golang-gopkg-src-d-go-git.v4-4.11.0/worktree_test.go000066400000000000000000001361351345605224300221400ustar00rootroot00000000000000package git import ( "bytes" "context" "errors" "io/ioutil" "os" "path/filepath" "regexp" "runtime" "testing" "time" "gopkg.in/src-d/go-git.v4/config" "gopkg.in/src-d/go-git.v4/plumbing" 
"gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/plumbing/format/gitignore" "gopkg.in/src-d/go-git.v4/plumbing/format/index" "gopkg.in/src-d/go-git.v4/plumbing/object" "gopkg.in/src-d/go-git.v4/storage/memory" "golang.org/x/text/unicode/norm" . "gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4/memfs" "gopkg.in/src-d/go-billy.v4/osfs" "gopkg.in/src-d/go-billy.v4/util" "gopkg.in/src-d/go-git-fixtures.v3" ) type WorktreeSuite struct { BaseSuite } var _ = Suite(&WorktreeSuite{}) func (s *WorktreeSuite) SetUpTest(c *C) { f := fixtures.Basic().One() s.Repository = s.NewRepositoryWithEmptyWorktree(f) } func (s *WorktreeSuite) TestPullCheckout(c *C) { fs := memfs.New() r, _ := Init(memory.NewStorage(), fs) r.CreateRemote(&config.RemoteConfig{ Name: DefaultRemoteName, URLs: []string{s.GetBasicLocalRepositoryURL()}, }) w, err := r.Worktree() c.Assert(err, IsNil) err = w.Pull(&PullOptions{}) c.Assert(err, IsNil) fi, err := fs.ReadDir("") c.Assert(err, IsNil) c.Assert(fi, HasLen, 8) } func (s *WorktreeSuite) TestPullFastForward(c *C) { url := c.MkDir() path := fixtures.Basic().ByTag("worktree").One().Worktree().Root() server, err := PlainClone(url, false, &CloneOptions{ URL: path, }) c.Assert(err, IsNil) r, err := PlainClone(c.MkDir(), false, &CloneOptions{ URL: url, }) c.Assert(err, IsNil) w, err := server.Worktree() c.Assert(err, IsNil) err = ioutil.WriteFile(filepath.Join(path, "foo"), []byte("foo"), 0755) c.Assert(err, IsNil) hash, err := w.Commit("foo", &CommitOptions{Author: defaultSignature()}) c.Assert(err, IsNil) w, err = r.Worktree() c.Assert(err, IsNil) err = w.Pull(&PullOptions{}) c.Assert(err, IsNil) head, err := r.Head() c.Assert(err, IsNil) c.Assert(head.Hash(), Equals, hash) } func (s *WorktreeSuite) TestPullNonFastForward(c *C) { url := c.MkDir() path := fixtures.Basic().ByTag("worktree").One().Worktree().Root() server, err := PlainClone(url, false, &CloneOptions{ URL: path, }) c.Assert(err, IsNil) r, err := PlainClone(c.MkDir(), false, 
&CloneOptions{ URL: url, }) c.Assert(err, IsNil) w, err := server.Worktree() c.Assert(err, IsNil) err = ioutil.WriteFile(filepath.Join(path, "foo"), []byte("foo"), 0755) c.Assert(err, IsNil) _, err = w.Commit("foo", &CommitOptions{Author: defaultSignature()}) c.Assert(err, IsNil) w, err = r.Worktree() c.Assert(err, IsNil) err = ioutil.WriteFile(filepath.Join(path, "bar"), []byte("bar"), 0755) c.Assert(err, IsNil) _, err = w.Commit("bar", &CommitOptions{Author: defaultSignature()}) c.Assert(err, IsNil) err = w.Pull(&PullOptions{}) c.Assert(err, Equals, ErrNonFastForwardUpdate) } func (s *WorktreeSuite) TestPullUpdateReferencesIfNeeded(c *C) { r, _ := Init(memory.NewStorage(), memfs.New()) r.CreateRemote(&config.RemoteConfig{ Name: DefaultRemoteName, URLs: []string{s.GetBasicLocalRepositoryURL()}, }) err := r.Fetch(&FetchOptions{}) c.Assert(err, IsNil) _, err = r.Reference("refs/heads/master", false) c.Assert(err, NotNil) w, err := r.Worktree() c.Assert(err, IsNil) err = w.Pull(&PullOptions{}) c.Assert(err, IsNil) head, err := r.Reference(plumbing.HEAD, true) c.Assert(err, IsNil) c.Assert(head.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5") branch, err := r.Reference("refs/heads/master", false) c.Assert(err, IsNil) c.Assert(branch.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5") err = w.Pull(&PullOptions{}) c.Assert(err, Equals, NoErrAlreadyUpToDate) } func (s *WorktreeSuite) TestPullInSingleBranch(c *C) { r, _ := Init(memory.NewStorage(), memfs.New()) err := r.clone(context.Background(), &CloneOptions{ URL: s.GetBasicLocalRepositoryURL(), SingleBranch: true, }) c.Assert(err, IsNil) w, err := r.Worktree() c.Assert(err, IsNil) err = w.Pull(&PullOptions{}) c.Assert(err, Equals, NoErrAlreadyUpToDate) branch, err := r.Reference("refs/heads/master", false) c.Assert(err, IsNil) c.Assert(branch.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5") branch, err = r.Reference("refs/remotes/foo/branch", false) 
c.Assert(err, NotNil) storage := r.Storer.(*memory.Storage) c.Assert(storage.Objects, HasLen, 28) } func (s *WorktreeSuite) TestPullProgress(c *C) { r, _ := Init(memory.NewStorage(), memfs.New()) r.CreateRemote(&config.RemoteConfig{ Name: DefaultRemoteName, URLs: []string{s.GetBasicLocalRepositoryURL()}, }) w, err := r.Worktree() c.Assert(err, IsNil) buf := bytes.NewBuffer(nil) err = w.Pull(&PullOptions{ Progress: buf, }) c.Assert(err, IsNil) c.Assert(buf.Len(), Not(Equals), 0) } func (s *WorktreeSuite) TestPullProgressWithRecursion(c *C) { if testing.Short() { c.Skip("skipping test in short mode.") } path := fixtures.ByTag("submodule").One().Worktree().Root() dir, err := ioutil.TempDir("", "plain-clone-submodule") c.Assert(err, IsNil) defer os.RemoveAll(dir) r, _ := PlainInit(dir, false) r.CreateRemote(&config.RemoteConfig{ Name: DefaultRemoteName, URLs: []string{path}, }) w, err := r.Worktree() c.Assert(err, IsNil) err = w.Pull(&PullOptions{ RecurseSubmodules: DefaultSubmoduleRecursionDepth, }) c.Assert(err, IsNil) cfg, err := r.Config() c.Assert(err, IsNil) c.Assert(cfg.Submodules, HasLen, 2) } func (s *RepositorySuite) TestPullAdd(c *C) { path := fixtures.Basic().ByTag("worktree").One().Worktree().Root() r, err := Clone(memory.NewStorage(), memfs.New(), &CloneOptions{ URL: filepath.Join(path, ".git"), }) c.Assert(err, IsNil) storage := r.Storer.(*memory.Storage) c.Assert(storage.Objects, HasLen, 28) branch, err := r.Reference("refs/heads/master", false) c.Assert(err, IsNil) c.Assert(branch.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5") ExecuteOnPath(c, path, "touch foo", "git add foo", "git commit -m foo foo", ) w, err := r.Worktree() c.Assert(err, IsNil) err = w.Pull(&PullOptions{RemoteName: "origin"}) c.Assert(err, IsNil) // the commit command has introduced a new commit, tree and blob c.Assert(storage.Objects, HasLen, 31) branch, err = r.Reference("refs/heads/master", false) c.Assert(err, IsNil) c.Assert(branch.Hash().String(), 
Not(Equals), "6ecf0ef2c2dffb796033e5a02219af86ec6584e5") } func (s *WorktreeSuite) TestCheckout(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{ Force: true, }) c.Assert(err, IsNil) entries, err := fs.ReadDir("/") c.Assert(err, IsNil) c.Assert(entries, HasLen, 8) ch, err := fs.Open("CHANGELOG") c.Assert(err, IsNil) content, err := ioutil.ReadAll(ch) c.Assert(err, IsNil) c.Assert(string(content), Equals, "Initial changelog\n") idx, err := s.Repository.Storer.Index() c.Assert(err, IsNil) c.Assert(idx.Entries, HasLen, 9) } func (s *WorktreeSuite) TestCheckoutForce(c *C) { w := &Worktree{ r: s.Repository, Filesystem: memfs.New(), } err := w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) w.Filesystem = memfs.New() err = w.Checkout(&CheckoutOptions{ Force: true, }) c.Assert(err, IsNil) entries, err := w.Filesystem.ReadDir("/") c.Assert(err, IsNil) c.Assert(entries, HasLen, 8) } func (s *WorktreeSuite) TestCheckoutSymlink(c *C) { if runtime.GOOS == "windows" { c.Skip("git doesn't support symlinks by default in windows") } dir, err := ioutil.TempDir("", "checkout") c.Assert(err, IsNil) defer os.RemoveAll(dir) r, err := PlainInit(dir, false) c.Assert(err, IsNil) w, err := r.Worktree() c.Assert(err, IsNil) w.Filesystem.Symlink("not-exists", "bar") w.Add("bar") w.Commit("foo", &CommitOptions{Author: defaultSignature()}) r.Storer.SetIndex(&index.Index{Version: 2}) w.Filesystem = osfs.New(filepath.Join(dir, "worktree-empty")) err = w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, true) target, err := w.Filesystem.Readlink("bar") c.Assert(target, Equals, "not-exists") c.Assert(err, IsNil) } func (s *WorktreeSuite) TestFilenameNormalization(c *C) { if runtime.GOOS == "windows" { c.Skip("windows paths may contain non utf-8 sequences") } url := c.MkDir() path := fixtures.Basic().ByTag("worktree").One().Worktree().Root() server, err 
:= PlainClone(url, false, &CloneOptions{ URL: path, }) c.Assert(err, IsNil) filename := "페" w, err := server.Worktree() c.Assert(err, IsNil) writeFile := func(path string) { err := util.WriteFile(w.Filesystem, path, []byte("foo"), 0755) c.Assert(err, IsNil) } writeFile(filename) origHash, err := w.Add(filename) c.Assert(err, IsNil) _, err = w.Commit("foo", &CommitOptions{Author: defaultSignature()}) c.Assert(err, IsNil) r, err := Clone(memory.NewStorage(), memfs.New(), &CloneOptions{ URL: url, }) c.Assert(err, IsNil) w, err = r.Worktree() c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, true) err = w.Filesystem.Remove(filename) c.Assert(err, IsNil) modFilename := norm.Form(norm.NFKD).String(filename) writeFile(modFilename) _, err = w.Add(filename) c.Assert(err, IsNil) modHash, err := w.Add(modFilename) c.Assert(err, IsNil) // At this point we've got two files with the same content. // Hence their hashes must be the same. c.Assert(origHash == modHash, Equals, true) status, err = w.Status() c.Assert(err, IsNil) // However, their names are different and the work tree is still dirty. c.Assert(status.IsClean(), Equals, false) // Revert back the deletion of the first file. writeFile(filename) _, err = w.Add(filename) c.Assert(err, IsNil) status, err = w.Status() c.Assert(err, IsNil) // Still dirty - the second file is added. 
c.Assert(status.IsClean(), Equals, false) _, err = w.Remove(modFilename) c.Assert(err, IsNil) status, err = w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, true) } func (s *WorktreeSuite) TestCheckoutSubmodule(c *C) { url := "https://github.com/git-fixtures/submodule.git" r := s.NewRepositoryWithEmptyWorktree(fixtures.ByURL(url).One()) w, err := r.Worktree() c.Assert(err, IsNil) err = w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, true) } func (s *WorktreeSuite) TestCheckoutSubmoduleInitialized(c *C) { url := "https://github.com/git-fixtures/submodule.git" r := s.NewRepository(fixtures.ByURL(url).One()) w, err := r.Worktree() c.Assert(err, IsNil) sub, err := w.Submodules() c.Assert(err, IsNil) err = sub.Update(&SubmoduleUpdateOptions{Init: true}) c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, true) } func (s *WorktreeSuite) TestCheckoutIndexMem(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) idx, err := s.Repository.Storer.Index() c.Assert(err, IsNil) c.Assert(idx.Entries, HasLen, 9) c.Assert(idx.Entries[0].Hash.String(), Equals, "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88") c.Assert(idx.Entries[0].Name, Equals, ".gitignore") c.Assert(idx.Entries[0].Mode, Equals, filemode.Regular) c.Assert(idx.Entries[0].ModifiedAt.IsZero(), Equals, false) c.Assert(idx.Entries[0].Size, Equals, uint32(189)) // ctime, dev, inode, uid and gid are not supported on memfs fs c.Assert(idx.Entries[0].CreatedAt.IsZero(), Equals, true) c.Assert(idx.Entries[0].Dev, Equals, uint32(0)) c.Assert(idx.Entries[0].Inode, Equals, uint32(0)) c.Assert(idx.Entries[0].UID, Equals, uint32(0)) c.Assert(idx.Entries[0].GID, Equals, uint32(0)) } func (s *WorktreeSuite) TestCheckoutIndexOS(c *C) { dir, err := ioutil.TempDir("", "checkout") c.Assert(err, IsNil) defer 
os.RemoveAll(dir) fs := osfs.New(filepath.Join(dir, "worktree")) w := &Worktree{ r: s.Repository, Filesystem: fs, } err = w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) idx, err := s.Repository.Storer.Index() c.Assert(err, IsNil) c.Assert(idx.Entries, HasLen, 9) c.Assert(idx.Entries[0].Hash.String(), Equals, "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88") c.Assert(idx.Entries[0].Name, Equals, ".gitignore") c.Assert(idx.Entries[0].Mode, Equals, filemode.Regular) c.Assert(idx.Entries[0].ModifiedAt.IsZero(), Equals, false) c.Assert(idx.Entries[0].Size, Equals, uint32(189)) c.Assert(idx.Entries[0].CreatedAt.IsZero(), Equals, false) if runtime.GOOS != "windows" { c.Assert(idx.Entries[0].Dev, Not(Equals), uint32(0)) c.Assert(idx.Entries[0].Inode, Not(Equals), uint32(0)) c.Assert(idx.Entries[0].UID, Not(Equals), uint32(0)) c.Assert(idx.Entries[0].GID, Not(Equals), uint32(0)) } } func (s *WorktreeSuite) TestCheckoutBranch(c *C) { w := &Worktree{ r: s.Repository, Filesystem: memfs.New(), } err := w.Checkout(&CheckoutOptions{ Branch: "refs/heads/branch", }) c.Assert(err, IsNil) head, err := w.r.Head() c.Assert(err, IsNil) c.Assert(head.Name().String(), Equals, "refs/heads/branch") status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, true) } func (s *WorktreeSuite) TestCheckoutCreateWithHash(c *C) { w := &Worktree{ r: s.Repository, Filesystem: memfs.New(), } err := w.Checkout(&CheckoutOptions{ Create: true, Branch: "refs/heads/foo", Hash: plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"), }) c.Assert(err, IsNil) head, err := w.r.Head() c.Assert(err, IsNil) c.Assert(head.Name().String(), Equals, "refs/heads/foo") c.Assert(head.Hash(), Equals, plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9")) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, true) } func (s *WorktreeSuite) TestCheckoutCreate(c *C) { w := &Worktree{ r: s.Repository, Filesystem: memfs.New(), } err := w.Checkout(&CheckoutOptions{ 
Create: true, Branch: "refs/heads/foo", }) c.Assert(err, IsNil) head, err := w.r.Head() c.Assert(err, IsNil) c.Assert(head.Name().String(), Equals, "refs/heads/foo") c.Assert(head.Hash(), Equals, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, true) } func (s *WorktreeSuite) TestCheckoutBranchAndHash(c *C) { w := &Worktree{ r: s.Repository, Filesystem: memfs.New(), } err := w.Checkout(&CheckoutOptions{ Branch: "refs/heads/foo", Hash: plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"), }) c.Assert(err, Equals, ErrBranchHashExclusive) } func (s *WorktreeSuite) TestCheckoutCreateMissingBranch(c *C) { w := &Worktree{ r: s.Repository, Filesystem: memfs.New(), } err := w.Checkout(&CheckoutOptions{ Create: true, }) c.Assert(err, Equals, ErrCreateRequiresBranch) } func (s *WorktreeSuite) TestCheckoutTag(c *C) { f := fixtures.ByTag("tags").One() r := s.NewRepositoryWithEmptyWorktree(f) w, err := r.Worktree() c.Assert(err, IsNil) err = w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) head, err := w.r.Head() c.Assert(err, IsNil) c.Assert(head.Name().String(), Equals, "refs/heads/master") status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, true) err = w.Checkout(&CheckoutOptions{Branch: "refs/tags/lightweight-tag"}) c.Assert(err, IsNil) head, err = w.r.Head() c.Assert(err, IsNil) c.Assert(head.Name().String(), Equals, "HEAD") c.Assert(head.Hash().String(), Equals, "f7b877701fbf855b44c0a9e86f3fdce2c298b07f") err = w.Checkout(&CheckoutOptions{Branch: "refs/tags/commit-tag"}) c.Assert(err, IsNil) head, err = w.r.Head() c.Assert(err, IsNil) c.Assert(head.Name().String(), Equals, "HEAD") c.Assert(head.Hash().String(), Equals, "f7b877701fbf855b44c0a9e86f3fdce2c298b07f") err = w.Checkout(&CheckoutOptions{Branch: "refs/tags/tree-tag"}) c.Assert(err, NotNil) head, err = w.r.Head() c.Assert(err, IsNil) c.Assert(head.Name().String(), Equals, "HEAD") } 
func (s *WorktreeSuite) TestCheckoutBisect(c *C) { if testing.Short() { c.Skip("skipping test in short mode.") } s.testCheckoutBisect(c, "https://github.com/src-d/go-git.git") } func (s *WorktreeSuite) TestCheckoutBisectSubmodules(c *C) { s.testCheckoutBisect(c, "https://github.com/git-fixtures/submodule.git") } // TestCheckoutBisect simulates a git bisect going through the git history and // checking every commit over the previous commit func (s *WorktreeSuite) testCheckoutBisect(c *C, url string) { f := fixtures.ByURL(url).One() r := s.NewRepositoryWithEmptyWorktree(f) w, err := r.Worktree() c.Assert(err, IsNil) iter, err := w.r.Log(&LogOptions{}) c.Assert(err, IsNil) iter.ForEach(func(commit *object.Commit) error { err := w.Checkout(&CheckoutOptions{Hash: commit.Hash}) c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, true) return nil }) } func (s *WorktreeSuite) TestStatus(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, false) c.Assert(status, HasLen, 9) } func (s *WorktreeSuite) TestStatusEmpty(c *C) { fs := memfs.New() storage := memory.NewStorage() r, err := Init(storage, fs) c.Assert(err, IsNil) w, err := r.Worktree() c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, true) c.Assert(status, NotNil) } func (s *WorktreeSuite) TestStatusEmptyDirty(c *C) { fs := memfs.New() err := util.WriteFile(fs, "foo", []byte("foo"), 0755) c.Assert(err, IsNil) storage := memory.NewStorage() r, err := Init(storage, fs) c.Assert(err, IsNil) w, err := r.Worktree() c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, false) c.Assert(status, HasLen, 1) } func (s *WorktreeSuite) TestReset(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } commit := 
plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9") err := w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) branch, err := w.r.Reference(plumbing.Master, false) c.Assert(err, IsNil) c.Assert(branch.Hash(), Not(Equals), commit) err = w.Reset(&ResetOptions{Mode: MergeReset, Commit: commit}) c.Assert(err, IsNil) branch, err = w.r.Reference(plumbing.Master, false) c.Assert(err, IsNil) c.Assert(branch.Hash(), Equals, commit) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, true) } func (s *WorktreeSuite) TestResetWithUntracked(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } commit := plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9") err := w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) err = util.WriteFile(fs, "foo", nil, 0755) c.Assert(err, IsNil) err = w.Reset(&ResetOptions{Mode: MergeReset, Commit: commit}) c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, true) } func (s *WorktreeSuite) TestResetSoft(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } commit := plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9") err := w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) err = w.Reset(&ResetOptions{Mode: SoftReset, Commit: commit}) c.Assert(err, IsNil) branch, err := w.r.Reference(plumbing.Master, false) c.Assert(err, IsNil) c.Assert(branch.Hash(), Equals, commit) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, false) c.Assert(status.File("CHANGELOG").Staging, Equals, Added) } func (s *WorktreeSuite) TestResetMixed(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } commit := plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9") err := w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) err = w.Reset(&ResetOptions{Mode: MixedReset, Commit: commit}) c.Assert(err, IsNil) branch, err := w.r.Reference(plumbing.Master, false) 
c.Assert(err, IsNil) c.Assert(branch.Hash(), Equals, commit) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, false) c.Assert(status.File("CHANGELOG").Staging, Equals, Untracked) } func (s *WorktreeSuite) TestResetMerge(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } commitA := plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294") commitB := plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9") err := w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) err = w.Reset(&ResetOptions{Mode: MergeReset, Commit: commitA}) c.Assert(err, IsNil) branch, err := w.r.Reference(plumbing.Master, false) c.Assert(err, IsNil) c.Assert(branch.Hash(), Equals, commitA) f, err := fs.Create(".gitignore") c.Assert(err, IsNil) _, err = f.Write([]byte("foo")) c.Assert(err, IsNil) err = f.Close() c.Assert(err, IsNil) err = w.Reset(&ResetOptions{Mode: MergeReset, Commit: commitB}) c.Assert(err, Equals, ErrUnstagedChanges) branch, err = w.r.Reference(plumbing.Master, false) c.Assert(err, IsNil) c.Assert(branch.Hash(), Equals, commitA) } func (s *WorktreeSuite) TestResetHard(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } commit := plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9") err := w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) f, err := fs.Create(".gitignore") c.Assert(err, IsNil) _, err = f.Write([]byte("foo")) c.Assert(err, IsNil) err = f.Close() c.Assert(err, IsNil) err = w.Reset(&ResetOptions{Mode: HardReset, Commit: commit}) c.Assert(err, IsNil) branch, err := w.r.Reference(plumbing.Master, false) c.Assert(err, IsNil) c.Assert(branch.Hash(), Equals, commit) } func (s *WorktreeSuite) TestStatusAfterCheckout(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, true) } func (s *WorktreeSuite) 
TestStatusModified(c *C) { dir, err := ioutil.TempDir("", "status") c.Assert(err, IsNil) defer os.RemoveAll(dir) fs := osfs.New(filepath.Join(dir, "worktree")) w := &Worktree{ r: s.Repository, Filesystem: fs, } err = w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) f, err := fs.Create(".gitignore") c.Assert(err, IsNil) _, err = f.Write([]byte("foo")) c.Assert(err, IsNil) err = f.Close() c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, false) c.Assert(status.File(".gitignore").Worktree, Equals, Modified) } func (s *WorktreeSuite) TestStatusIgnored(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } w.Checkout(&CheckoutOptions{}) fs.MkdirAll("another", os.ModePerm) f, _ := fs.Create("another/file") f.Close() fs.MkdirAll("vendor/github.com", os.ModePerm) f, _ = fs.Create("vendor/github.com/file") f.Close() fs.MkdirAll("vendor/gopkg.in", os.ModePerm) f, _ = fs.Create("vendor/gopkg.in/file") f.Close() status, _ := w.Status() c.Assert(len(status), Equals, 3) _, ok := status["another/file"] c.Assert(ok, Equals, true) _, ok = status["vendor/github.com/file"] c.Assert(ok, Equals, true) _, ok = status["vendor/gopkg.in/file"] c.Assert(ok, Equals, true) f, _ = fs.Create(".gitignore") f.Write([]byte("vendor/g*/")) f.Close() f, _ = fs.Create("vendor/.gitignore") f.Write([]byte("!github.com/\n")) f.Close() status, _ = w.Status() c.Assert(len(status), Equals, 4) _, ok = status[".gitignore"] c.Assert(ok, Equals, true) _, ok = status["another/file"] c.Assert(ok, Equals, true) _, ok = status["vendor/.gitignore"] c.Assert(ok, Equals, true) _, ok = status["vendor/github.com/file"] c.Assert(ok, Equals, true) } func (s *WorktreeSuite) TestStatusUntracked(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) f, err := w.Filesystem.Create("foo") c.Assert(err, IsNil) c.Assert(f.Close(), IsNil) status, err := w.Status() 
c.Assert(err, IsNil) c.Assert(status.File("foo").Staging, Equals, Untracked) c.Assert(status.File("foo").Worktree, Equals, Untracked) } func (s *WorktreeSuite) TestStatusDeleted(c *C) { dir, err := ioutil.TempDir("", "status") c.Assert(err, IsNil) defer os.RemoveAll(dir) fs := osfs.New(filepath.Join(dir, "worktree")) w := &Worktree{ r: s.Repository, Filesystem: fs, } err = w.Checkout(&CheckoutOptions{}) c.Assert(err, IsNil) err = fs.Remove(".gitignore") c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status.IsClean(), Equals, false) c.Assert(status.File(".gitignore").Worktree, Equals, Deleted) } func (s *WorktreeSuite) TestSubmodule(c *C) { path := fixtures.ByTag("submodule").One().Worktree().Root() r, err := PlainOpen(path) c.Assert(err, IsNil) w, err := r.Worktree() c.Assert(err, IsNil) m, err := w.Submodule("basic") c.Assert(err, IsNil) c.Assert(m.Config().Name, Equals, "basic") } func (s *WorktreeSuite) TestSubmodules(c *C) { path := fixtures.ByTag("submodule").One().Worktree().Root() r, err := PlainOpen(path) c.Assert(err, IsNil) w, err := r.Worktree() c.Assert(err, IsNil) l, err := w.Submodules() c.Assert(err, IsNil) c.Assert(l, HasLen, 2) } func (s *WorktreeSuite) TestAddUntracked(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) idx, err := w.r.Storer.Index() c.Assert(err, IsNil) c.Assert(idx.Entries, HasLen, 9) err = util.WriteFile(w.Filesystem, "foo", []byte("FOO"), 0755) c.Assert(err, IsNil) hash, err := w.Add("foo") c.Assert(hash.String(), Equals, "d96c7efbfec2814ae0301ad054dc8d9fc416c9b5") c.Assert(err, IsNil) idx, err = w.r.Storer.Index() c.Assert(err, IsNil) c.Assert(idx.Entries, HasLen, 10) e, err := idx.Entry("foo") c.Assert(err, IsNil) c.Assert(e.Hash, Equals, hash) c.Assert(e.Mode, Equals, filemode.Executable) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 1) file := status.File("foo") 
c.Assert(file.Staging, Equals, Added) c.Assert(file.Worktree, Equals, Unmodified) obj, err := w.r.Storer.EncodedObject(plumbing.BlobObject, hash) c.Assert(err, IsNil) c.Assert(obj, NotNil) c.Assert(obj.Size(), Equals, int64(3)) } func (s *WorktreeSuite) TestIgnored(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } w.Excludes = make([]gitignore.Pattern, 0) w.Excludes = append(w.Excludes, gitignore.ParsePattern("foo", nil)) err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) idx, err := w.r.Storer.Index() c.Assert(err, IsNil) c.Assert(idx.Entries, HasLen, 9) err = util.WriteFile(w.Filesystem, "foo", []byte("FOO"), 0755) c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 0) file := status.File("foo") c.Assert(file.Staging, Equals, Untracked) c.Assert(file.Worktree, Equals, Untracked) } func (s *WorktreeSuite) TestExcludedNoGitignore(c *C) { f := fixtures.ByTag("empty").One() r := s.NewRepository(f) fs := memfs.New() w := &Worktree{ r: r, Filesystem: fs, } _, err := fs.Open(".gitignore") c.Assert(err, Equals, os.ErrNotExist) w.Excludes = make([]gitignore.Pattern, 0) w.Excludes = append(w.Excludes, gitignore.ParsePattern("foo", nil)) err = util.WriteFile(w.Filesystem, "foo", []byte("FOO"), 0755) c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 0) file := status.File("foo") c.Assert(file.Staging, Equals, Untracked) c.Assert(file.Worktree, Equals, Untracked) } func (s *WorktreeSuite) TestAddModified(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) idx, err := w.r.Storer.Index() c.Assert(err, IsNil) c.Assert(idx.Entries, HasLen, 9) err = util.WriteFile(w.Filesystem, "LICENSE", []byte("FOO"), 0644) c.Assert(err, IsNil) hash, err := w.Add("LICENSE") c.Assert(err, IsNil) c.Assert(hash.String(), Equals, "d96c7efbfec2814ae0301ad054dc8d9fc416c9b5") idx, 
err = w.r.Storer.Index() c.Assert(err, IsNil) c.Assert(idx.Entries, HasLen, 9) e, err := idx.Entry("LICENSE") c.Assert(err, IsNil) c.Assert(e.Hash, Equals, hash) c.Assert(e.Mode, Equals, filemode.Regular) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 1) file := status.File("LICENSE") c.Assert(file.Staging, Equals, Modified) c.Assert(file.Worktree, Equals, Unmodified) } func (s *WorktreeSuite) TestAddUnmodified(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) hash, err := w.Add("LICENSE") c.Assert(hash.String(), Equals, "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f") c.Assert(err, IsNil) } func (s *WorktreeSuite) TestAddRemoved(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) idx, err := w.r.Storer.Index() c.Assert(err, IsNil) c.Assert(idx.Entries, HasLen, 9) err = w.Filesystem.Remove("LICENSE") c.Assert(err, IsNil) hash, err := w.Add("LICENSE") c.Assert(err, IsNil) c.Assert(hash.String(), Equals, "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f") e, err := idx.Entry("LICENSE") c.Assert(err, IsNil) c.Assert(e.Hash, Equals, hash) c.Assert(e.Mode, Equals, filemode.Regular) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 1) file := status.File("LICENSE") c.Assert(file.Staging, Equals, Deleted) } func (s *WorktreeSuite) TestAddSymlink(c *C) { dir, err := ioutil.TempDir("", "checkout") c.Assert(err, IsNil) defer os.RemoveAll(dir) r, err := PlainInit(dir, false) c.Assert(err, IsNil) err = util.WriteFile(r.wt, "foo", []byte("qux"), 0644) c.Assert(err, IsNil) err = r.wt.Symlink("foo", "bar") c.Assert(err, IsNil) w, err := r.Worktree() c.Assert(err, IsNil) h, err := w.Add("foo") c.Assert(err, IsNil) c.Assert(h, Not(Equals), plumbing.NewHash("19102815663d23f8b75a47e7a01965dcdc96468c")) h, err = w.Add("bar") c.Assert(err, IsNil) c.Assert(h, 
Equals, plumbing.NewHash("19102815663d23f8b75a47e7a01965dcdc96468c")) obj, err := w.r.Storer.EncodedObject(plumbing.BlobObject, h) c.Assert(err, IsNil) c.Assert(obj, NotNil) c.Assert(obj.Size(), Equals, int64(3)) } func (s *WorktreeSuite) TestAddDirectory(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) idx, err := w.r.Storer.Index() c.Assert(err, IsNil) c.Assert(idx.Entries, HasLen, 9) err = util.WriteFile(w.Filesystem, "qux/foo", []byte("FOO"), 0755) c.Assert(err, IsNil) err = util.WriteFile(w.Filesystem, "qux/baz/bar", []byte("BAR"), 0755) c.Assert(err, IsNil) h, err := w.Add("qux") c.Assert(err, IsNil) c.Assert(h.IsZero(), Equals, true) idx, err = w.r.Storer.Index() c.Assert(err, IsNil) c.Assert(idx.Entries, HasLen, 11) e, err := idx.Entry("qux/foo") c.Assert(err, IsNil) c.Assert(e.Mode, Equals, filemode.Executable) e, err = idx.Entry("qux/baz/bar") c.Assert(err, IsNil) c.Assert(e.Mode, Equals, filemode.Executable) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 2) file := status.File("qux/foo") c.Assert(file.Staging, Equals, Added) c.Assert(file.Worktree, Equals, Unmodified) file = status.File("qux/baz/bar") c.Assert(file.Staging, Equals, Added) c.Assert(file.Worktree, Equals, Unmodified) } func (s *WorktreeSuite) TestAddDirectoryErrorNotFound(c *C) { r, _ := Init(memory.NewStorage(), memfs.New()) w, _ := r.Worktree() h, err := w.Add("foo") c.Assert(err, NotNil) c.Assert(h.IsZero(), Equals, true) } func (s *WorktreeSuite) TestAddGlob(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) idx, err := w.r.Storer.Index() c.Assert(err, IsNil) c.Assert(idx.Entries, HasLen, 9) err = util.WriteFile(w.Filesystem, "qux/qux", []byte("QUX"), 0755) c.Assert(err, IsNil) err = util.WriteFile(w.Filesystem, "qux/baz", []byte("BAZ"), 0755) c.Assert(err, IsNil) err = 
util.WriteFile(w.Filesystem, "qux/bar/baz", []byte("BAZ"), 0755) c.Assert(err, IsNil) err = w.AddGlob(w.Filesystem.Join("qux", "b*")) c.Assert(err, IsNil) idx, err = w.r.Storer.Index() c.Assert(err, IsNil) c.Assert(idx.Entries, HasLen, 11) e, err := idx.Entry("qux/baz") c.Assert(err, IsNil) c.Assert(e.Mode, Equals, filemode.Executable) e, err = idx.Entry("qux/bar/baz") c.Assert(err, IsNil) c.Assert(e.Mode, Equals, filemode.Executable) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 3) file := status.File("qux/qux") c.Assert(file.Staging, Equals, Untracked) c.Assert(file.Worktree, Equals, Untracked) file = status.File("qux/baz") c.Assert(file.Staging, Equals, Added) c.Assert(file.Worktree, Equals, Unmodified) file = status.File("qux/bar/baz") c.Assert(file.Staging, Equals, Added) c.Assert(file.Worktree, Equals, Unmodified) } func (s *WorktreeSuite) TestAddGlobErrorNoMatches(c *C) { r, _ := Init(memory.NewStorage(), memfs.New()) w, _ := r.Worktree() err := w.AddGlob("foo") c.Assert(err, Equals, ErrGlobNoMatches) } func (s *WorktreeSuite) TestRemove(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) hash, err := w.Remove("LICENSE") c.Assert(hash.String(), Equals, "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f") c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 1) c.Assert(status.File("LICENSE").Staging, Equals, Deleted) } func (s *WorktreeSuite) TestRemoveNotExistentEntry(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) hash, err := w.Remove("not-exists") c.Assert(hash.IsZero(), Equals, true) c.Assert(err, NotNil) } func (s *WorktreeSuite) TestRemoveDirectory(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) hash, err := 
w.Remove("json") c.Assert(hash.IsZero(), Equals, true) c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 2) c.Assert(status.File("json/long.json").Staging, Equals, Deleted) c.Assert(status.File("json/short.json").Staging, Equals, Deleted) _, err = w.Filesystem.Stat("json") c.Assert(os.IsNotExist(err), Equals, true) } func (s *WorktreeSuite) TestRemoveDirectoryUntracked(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) err = util.WriteFile(w.Filesystem, "json/foo", []byte("FOO"), 0755) c.Assert(err, IsNil) hash, err := w.Remove("json") c.Assert(hash.IsZero(), Equals, true) c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 3) c.Assert(status.File("json/long.json").Staging, Equals, Deleted) c.Assert(status.File("json/short.json").Staging, Equals, Deleted) c.Assert(status.File("json/foo").Staging, Equals, Untracked) _, err = w.Filesystem.Stat("json") c.Assert(err, IsNil) } func (s *WorktreeSuite) TestRemoveDeletedFromWorktree(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) err = fs.Remove("LICENSE") c.Assert(err, IsNil) hash, err := w.Remove("LICENSE") c.Assert(hash.String(), Equals, "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f") c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 1) c.Assert(status.File("LICENSE").Staging, Equals, Deleted) } func (s *WorktreeSuite) TestRemoveGlob(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) err = w.RemoveGlob(w.Filesystem.Join("json", "l*")) c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 1) c.Assert(status.File("json/long.json").Staging, Equals, Deleted) } func (s 
*WorktreeSuite) TestRemoveGlobDirectory(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) err = w.RemoveGlob("js*") c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 2) c.Assert(status.File("json/short.json").Staging, Equals, Deleted) c.Assert(status.File("json/long.json").Staging, Equals, Deleted) _, err = w.Filesystem.Stat("json") c.Assert(os.IsNotExist(err), Equals, true) } func (s *WorktreeSuite) TestRemoveGlobDirectoryDeleted(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) err = fs.Remove("json/short.json") c.Assert(err, IsNil) err = util.WriteFile(w.Filesystem, "json/foo", []byte("FOO"), 0755) c.Assert(err, IsNil) err = w.RemoveGlob("js*") c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 3) c.Assert(status.File("json/short.json").Staging, Equals, Deleted) c.Assert(status.File("json/long.json").Staging, Equals, Deleted) } func (s *WorktreeSuite) TestMove(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) hash, err := w.Move("LICENSE", "foo") c.Check(hash.String(), Equals, "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f") c.Assert(err, IsNil) status, err := w.Status() c.Assert(err, IsNil) c.Assert(status, HasLen, 2) c.Assert(status.File("LICENSE").Staging, Equals, Deleted) c.Assert(status.File("foo").Staging, Equals, Added) } func (s *WorktreeSuite) TestMoveNotExistentEntry(c *C) { fs := memfs.New() w := &Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) hash, err := w.Move("not-exists", "foo") c.Assert(hash.IsZero(), Equals, true) c.Assert(err, NotNil) } func (s *WorktreeSuite) TestMoveToExistent(c *C) { fs := memfs.New() w := 
&Worktree{ r: s.Repository, Filesystem: fs, } err := w.Checkout(&CheckoutOptions{Force: true}) c.Assert(err, IsNil) hash, err := w.Move(".gitignore", "LICENSE") c.Assert(hash.IsZero(), Equals, true) c.Assert(err, Equals, ErrDestinationExists) } func (s *WorktreeSuite) TestClean(c *C) { fs := fixtures.ByTag("dirty").One().Worktree() // Open the repo. fs, err := fs.Chroot("repo") c.Assert(err, IsNil) r, err := PlainOpen(fs.Root()) c.Assert(err, IsNil) wt, err := r.Worktree() c.Assert(err, IsNil) // Status before cleaning. status, err := wt.Status() c.Assert(len(status), Equals, 2) err = wt.Clean(&CleanOptions{}) c.Assert(err, IsNil) // Status after cleaning. status, err = wt.Status() c.Assert(err, IsNil) c.Assert(len(status), Equals, 1) fi, err := fs.Lstat("pkgA") c.Assert(err, IsNil) c.Assert(fi.IsDir(), Equals, true) // Clean with Dir: true. err = wt.Clean(&CleanOptions{Dir: true}) c.Assert(err, IsNil) status, err = wt.Status() c.Assert(err, IsNil) c.Assert(len(status), Equals, 0) // An empty dir should be deleted, as well. _, err = fs.Lstat("pkgA") c.Assert(err, ErrorMatches, ".*(no such file or directory.*|.*file does not exist)*.") } func (s *WorktreeSuite) TestAlternatesRepo(c *C) { fs := fixtures.ByTag("alternates").One().Worktree() // Open 1st repo. rep1fs, err := fs.Chroot("rep1") c.Assert(err, IsNil) rep1, err := PlainOpen(rep1fs.Root()) c.Assert(err, IsNil) // Open 2nd repo. rep2fs, err := fs.Chroot("rep2") c.Assert(err, IsNil) rep2, err := PlainOpen(rep2fs.Root()) c.Assert(err, IsNil) // Get the HEAD commit from the main repo. h, err := rep1.Head() c.Assert(err, IsNil) commit1, err := rep1.CommitObject(h.Hash()) c.Assert(err, IsNil) // Get the HEAD commit from the shared repo. 
h, err = rep2.Head() c.Assert(err, IsNil) commit2, err := rep2.CommitObject(h.Hash()) c.Assert(err, IsNil) c.Assert(commit1.String(), Equals, commit2.String()) } func (s *WorktreeSuite) TestGrep(c *C) { cases := []struct { name string options GrepOptions wantResult []GrepResult dontWantResult []GrepResult wantError error }{ { name: "basic word match", options: GrepOptions{ Patterns: []*regexp.Regexp{regexp.MustCompile("import")}, }, wantResult: []GrepResult{ { FileName: "go/example.go", LineNumber: 3, Content: "import (", TreeName: "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", }, { FileName: "vendor/foo.go", LineNumber: 3, Content: "import \"fmt\"", TreeName: "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", }, }, }, { name: "case insensitive match", options: GrepOptions{ Patterns: []*regexp.Regexp{regexp.MustCompile(`(?i)IMport`)}, }, wantResult: []GrepResult{ { FileName: "go/example.go", LineNumber: 3, Content: "import (", TreeName: "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", }, { FileName: "vendor/foo.go", LineNumber: 3, Content: "import \"fmt\"", TreeName: "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", }, }, }, { name: "invert match", options: GrepOptions{ Patterns: []*regexp.Regexp{regexp.MustCompile("import")}, InvertMatch: true, }, dontWantResult: []GrepResult{ { FileName: "go/example.go", LineNumber: 3, Content: "import (", TreeName: "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", }, { FileName: "vendor/foo.go", LineNumber: 3, Content: "import \"fmt\"", TreeName: "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", }, }, }, { name: "match at a given commit hash", options: GrepOptions{ Patterns: []*regexp.Regexp{regexp.MustCompile("The MIT License")}, CommitHash: plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"), }, wantResult: []GrepResult{ { FileName: "LICENSE", LineNumber: 1, Content: "The MIT License (MIT)", TreeName: "b029517f6300c2da0f4b651b8642506cd6aaf45d", }, }, dontWantResult: []GrepResult{ { FileName: "go/example.go", LineNumber: 3, Content: "import (", 
TreeName: "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", }, }, }, { name: "match for a given pathspec", options: GrepOptions{ Patterns: []*regexp.Regexp{regexp.MustCompile("import")}, PathSpecs: []*regexp.Regexp{regexp.MustCompile("go/")}, }, wantResult: []GrepResult{ { FileName: "go/example.go", LineNumber: 3, Content: "import (", TreeName: "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", }, }, dontWantResult: []GrepResult{ { FileName: "vendor/foo.go", LineNumber: 3, Content: "import \"fmt\"", TreeName: "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", }, }, }, { name: "match at a given reference name", options: GrepOptions{ Patterns: []*regexp.Regexp{regexp.MustCompile("import")}, ReferenceName: "refs/heads/master", }, wantResult: []GrepResult{ { FileName: "go/example.go", LineNumber: 3, Content: "import (", TreeName: "refs/heads/master", }, }, }, { name: "ambiguous options", options: GrepOptions{ Patterns: []*regexp.Regexp{regexp.MustCompile("import")}, CommitHash: plumbing.NewHash("2d55a722f3c3ecc36da919dfd8b6de38352f3507"), ReferenceName: "somereferencename", }, wantError: ErrHashOrReference, }, { name: "multiple patterns", options: GrepOptions{ Patterns: []*regexp.Regexp{ regexp.MustCompile("import"), regexp.MustCompile("License"), }, }, wantResult: []GrepResult{ { FileName: "go/example.go", LineNumber: 3, Content: "import (", TreeName: "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", }, { FileName: "vendor/foo.go", LineNumber: 3, Content: "import \"fmt\"", TreeName: "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", }, { FileName: "LICENSE", LineNumber: 1, Content: "The MIT License (MIT)", TreeName: "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", }, }, }, { name: "multiple pathspecs", options: GrepOptions{ Patterns: []*regexp.Regexp{regexp.MustCompile("import")}, PathSpecs: []*regexp.Regexp{ regexp.MustCompile("go/"), regexp.MustCompile("vendor/"), }, }, wantResult: []GrepResult{ { FileName: "go/example.go", LineNumber: 3, Content: "import (", TreeName: 
"6ecf0ef2c2dffb796033e5a02219af86ec6584e5", }, { FileName: "vendor/foo.go", LineNumber: 3, Content: "import \"fmt\"", TreeName: "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", }, }, }, } path := fixtures.Basic().ByTag("worktree").One().Worktree().Root() server, err := PlainClone(c.MkDir(), false, &CloneOptions{ URL: path, }) c.Assert(err, IsNil) w, err := server.Worktree() c.Assert(err, IsNil) for _, tc := range cases { gr, err := w.Grep(&tc.options) if tc.wantError != nil { c.Assert(err, Equals, tc.wantError) } else { c.Assert(err, IsNil) } // Iterate through the results and check if the wanted result is present // in the got result. for _, wantResult := range tc.wantResult { found := false for _, gotResult := range gr { if wantResult == gotResult { found = true break } } if !found { c.Errorf("unexpected grep results for %q, expected result to contain: %v", tc.name, wantResult) } } // Iterate through the results and check if the not wanted result is // present in the got result. for _, dontWantResult := range tc.dontWantResult { found := false for _, gotResult := range gr { if dontWantResult == gotResult { found = true break } } if found { c.Errorf("unexpected grep results for %q, expected result to NOT contain: %v", tc.name, dontWantResult) } } } } func (s *WorktreeSuite) TestAddAndCommit(c *C) { dir, err := ioutil.TempDir("", "plain-repo") c.Assert(err, IsNil) defer os.RemoveAll(dir) repo, err := PlainInit(dir, false) c.Assert(err, IsNil) w, err := repo.Worktree() c.Assert(err, IsNil) _, err = w.Add(".") c.Assert(err, IsNil) w.Commit("Test Add And Commit", &CommitOptions{Author: &object.Signature{ Name: "foo", Email: "foo@foo.foo", When: time.Now(), }}) iter, err := w.r.Log(&LogOptions{}) c.Assert(err, IsNil) err = iter.ForEach(func(c *object.Commit) error { files, err := c.Files() if err != nil { return err } err = files.ForEach(func(f *object.File) error { return errors.New("Expected no files, got at least 1") }) return err }) c.Assert(err, IsNil) } 
golang-gopkg-src-d-go-git.v4-4.11.0/worktree_unix_other.go000066400000000000000000000007371345605224300233430ustar00rootroot00000000000000// +build openbsd dragonfly solaris package git import ( "syscall" "time" "gopkg.in/src-d/go-git.v4/plumbing/format/index" ) func init() { fillSystemInfo = func(e *index.Entry, sys interface{}) { if os, ok := sys.(*syscall.Stat_t); ok { e.CreatedAt = time.Unix(int64(os.Atim.Sec), int64(os.Atim.Nsec)) e.Dev = uint32(os.Dev) e.Inode = uint32(os.Ino) e.GID = os.Gid e.UID = os.Uid } } } func isSymlinkWindowsNonAdmin(err error) bool { return false } golang-gopkg-src-d-go-git.v4-4.11.0/worktree_windows.go000066400000000000000000000013301345605224300226370ustar00rootroot00000000000000// +build windows package git import ( "os" "syscall" "time" "gopkg.in/src-d/go-git.v4/plumbing/format/index" ) func init() { fillSystemInfo = func(e *index.Entry, sys interface{}) { if os, ok := sys.(*syscall.Win32FileAttributeData); ok { seconds := os.CreationTime.Nanoseconds() / 1000000000 nanoseconds := os.CreationTime.Nanoseconds() - seconds*1000000000 e.CreatedAt = time.Unix(seconds, nanoseconds) } } } func isSymlinkWindowsNonAdmin(err error) bool { const ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314 if err != nil { if errLink, ok := err.(*os.LinkError); ok { if errNo, ok := errLink.Err.(syscall.Errno); ok { return errNo == ERROR_PRIVILEGE_NOT_HELD } } } return false }